summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorIvan Maidanski <ivmai@mail.ru>2011-07-26 20:09:54 +0400
committerIvan Maidanski <ivmai@mail.ru>2011-07-26 20:09:54 +0400
commite35a4171fe47dfbf847e08988ea6cec4dfc8d124 (patch)
tree25f3501669fbf24a4b370c4f8b0c0701b1da4062
parentffa0c9ea38b3dd87e91b5ed2118c74002fed6782 (diff)
downloadbdwgc-e35a4171fe47dfbf847e08988ea6cec4dfc8d124.tar.gz
gc7.0alpha1 tarball importgc7_0alpha1
-rw-r--r--BCC_MAKEFILE3
-rw-r--r--EMX_MAKEFILE5
-rw-r--r--MacOS.c8
-rw-r--r--Makefile53
-rw-r--r--Makefile.am14
-rw-r--r--Makefile.direct53
-rw-r--r--Makefile.dj5
-rw-r--r--Makefile.in19
-rw-r--r--NT_MAKEFILE4
-rw-r--r--NT_STATIC_THREADS_MAKEFILE6
-rw-r--r--NT_THREADS_MAKEFILE8
-rw-r--r--OS2_MAKEFILE2
-rw-r--r--PCR-Makefile2
-rw-r--r--SMakefile.amiga1
-rw-r--r--WCC_MAKEFILE18
-rw-r--r--aix_irix_threads.c12
-rw-r--r--allchblk.c175
-rw-r--r--alloc.c496
-rw-r--r--backgraph.c20
-rw-r--r--blacklst.c81
-rw-r--r--checksums.c47
-rwxr-xr-xconfigure174
-rw-r--r--configure.in40
-rw-r--r--darwin_stop_world.c34
-rw-r--r--dbg_mlc.c484
-rw-r--r--digimars.mak4
-rw-r--r--doc/Makefile.in2
-rw-r--r--doc/README2
-rw-r--r--doc/README.changes134
-rw-r--r--doc/README.environment20
-rw-r--r--doc/README.solaris24
-rw-r--r--dyn_load.c209
-rw-r--r--finalize.c232
-rw-r--r--gc.mak8
-rw-r--r--gc_dlopen.c4
-rw-r--r--gcc_support.c516
-rw-r--r--gcj_mlc.c136
-rw-r--r--headers.c128
-rw-r--r--if_mach.c5
-rw-r--r--if_not_there.c5
-rw-r--r--include/Makefile.am4
-rw-r--r--include/Makefile.in6
-rw-r--r--include/gc.h224
-rw-r--r--include/gc_alloc.h383
-rw-r--r--include/gc_config_macros.h7
-rw-r--r--include/gc_gcj.h12
-rw-r--r--include/gc_inl.h8
-rw-r--r--include/gc_local_alloc.h8
-rw-r--r--include/gc_mark.h46
-rw-r--r--include/gc_tiny_fl.h67
-rw-r--r--include/gc_typed.h16
-rw-r--r--include/new_gc_alloc.h64
-rw-r--r--include/private/dbg_mlc.h21
-rw-r--r--include/private/gc_hdrs.h67
-rw-r--r--include/private/gc_locks.h567
-rw-r--r--include/private/gc_pmark.h299
-rw-r--r--include/private/gc_priv.h859
-rw-r--r--include/private/gcconfig.h249
-rw-r--r--include/private/pthread_support.h29
-rw-r--r--include/private/solaris_threads.h37
-rw-r--r--include/private/specific.h3
-rw-r--r--mach_dep.c15
-rw-r--r--malloc.c311
-rw-r--r--mallocx.c363
-rw-r--r--mark.c749
-rw-r--r--mark_rts.c152
-rw-r--r--misc.c526
-rw-r--r--new_hblk.c122
-rw-r--r--obj_map.c131
-rw-r--r--os_dep.c1002
-rw-r--r--pcr_interface.c23
-rw-r--r--pthread_stop_world.c75
-rw-r--r--pthread_support.c227
-rw-r--r--ptr_chck.c126
-rw-r--r--real_malloc.c9
-rw-r--r--reclaim.c793
-rw-r--r--setjmp_t.c2
-rw-r--r--solaris_pthreads.c179
-rw-r--r--solaris_threads.c959
-rw-r--r--specific.c44
-rw-r--r--stubborn.c302
-rw-r--r--tests/test.c332
-rw-r--r--threadlibs.c9
-rw-r--r--typd_mlc.c293
-rw-r--r--version.h6
-rwxr-xr-xwin32_threads.c47
86 files changed, 4216 insertions, 8730 deletions
diff --git a/BCC_MAKEFILE b/BCC_MAKEFILE
index e21bc3d8..b49d6b99 100644
--- a/BCC_MAKEFILE
+++ b/BCC_MAKEFILE
@@ -15,8 +15,7 @@ lib= $(bcbin)\tlib
link= $(bcbin)\ilink32
cflags= -O2 -R -v- -vi -H -H=gc.csm -I$(bcinclude);$(gcinclude1);$(gcinclude2) -L$(bclib) \
-w-pro -w-aus -w-par -w-ccc -w-rch -a4 -D__STDC__=0
-#defines= -DSILENT
-defines= -DSILENT -DALL_INTERIOR_POINTERS -DUSE_GENERIC -DNO_GETENV -DJAVA_FINALIZATION -DGC_OPERATOR_NEW_ARRAY
+defines= -DALL_INTERIOR_POINTERS -DUSE_GENERIC -DNO_GETENV -DJAVA_FINALIZATION -DGC_OPERATOR_NEW_ARRAY
.c.obj:
$(cc) @&&|
diff --git a/EMX_MAKEFILE b/EMX_MAKEFILE
index d7674b3a..c7e5bb80 100644
--- a/EMX_MAKEFILE
+++ b/EMX_MAKEFILE
@@ -18,10 +18,9 @@ CC= gcc
CXX=g++
# Needed only for "make c++", which adds the c++ interface
-CFLAGS= -O -DALL_INTERIOR_POINTERS -DSILENT
+CFLAGS= -O -DALL_INTERIOR_POINTERS
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
-# -DSILENT disables statistics printing, and improves performance.
# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
# altered stubborn objects, at substantial performance cost.
# -DFIND_LEAK causes the collector to assume that all inaccessible
@@ -74,7 +73,7 @@ all: gc.a gctest.exe
$(OBJS) test.o: $(srcdir)/gc_priv.h $(srcdir)/gc_hdrs.h $(srcdir)/gc.h \
$(srcdir)/gcconfig.h $(srcdir)/gc_typed.h
# The dependency on Makefile is needed. Changing
-# options such as -DSILENT affects the size of GC_arrays,
+# options affects the size of GC_arrays,
# invalidating all .o files that rely on gc_priv.h
mark.o typd_mlc.o finalize.o: $(srcdir)/include/gc_mark.h $(srcdir)/include/private/gc_pmark.h
diff --git a/MacOS.c b/MacOS.c
index cc12cd15..b56bea78 100644
--- a/MacOS.c
+++ b/MacOS.c
@@ -128,10 +128,12 @@ void GC_MacFreeTemporaryMemory()
}
theTemporaryMemory = NULL;
-# if !defined(SILENT) && !defined(SHARED_LIBRARY_BUILD)
- fprintf(stdout, "[total memory used: %ld bytes.]\n",
+# if !defined(SHARED_LIBRARY_BUILD)
+ if (GC_print_stats) {
+ fprintf(stdout, "[total memory used: %ld bytes.]\n",
totalMemoryUsed);
- fprintf(stdout, "[total collections: %ld.]\n", GC_gc_no);
+ fprintf(stdout, "[total collections: %ld.]\n", GC_gc_no);
+ }
# endif
}
}
diff --git a/Makefile b/Makefile
index 20fa40a9..83d4d60e 100644
--- a/Makefile
+++ b/Makefile
@@ -30,7 +30,13 @@ AS=as $(AS_ABI_FLAG)
srcdir= .
VPATH= $(srcdir)
-CFLAGS= -O -I$(srcdir)/include -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DSILENT -DALL_INTERIOR_POINTERS
+# Atomic_ops installation directory. If this doesn't exist, we create
+# it from the included atomic_ops distribution.
+AO_VERSION=0.6
+AO_SRC_DIR=$(srcdir)/atomic_ops-$(AO_VERSION)
+AO_INSTALL_DIR=$(AO_SRC_DIR)/installed
+
+CFLAGS= -O -I$(srcdir)/include -I$(AO_INSTALL_DIR)/include -DATOMIC_UNCOLLECTABLE -DNO_EXECUTE_PERMISSION -DALL_INTERIOR_POINTERS
# To build the parallel collector on Linux, add to the above:
# -DGC_LINUX_THREADS -DPARALLEL_MARK -DTHREAD_LOCAL_ALLOC
@@ -54,7 +60,6 @@ HOSTCFLAGS=$(CFLAGS)
# without optimization.
# These define arguments influence the collector configuration:
-# -DSILENT disables statistics printing, and improves performance.
# -DFIND_LEAK causes GC_find_leak to be initially set.
# This causes the collector to assume that all inaccessible
# objects should have been explicitly deallocated, and reports exceptions.
@@ -96,13 +101,6 @@ HOSTCFLAGS=$(CFLAGS)
# an object can be recognized. This can be expensive. (The padding
# is normally more than one byte due to alignment constraints.)
# -DDONT_ADD_BYTE_AT_END disables the padding.
-# -DNO_SIGNALS does not disable signals during critical parts of
-# the GC process. This is no less correct than many malloc
-# implementations, and it sometimes has a significant performance
-# impact. However, it is dangerous for many not-quite-ANSI C
-# programs that call things like printf in asynchronous signal handlers.
-# This is on by default. Turning it off has not been extensively tested with
-# compilers that reorder stores. It should have been.
# -DNO_EXECUTE_PERMISSION may cause some or all of the heap to not
# have execute permission, i.e. it may be impossible to execute
# code from the heap. Currently this only affects the incremental
@@ -154,6 +152,14 @@ HOSTCFLAGS=$(CFLAGS)
# -DATOMIC_UNCOLLECTABLE includes code for GC_malloc_atomic_uncollectable.
# This is useful if either the vendor malloc implementation is poor,
# or if REDIRECT_MALLOC is used.
+# -DMARK_BIT_PER_GRANULE requests that a mark bit (or often byte)
+# be allocated for each allocation granule, as opposed to each object.
+# This often improves speed, possibly at some cost in space and/or
+# cache footprint. Normally it is best to let this decision be
+# made automatically depending on platform.
+# -DMARK_BIT_PER_OBJ requests that a mark bit be allocated for each
+# object instead of allocation granule. The opposiet of
+# MARK_BIT_PER_GRANULE.
# -DHBLKSIZE=ddd, where ddd is a power of 2 between 512 and 16384, explicitly
# sets the heap block size. Each heap block is devoted to a single size and
# kind of object. For the incremental collector it makes sense to match
@@ -276,6 +282,9 @@ HOSTCFLAGS=$(CFLAGS)
# -DPOINTER_SHIFT=n causes the collector to left shift candidate pointers
# by the indicated amount before trying to interpret them. Applied
# after POINTER_MASK. EXPERIMENTAL. See also the preceding macro.
+# -DENABLE_TRACE enables the GC_TRACE=addr environment setting to do its
+# job. By default this is not supported in order tokeep the marker as fast
+# as possible.
#
CXXFLAGS= $(CFLAGS)
@@ -283,25 +292,25 @@ AR= ar
RANLIB= ranlib
-OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o aix_irix_threads.o pthread_support.o pthread_stop_world.o darwin_stop_world.o typd_mlc.o ptr_chck.o mallocx.o solaris_pthreads.o gcj_mlc.o specific.o gc_dlopen.o backgraph.o win32_threads.o
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o aix_irix_threads.o pthread_support.o pthread_stop_world.o darwin_stop_world.o typd_mlc.o ptr_chck.o mallocx.o gcj_mlc.o specific.o gc_dlopen.o backgraph.o win32_threads.o
-CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c aix_irix_threads.c pthread_support.c pthread_stop_world.c darwin_stop_world.c typd_mlc.c ptr_chck.c mallocx.c solaris_pthreads.c gcj_mlc.c specific.c gc_dlopen.c backgraph.c win32_threads.c
+CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c aix_irix_threads.c pthread_support.c pthread_stop_world.c darwin_stop_world.c typd_mlc.c ptr_chck.c mallocx.c gcj_mlc.c specific.c gc_dlopen.c backgraph.c win32_threads.c
CORD_SRCS= cord/cordbscs.c cord/cordxtra.c cord/cordprnt.c cord/de.c cord/cordtest.c include/cord.h include/ec.h include/private/cord_pos.h cord/de_win.c cord/de_win.h cord/de_cmds.h cord/de_win.ICO cord/de_win.RC
CORD_OBJS= cord/cordbscs.o cord/cordxtra.o cord/cordprnt.o
SRCS= $(CSRCS) mips_sgi_mach_dep.s rs6000_mach_dep.s alpha_mach_dep.S \
- sparc_mach_dep.S include/gc.h include/gc_typed.h \
+ sparc_mach_dep.S include/gc.h include/gc_typed.h include/gc_tiny_fl.h \
include/private/gc_hdrs.h include/private/gc_priv.h \
include/private/gcconfig.h include/private/gc_pmark.h \
include/gc_inl.h include/gc_inline.h include/gc_mark.h \
threadlibs.c if_mach.c if_not_there.c gc_cpp.cc include/gc_cpp.h \
gcname.c include/weakpointer.h include/private/gc_locks.h \
- gcc_support.c mips_ultrix_mach_dep.s include/gc_alloc.h \
+ mips_ultrix_mach_dep.s \
include/new_gc_alloc.h include/gc_allocator.h \
include/javaxfc.h sparc_sunos4_mach_dep.s sparc_netbsd_mach_dep.s \
- include/private/solaris_threads.h include/gc_backptr.h \
+ include/gc_backptr.h \
hpux_test_and_clear.s include/gc_gcj.h \
include/gc_local_alloc.h include/private/dbg_mlc.h \
include/private/specific.h powerpc_darwin_mach_dep.s \
@@ -344,7 +353,7 @@ OTHER_FILES= Makefile setjmp_t.c callprocs pc_excludes \
Mac_files/datastart.c Mac_files/dataend.c \
Mac_files/MacOS_config.h Mac_files/MacOS_Test_config.h \
add_gc_prefix.c gc_cpp.cpp \
- version.h AmigaOS.c \
+ version.h AmigaOS.c atomic_ops-0.6.tar.gz \
$(TESTS) $(GNU_BUILD_FILES) $(OTHER_MAKEFILES)
CORD_INCLUDE_FILES= $(srcdir)/include/gc.h $(srcdir)/include/cord.h \
@@ -360,7 +369,7 @@ CURSES= -lcurses -ltermlib
# the SHELL environment variable.
SHELL= /bin/sh
-SPECIALCFLAGS = -I$(srcdir)/include
+SPECIALCFLAGS = -I$(srcdir)/include -I$(AO_INSTALL_DIR)/include
# Alternative flags to the C compiler for mach_dep.c.
# Mach_dep.c often doesn't like optimization, and it's
# not time-critical anyway.
@@ -368,6 +377,11 @@ SPECIALCFLAGS = -I$(srcdir)/include
all: gc.a gctest
+# if AO_INSTALL_DIR doesn't exist, we assume that it is pointing to
+# the default location, and we need to build
+$(AO_INSTALL_DIR):
+ tar xvfz $(AO_SRC_DIR).tar.gz; cd $(AO_SRC_DIR); make CC=$(CC) install
+
LEAKFLAGS=$(CFLAGS) -DFIND_LEAK
BSD-pkg-all: bsd-libgc.a bsd-libleak.a
@@ -397,17 +411,15 @@ $(OBJS) tests/test.o dyn_load.o dyn_load_sunos53.o: \
$(srcdir)/include/private/gc_hdrs.h $(srcdir)/include/private/gc_locks.h \
$(srcdir)/include/gc.h $(srcdir)/include/gc_pthread_redirects.h \
$(srcdir)/include/private/gcconfig.h $(srcdir)/include/gc_typed.h \
- $(srcdir)/include/gc_config_macros.h Makefile
+ $(srcdir)/include/gc_config_macros.h Makefile $(AO_INSTALL_DIR)
# The dependency on Makefile is needed. Changing
-# options such as -DSILENT affects the size of GC_arrays,
+# options affects the size of GC_arrays,
# invalidating all .o files that rely on gc_priv.h
mark.o typd_mlc.o finalize.o ptr_chck.o: $(srcdir)/include/gc_mark.h $(srcdir)/include/private/gc_pmark.h
specific.o pthread_support.o: $(srcdir)/include/private/specific.h
-solaris_threads.o solaris_pthreads.o: $(srcdir)/include/private/solaris_threads.h
-
dbg_mlc.o gcj_mlc.o: $(srcdir)/include/private/dbg_mlc.h
tests/test.o: tests $(srcdir)/tests/test.c
@@ -512,7 +524,6 @@ mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_sgi_mach_dep.s \
./if_mach POWERPC DARWIN $(AS) -o mach_dep.o $(srcdir)/powerpc_darwin_mach_dep.s
./if_mach ALPHA LINUX $(CC) -c -o mach_dep.o $(srcdir)/alpha_mach_dep.S
./if_mach SPARC SUNOS5 $(CC) -c -o mach_dep.o $(srcdir)/sparc_mach_dep.S
- ./if_mach SPARC SUNOS4 $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
./if_mach SPARC OPENBSD $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
./if_mach SPARC NETBSD $(AS) -o mach_dep.o $(srcdir)/sparc_netbsd_mach_dep.s
./if_mach IA64 "" as $(AS_ABI_FLAG) -o ia64_save_regs_in_stack.o $(srcdir)/ia64_save_regs_in_stack.s
diff --git a/Makefile.am b/Makefile.am
index 0ee551ea..6389d0b1 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -34,7 +34,8 @@ lib_LTLIBRARIES = libgc.la $(extra)
include_HEADERS = include/gc.h include/gc_local_alloc.h \
include/gc_pthread_redirects.h include/gc_config_macros.h \
-include/leak_detector.h include/gc_typed.h @addincludes@
+include/leak_detector.h include/gc_typed.h include/gc_tiny_fl.h \
+@addincludes@
EXTRA_HEADERS = include/gc_cpp.h include/gc_allocator.h
@@ -53,9 +54,9 @@ backgraph.c win32_threads.c \
pthread_support.c pthread_stop_world.c darwin_stop_world.c \
$(asm_libgc_sources)
-# Include THREADDLLIBS here to ensure that the correct versions of
+# Include THREADLIBS here to ensure that the correct versions of
# linuxthread semaphore functions get linked:
-libgc_la_LIBADD = @addobjs@ $(THREADDLLIBS) $(UNWINDLIBS)
+libgc_la_LIBADD = @addobjs@ $(THREADLIBS) $(UNWINDLIBS)
libgc_la_DEPENDENCIES = @addobjs@
libgc_la_LDFLAGS = -version-info 1:2:0
@@ -65,7 +66,7 @@ EXTRA_libgc_la_SOURCES = alpha_mach_dep.S \
sparc_sunos4_mach_dep.s ia64_save_regs_in_stack.s
libgccpp_la_SOURCES = gc_cpp.cc
-libgccpp_la_LIBADD = $(THREADDLLIBS) $(UNWINDLIBS)
+libgccpp_la_LIBADD = $(THREADLIBS) $(UNWINDLIBS)
libgccpp_la_LDFLAGS = -version-info 1:2:0
EXTRA_DIST += alpha_mach_dep.S mips_sgi_mach_dep.s sparc_mach_dep.S
@@ -91,9 +92,9 @@ test_cpp.o: $(srcdir)/tests/test_cpp.cc
## are included in the distribution
# gctest_OBJECTS = test.o
gctest_SOURCES = tests/test.c
-gctest_LDADD = ./libgc.la $(THREADDLLIBS) $(UNWINDLIBS) $(EXTRA_TEST_LIBS)
+gctest_LDADD = ./libgc.la $(THREADLIBS) $(UNWINDLIBS) $(EXTRA_TEST_LIBS)
test_cpp_SOURCES = tests/test_cpp.cc
-test_cpp_LDADD = ./libgc.la ./libgccpp.la $(THREADDLLIBS) $(UNWINDLIBS) $(EXTRA_TEST_LIBS)
+test_cpp_LDADD = ./libgc.la ./libgccpp.la $(THREADLIBS) $(UNWINDLIBS) $(EXTRA_TEST_LIBS)
TESTS = gctest $(extra_checks)
@@ -106,6 +107,7 @@ include/gc_mark.h @addincludes@
## FIXME: we shouldn't have to do this, but automake forces us to.
.s.lo:
+.S.lo:
## We use -Wp,-P to strip #line directives. Irix `as' chokes on
## these.
$(LTCOMPILE) -Wp,-P -x assembler-with-cpp -c $<
diff --git a/Makefile.direct b/Makefile.direct
index 20fa40a9..83d4d60e 100644
--- a/Makefile.direct
+++ b/Makefile.direct
@@ -30,7 +30,13 @@ AS=as $(AS_ABI_FLAG)
srcdir= .
VPATH= $(srcdir)
-CFLAGS= -O -I$(srcdir)/include -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DSILENT -DALL_INTERIOR_POINTERS
+# Atomic_ops installation directory. If this doesn't exist, we create
+# it from the included atomic_ops distribution.
+AO_VERSION=0.6
+AO_SRC_DIR=$(srcdir)/atomic_ops-$(AO_VERSION)
+AO_INSTALL_DIR=$(AO_SRC_DIR)/installed
+
+CFLAGS= -O -I$(srcdir)/include -I$(AO_INSTALL_DIR)/include -DATOMIC_UNCOLLECTABLE -DNO_EXECUTE_PERMISSION -DALL_INTERIOR_POINTERS
# To build the parallel collector on Linux, add to the above:
# -DGC_LINUX_THREADS -DPARALLEL_MARK -DTHREAD_LOCAL_ALLOC
@@ -54,7 +60,6 @@ HOSTCFLAGS=$(CFLAGS)
# without optimization.
# These define arguments influence the collector configuration:
-# -DSILENT disables statistics printing, and improves performance.
# -DFIND_LEAK causes GC_find_leak to be initially set.
# This causes the collector to assume that all inaccessible
# objects should have been explicitly deallocated, and reports exceptions.
@@ -96,13 +101,6 @@ HOSTCFLAGS=$(CFLAGS)
# an object can be recognized. This can be expensive. (The padding
# is normally more than one byte due to alignment constraints.)
# -DDONT_ADD_BYTE_AT_END disables the padding.
-# -DNO_SIGNALS does not disable signals during critical parts of
-# the GC process. This is no less correct than many malloc
-# implementations, and it sometimes has a significant performance
-# impact. However, it is dangerous for many not-quite-ANSI C
-# programs that call things like printf in asynchronous signal handlers.
-# This is on by default. Turning it off has not been extensively tested with
-# compilers that reorder stores. It should have been.
# -DNO_EXECUTE_PERMISSION may cause some or all of the heap to not
# have execute permission, i.e. it may be impossible to execute
# code from the heap. Currently this only affects the incremental
@@ -154,6 +152,14 @@ HOSTCFLAGS=$(CFLAGS)
# -DATOMIC_UNCOLLECTABLE includes code for GC_malloc_atomic_uncollectable.
# This is useful if either the vendor malloc implementation is poor,
# or if REDIRECT_MALLOC is used.
+# -DMARK_BIT_PER_GRANULE requests that a mark bit (or often byte)
+# be allocated for each allocation granule, as opposed to each object.
+# This often improves speed, possibly at some cost in space and/or
+# cache footprint. Normally it is best to let this decision be
+# made automatically depending on platform.
+# -DMARK_BIT_PER_OBJ requests that a mark bit be allocated for each
+# object instead of allocation granule. The opposiet of
+# MARK_BIT_PER_GRANULE.
# -DHBLKSIZE=ddd, where ddd is a power of 2 between 512 and 16384, explicitly
# sets the heap block size. Each heap block is devoted to a single size and
# kind of object. For the incremental collector it makes sense to match
@@ -276,6 +282,9 @@ HOSTCFLAGS=$(CFLAGS)
# -DPOINTER_SHIFT=n causes the collector to left shift candidate pointers
# by the indicated amount before trying to interpret them. Applied
# after POINTER_MASK. EXPERIMENTAL. See also the preceding macro.
+# -DENABLE_TRACE enables the GC_TRACE=addr environment setting to do its
+# job. By default this is not supported in order tokeep the marker as fast
+# as possible.
#
CXXFLAGS= $(CFLAGS)
@@ -283,25 +292,25 @@ AR= ar
RANLIB= ranlib
-OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o aix_irix_threads.o pthread_support.o pthread_stop_world.o darwin_stop_world.o typd_mlc.o ptr_chck.o mallocx.o solaris_pthreads.o gcj_mlc.o specific.o gc_dlopen.o backgraph.o win32_threads.o
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o aix_irix_threads.o pthread_support.o pthread_stop_world.o darwin_stop_world.o typd_mlc.o ptr_chck.o mallocx.o gcj_mlc.o specific.o gc_dlopen.o backgraph.o win32_threads.o
-CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c aix_irix_threads.c pthread_support.c pthread_stop_world.c darwin_stop_world.c typd_mlc.c ptr_chck.c mallocx.c solaris_pthreads.c gcj_mlc.c specific.c gc_dlopen.c backgraph.c win32_threads.c
+CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c aix_irix_threads.c pthread_support.c pthread_stop_world.c darwin_stop_world.c typd_mlc.c ptr_chck.c mallocx.c gcj_mlc.c specific.c gc_dlopen.c backgraph.c win32_threads.c
CORD_SRCS= cord/cordbscs.c cord/cordxtra.c cord/cordprnt.c cord/de.c cord/cordtest.c include/cord.h include/ec.h include/private/cord_pos.h cord/de_win.c cord/de_win.h cord/de_cmds.h cord/de_win.ICO cord/de_win.RC
CORD_OBJS= cord/cordbscs.o cord/cordxtra.o cord/cordprnt.o
SRCS= $(CSRCS) mips_sgi_mach_dep.s rs6000_mach_dep.s alpha_mach_dep.S \
- sparc_mach_dep.S include/gc.h include/gc_typed.h \
+ sparc_mach_dep.S include/gc.h include/gc_typed.h include/gc_tiny_fl.h \
include/private/gc_hdrs.h include/private/gc_priv.h \
include/private/gcconfig.h include/private/gc_pmark.h \
include/gc_inl.h include/gc_inline.h include/gc_mark.h \
threadlibs.c if_mach.c if_not_there.c gc_cpp.cc include/gc_cpp.h \
gcname.c include/weakpointer.h include/private/gc_locks.h \
- gcc_support.c mips_ultrix_mach_dep.s include/gc_alloc.h \
+ mips_ultrix_mach_dep.s \
include/new_gc_alloc.h include/gc_allocator.h \
include/javaxfc.h sparc_sunos4_mach_dep.s sparc_netbsd_mach_dep.s \
- include/private/solaris_threads.h include/gc_backptr.h \
+ include/gc_backptr.h \
hpux_test_and_clear.s include/gc_gcj.h \
include/gc_local_alloc.h include/private/dbg_mlc.h \
include/private/specific.h powerpc_darwin_mach_dep.s \
@@ -344,7 +353,7 @@ OTHER_FILES= Makefile setjmp_t.c callprocs pc_excludes \
Mac_files/datastart.c Mac_files/dataend.c \
Mac_files/MacOS_config.h Mac_files/MacOS_Test_config.h \
add_gc_prefix.c gc_cpp.cpp \
- version.h AmigaOS.c \
+ version.h AmigaOS.c atomic_ops-0.6.tar.gz \
$(TESTS) $(GNU_BUILD_FILES) $(OTHER_MAKEFILES)
CORD_INCLUDE_FILES= $(srcdir)/include/gc.h $(srcdir)/include/cord.h \
@@ -360,7 +369,7 @@ CURSES= -lcurses -ltermlib
# the SHELL environment variable.
SHELL= /bin/sh
-SPECIALCFLAGS = -I$(srcdir)/include
+SPECIALCFLAGS = -I$(srcdir)/include -I$(AO_INSTALL_DIR)/include
# Alternative flags to the C compiler for mach_dep.c.
# Mach_dep.c often doesn't like optimization, and it's
# not time-critical anyway.
@@ -368,6 +377,11 @@ SPECIALCFLAGS = -I$(srcdir)/include
all: gc.a gctest
+# if AO_INSTALL_DIR doesn't exist, we assume that it is pointing to
+# the default location, and we need to build
+$(AO_INSTALL_DIR):
+ tar xvfz $(AO_SRC_DIR).tar.gz; cd $(AO_SRC_DIR); make CC=$(CC) install
+
LEAKFLAGS=$(CFLAGS) -DFIND_LEAK
BSD-pkg-all: bsd-libgc.a bsd-libleak.a
@@ -397,17 +411,15 @@ $(OBJS) tests/test.o dyn_load.o dyn_load_sunos53.o: \
$(srcdir)/include/private/gc_hdrs.h $(srcdir)/include/private/gc_locks.h \
$(srcdir)/include/gc.h $(srcdir)/include/gc_pthread_redirects.h \
$(srcdir)/include/private/gcconfig.h $(srcdir)/include/gc_typed.h \
- $(srcdir)/include/gc_config_macros.h Makefile
+ $(srcdir)/include/gc_config_macros.h Makefile $(AO_INSTALL_DIR)
# The dependency on Makefile is needed. Changing
-# options such as -DSILENT affects the size of GC_arrays,
+# options affects the size of GC_arrays,
# invalidating all .o files that rely on gc_priv.h
mark.o typd_mlc.o finalize.o ptr_chck.o: $(srcdir)/include/gc_mark.h $(srcdir)/include/private/gc_pmark.h
specific.o pthread_support.o: $(srcdir)/include/private/specific.h
-solaris_threads.o solaris_pthreads.o: $(srcdir)/include/private/solaris_threads.h
-
dbg_mlc.o gcj_mlc.o: $(srcdir)/include/private/dbg_mlc.h
tests/test.o: tests $(srcdir)/tests/test.c
@@ -512,7 +524,6 @@ mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_sgi_mach_dep.s \
./if_mach POWERPC DARWIN $(AS) -o mach_dep.o $(srcdir)/powerpc_darwin_mach_dep.s
./if_mach ALPHA LINUX $(CC) -c -o mach_dep.o $(srcdir)/alpha_mach_dep.S
./if_mach SPARC SUNOS5 $(CC) -c -o mach_dep.o $(srcdir)/sparc_mach_dep.S
- ./if_mach SPARC SUNOS4 $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
./if_mach SPARC OPENBSD $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
./if_mach SPARC NETBSD $(AS) -o mach_dep.o $(srcdir)/sparc_netbsd_mach_dep.s
./if_mach IA64 "" as $(AS_ABI_FLAG) -o ia64_save_regs_in_stack.o $(srcdir)/ia64_save_regs_in_stack.s
diff --git a/Makefile.dj b/Makefile.dj
index 7757f151..4548feb7 100644
--- a/Makefile.dj
+++ b/Makefile.dj
@@ -29,11 +29,10 @@ EXE_SUFFIX=.exe
srcdir= .
VPATH= $(srcdir)
-CFLAGS= -gstabs+ -O2 -I$(srcdir)/include -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DNO_EXECUTE_PERMISSION -DSILENT
+CFLAGS= -gstabs+ -O2 -I$(srcdir)/include -DATOMIC_UNCOLLECTABLE -DALL_INTERIOR_POINTERS -DNO_EXECUTE_PERMISSION
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
-# -DSILENT disables statistics printing, and improves performance.
# -DFIND_LEAK causes GC_find_leak to be initially set.
# This causes the collector to assume that all inaccessible
# objects should have been explicitly deallocated, and reports exceptions.
@@ -221,7 +220,7 @@ $(OBJS) test.o dyn_load.o dyn_load_sunos53.o: \
$(srcdir)/include/private/gcconfig.h $(srcdir)/include/gc_typed.h \
Makefile
# The dependency on Makefile is needed. Changing
-# options such as -DSILENT affects the size of GC_arrays,
+# options affects the size of GC_arrays,
# invalidating all .o files that rely on gc_priv.h
mark.o typd_mlc.o finalize.o: $(srcdir)/include/gc_mark.h
diff --git a/Makefile.in b/Makefile.in
index ef39e7d2..0c353cf9 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -108,7 +108,7 @@ OBJDUMP = @OBJDUMP@
PACKAGE = @PACKAGE@
RANLIB = @RANLIB@
STRIP = @STRIP@
-THREADDLLIBS = @THREADDLLIBS@
+THREADLIBS = @THREADLIBS@
UNWINDLIBS = @UNWINDLIBS@
VERSION = @VERSION@
addincludes = @addincludes@
@@ -168,7 +168,8 @@ lib_LTLIBRARIES = libgc.la $(extra)
include_HEADERS = include/gc.h include/gc_local_alloc.h \
include/gc_pthread_redirects.h include/gc_config_macros.h \
-include/leak_detector.h include/gc_typed.h @addincludes@
+include/leak_detector.h include/gc_typed.h include/gc_tiny_fl.h \
+@addincludes@
EXTRA_HEADERS = include/gc_cpp.h include/gc_allocator.h
@@ -186,9 +187,9 @@ pthread_support.c pthread_stop_world.c darwin_stop_world.c \
$(asm_libgc_sources)
-# Include THREADDLLIBS here to ensure that the correct versions of
+# Include THREADLIBS here to ensure that the correct versions of
# linuxthread semaphore functions get linked:
-libgc_la_LIBADD = @addobjs@ $(THREADDLLIBS) $(UNWINDLIBS)
+libgc_la_LIBADD = @addobjs@ $(THREADLIBS) $(UNWINDLIBS)
libgc_la_DEPENDENCIES = @addobjs@
libgc_la_LDFLAGS = -version-info 1:2:0
@@ -199,7 +200,7 @@ EXTRA_libgc_la_SOURCES = alpha_mach_dep.S \
libgccpp_la_SOURCES = gc_cpp.cc
-libgccpp_la_LIBADD = $(THREADDLLIBS) $(UNWINDLIBS)
+libgccpp_la_LIBADD = $(THREADLIBS) $(UNWINDLIBS)
libgccpp_la_LDFLAGS = -version-info 1:2:0
AM_CXXFLAGS = @GC_CFLAGS@
@@ -212,9 +213,9 @@ check_PROGRAMS = gctest $(extra_checks)
# gctest_OBJECTS = test.o
gctest_SOURCES = tests/test.c
-gctest_LDADD = ./libgc.la $(THREADDLLIBS) $(UNWINDLIBS) $(EXTRA_TEST_LIBS)
+gctest_LDADD = ./libgc.la $(THREADLIBS) $(UNWINDLIBS) $(EXTRA_TEST_LIBS)
test_cpp_SOURCES = tests/test_cpp.cc
-test_cpp_LDADD = ./libgc.la ./libgccpp.la $(THREADDLLIBS) $(UNWINDLIBS) $(EXTRA_TEST_LIBS)
+test_cpp_LDADD = ./libgc.la ./libgccpp.la $(THREADLIBS) $(UNWINDLIBS) $(EXTRA_TEST_LIBS)
TESTS = gctest $(extra_checks)
@@ -450,9 +451,6 @@ distclean-depend:
.S.obj:
$(CCASCOMPILE) -c `cygpath -w $<`
-.S.lo:
- $(LTCCASCOMPILE) -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<
-
.c.o:
@AMDEP_TRUE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@ depfile='$(DEPDIR)/$*.Po' tmpdepfile='$(DEPDIR)/$*.TPo' @AMDEPBACKSLASH@
@@ -909,6 +907,7 @@ include/gc_pthread_redirects.h include/gc_config_macros.h \
include/gc_mark.h @addincludes@
.s.lo:
+.S.lo:
$(LTCOMPILE) -Wp,-P -x assembler-with-cpp -c $<
#
diff --git a/NT_MAKEFILE b/NT_MAKEFILE
index d1b6a5d6..3d0e3a53 100644
--- a/NT_MAKEFILE
+++ b/NT_MAKEFILE
@@ -11,10 +11,10 @@ OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_r
all: gctest.exe cord\de.exe test_cpp.exe
.c.obj:
- $(cc) $(cdebug) $(cflags) $(cvars) -Iinclude -DSILENT -DALL_INTERIOR_POINTERS -D__STDC__ -DGC_NOT_DLL -DGC_BUILD $*.c /Fo$*.obj
+ $(cc) $(cdebug) $(cflags) $(cvars) -Iinclude -DALL_INTERIOR_POINTERS -D__STDC__ -DGC_NOT_DLL -DGC_BUILD $*.c /Fo$*.obj
.cpp.obj:
- $(cc) $(cdebug) $(cflags) $(cvars) -Iinclude -DSILENT -DALL_INTERIOR_POINTERS -DGC_NOT_DLL -DGC_BUILD $*.CPP /Fo$*.obj
+ $(cc) $(cdebug) $(cflags) $(cvars) -Iinclude -DALL_INTERIOR_POINTERS -DGC_NOT_DLL -DGC_BUILD $*.CPP /Fo$*.obj
$(OBJS) tests\test.obj: include\private\gc_priv.h include\private\gc_hdrs.h include\gc.h include\private\gcconfig.h include\private\gc_locks.h include\private\gc_pmark.h include\gc_mark.h
diff --git a/NT_STATIC_THREADS_MAKEFILE b/NT_STATIC_THREADS_MAKEFILE
index a7582af6..cd951bf3 100644
--- a/NT_STATIC_THREADS_MAKEFILE
+++ b/NT_STATIC_THREADS_MAKEFILE
@@ -1,4 +1,4 @@
-# Makefile for Windows NT. Assumes Microsoft compiler.
+# Makefile for Windows NT. Assumes Microsoft compiler, and a single thread.
# DLLs are included in the root set under NT, but not under win32S.
# Use "nmake nodebug=1 all" for optimized versions of library, gctest and editor.
@@ -11,10 +11,10 @@ OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_r
all: gctest.exe cord\de.exe test_cpp.exe
.c.obj:
- $(cc) $(cdebug) $(cflags) $(cvars) -Iinclude -DSILENT -DALL_INTERIOR_POINTERS -D__STDC__ -DGC_NOT_DLL -DGC_WIN32_THREADS $*.c /Fo$*.obj
+ $(cc) $(cdebug) $(cflags) $(cvars) -Iinclude -DALL_INTERIOR_POINTERS -D__STDC__ -DGC_NOT_DLL -DGC_WIN32_THREADS $*.c /Fo$*.obj
.cpp.obj:
- $(cc) $(cdebug) $(cflags) $(cvars) -Iinclude -DSILENT -DALL_INTERIOR_POINTERS -DGC_NOT_DLL $*.CPP -DGC_WIN32_THREADS /Fo$*.obj
+ $(cc) $(cdebug) $(cflags) $(cvars) -Iinclude -DALL_INTERIOR_POINTERS -DGC_NOT_DLL $*.CPP -DGC_WIN32_THREADS /Fo$*.obj
$(OBJS) tests\test.obj: include\private\gc_priv.h include\private\gc_hdrs.h include\gc.h include\private\gcconfig.h include\private\gc_locks.h include\private\gc_pmark.h include\gc_mark.h
diff --git a/NT_THREADS_MAKEFILE b/NT_THREADS_MAKEFILE
index 5f0b5462..73cad89f 100644
--- a/NT_THREADS_MAKEFILE
+++ b/NT_THREADS_MAKEFILE
@@ -114,8 +114,8 @@ CLEAN :
CPP=cl.exe
# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /c
-# ADD CPP /nologo /MD /W3 /GX /O2 /I include /D "NDEBUG" /D "SILENT" /D "GC_BUILD" /D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D "GC_WIN32_THREADS" /FR /YX /c
-CPP_PROJ=/nologo /MD /W3 /GX /O2 /I include /D "NDEBUG" /D "SILENT" /D "GC_BUILD" /D\
+# ADD CPP /nologo /MD /W3 /GX /O2 /I include /D "NDEBUG" /D "GC_BUILD" /D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D "GC_WIN32_THREADS" /FR /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /I include /D "NDEBUG" /D "GC_BUILD" /D\
"WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D\
"GC_WIN32_THREADS" /FR"$(INTDIR)/" /Fp"$(INTDIR)/gc.pch" /YX /Fo"$(INTDIR)/" /c
CPP_OBJS=.\Release/
@@ -296,8 +296,8 @@ CLEAN :
CPP=cl.exe
# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /c
-# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I include /D "_DEBUG" /D "SILENT" /D "GC_BUILD" /D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D "GC_WIN32_THREADS" /FR /YX /c
-CPP_PROJ=/nologo /MDd /W3 /Gm /GX /Zi /Od /I include /D "_DEBUG" /D "SILENT" /D "GC_BUILD"\
+# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I include /D "_DEBUG" /D "GC_BUILD" /D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D "GC_WIN32_THREADS" /FR /YX /c
+CPP_PROJ=/nologo /MDd /W3 /Gm /GX /Zi /Od /I include /D "_DEBUG" /D "GC_BUILD"\
/D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D\
"GC_WIN32_THREADS" /FR"$(INTDIR)/" /Fp"$(INTDIR)/gc.pch" /YX /Fo"$(INTDIR)/"\
/Fd"$(INTDIR)/" /c
diff --git a/OS2_MAKEFILE b/OS2_MAKEFILE
index 690598d6..c6bad7ab 100644
--- a/OS2_MAKEFILE
+++ b/OS2_MAKEFILE
@@ -10,7 +10,7 @@ OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_r
CORDOBJS= cord\cordbscs.obj cord\cordxtra.obj cord\cordprnt.obj
CC= icc
-CFLAGS= /O /Q /DSILENT /DSMALL_CONFIG /DALL_INTERIOR_POINTERS
+CFLAGS= /O /Q /DSMALL_CONFIG /DALL_INTERIOR_POINTERS
# Use /Ti instead of /O for debugging
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
diff --git a/PCR-Makefile b/PCR-Makefile
index 1eae3672..e1655248 100644
--- a/PCR-Makefile
+++ b/PCR-Makefile
@@ -13,7 +13,7 @@ include ../config/common.mk
CPPFLAGS = $(INCLUDE) $(CONFIG_CPPFLAGS) \
-DPCR_NO_RENAME -DPCR_NO_HOSTDEP_ERR
-#CFLAGS = -DPCR -DSILENT $(CONFIG_CFLAGS)
+#CFLAGS = -DPCR $(CONFIG_CFLAGS)
CFLAGS = -DPCR $(CONFIG_CFLAGS)
SPECIALCFLAGS = # For code involving asm's
diff --git a/SMakefile.amiga b/SMakefile.amiga
index e2085051..94fb7238 100644
--- a/SMakefile.amiga
+++ b/SMakefile.amiga
@@ -26,7 +26,6 @@ OPTIMIZE=optimize optimizetime optglobal optimizerdepth=100 optimizerpeephole op
OPT= $(OPTIMIZE) CPU=$(CPU) math=$(MATH) NOSTACKCHECK VERBOSE \
MAPHUNK NOVERSION NOICONS nodebug \
-DEFINE SILENT \
parm=reg \
DEFINE __USE_SYSBASE
diff --git a/WCC_MAKEFILE b/WCC_MAKEFILE
index 32b01df1..6b1d78ba 100644
--- a/WCC_MAKEFILE
+++ b/WCC_MAKEFILE
@@ -25,7 +25,7 @@ CPU=5
OPTIM=-oneatx -s
#OPTIM=-ohneatx -s
-DEFS=-DALL_INTERIOR_POINTERS -DSILENT -DNO_SIGNALS #-DSMALL_CONFIG #-DGC_DEBUG
+DEFS=-DALL_INTERIOR_POINTERS #-DSMALL_CONFIG #-DGC_DEBUG
#####
@@ -98,13 +98,13 @@ gc.dll: $(OBJS) .AUTODEPEND
@for %i in ($(OBJS)) do @%append $*.lnk file '%i'
!ifeq CALLING s
@%append $*.lnk export GC_is_marked
- @%append $*.lnk export GC_incr_words_allocd
- @%append $*.lnk export GC_incr_mem_freed
+ @%append $*.lnk export GC_incr_bytes_allocd
+ @%append $*.lnk export GC_incr_bytes_freed
@%append $*.lnk export GC_generic_malloc_words_small
!else
@%append $*.lnk export GC_is_marked_
- @%append $*.lnk export GC_incr_words_allocd_
- @%append $*.lnk export GC_incr_mem_freed_
+ @%append $*.lnk export GC_incr_bytes_allocd_
+ @%append $*.lnk export GC_incr_bytes_freed_
@%append $*.lnk export GC_generic_malloc_words_small_
!endif
*wlink @$*.lnk
@@ -156,12 +156,12 @@ test_cpp.exe: test_cpp.obj gc.lib
@%append $*.lnk library gc.lib
!ifdef MAKE_AS_DLL
!ifeq CALLING s
- @%append $*.lnk import GC_incr_words_allocd gc
- @%append $*.lnk import GC_incr_mem_freed gc
+ @%append $*.lnk import GC_incr_bytes_allocd gc
+ @%append $*.lnk import GC_incr_bytes_freed gc
@%append $*.lnk import GC_generic_malloc_words_small gc
!else
- @%append $*.lnk import GC_incr_words_allocd_ gc
- @%append $*.lnk import GC_incr_mem_freed_ gc
+ @%append $*.lnk import GC_incr_bytes_allocd_ gc
+ @%append $*.lnk import GC_incr_bytes_freed_ gc
@%append $*.lnk import GC_generic_malloc_words_small_ gc
!endif
!endif
diff --git a/aix_irix_threads.c b/aix_irix_threads.c
index 5d27afd1..f1cf14c4 100644
--- a/aix_irix_threads.c
+++ b/aix_irix_threads.c
@@ -26,10 +26,9 @@
* as a base instead.
*/
-# include "private/gc_priv.h"
-
# if defined(GC_IRIX_THREADS) || defined(GC_AIX_THREADS)
+# include "private/gc_priv.h"
# include <pthread.h>
# include <assert.h>
# include <semaphore.h>
@@ -65,6 +64,8 @@ void GC_print_sig_mask()
}
#endif
+GC_bool GC_need_to_lock = FALSE;
+
/* We use the allocation lock to protect thread-related data structures. */
/* The set of all known threads. We intercept thread creation and */
@@ -583,6 +584,7 @@ GC_pthread_create(pthread_t *new_thread,
pthread_attr_getdetachstate(attr, &detachstate);
if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
si -> flags = my_flags;
+ GC_need_to_lock = TRUE;
result = pthread_create(new_thread, attr, GC_start_routine, si);
/* Wait until child has been added to the thread table. */
@@ -617,9 +619,9 @@ VOLATILE GC_bool GC_collecting = 0; /* A hint that we're in the collector and
#define SLEEP_THRESHOLD 3
-volatile unsigned int GC_allocate_lock = 0;
-#define GC_TRY_LOCK() !GC_test_and_set(&GC_allocate_lock)
-#define GC_LOCK_TAKEN GC_allocate_lock
+volatile AO_TS_t GC_allocate_lock = 0;
+#define GC_TRY_LOCK() !AO_test_and_set_acquire(&GC_allocate_lock)
+#define GC_LOCK_TAKEN ((int)(GC_allocate_lock)) /* FIXME */
void GC_lock()
{
diff --git a/allchblk.c b/allchblk.c
index f9c31e04..7ed647a7 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -106,40 +106,39 @@ void GC_print_hblkfreelist()
word total_free = 0;
hdr * hhdr;
word sz;
- int i;
+ unsigned i;
for (i = 0; i <= N_HBLK_FLS; ++i) {
h = GC_hblkfreelist[i];
# ifdef USE_MUNMAP
- if (0 != h) GC_printf1("Free list %ld:\n",
- (unsigned long)i);
+ if (0 != h) GC_printf("Free list %ld:\n",
+ (unsigned long)i);
# else
- if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
- (unsigned long)i,
- (unsigned long)GC_free_bytes[i]);
+ if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
+ i, (unsigned long)GC_free_bytes[i]);
# endif
while (h != 0) {
hhdr = HDR(h);
sz = hhdr -> hb_sz;
- GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
+ GC_printf("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
total_free += sz;
if (GC_is_black_listed(h, HBLKSIZE) != 0) {
- GC_printf0("start black listed\n");
+ GC_printf("start black listed\n");
} else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
- GC_printf0("partially black listed\n");
+ GC_printf("partially black listed\n");
} else {
- GC_printf0("not black listed\n");
+ GC_printf("not black listed\n");
}
h = hhdr -> hb_next;
}
}
# ifndef USE_MUNMAP
if (total_free != GC_large_free_bytes) {
- GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
- (unsigned long) GC_large_free_bytes);
+ GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
+ (unsigned long) GC_large_free_bytes);
}
# endif
- GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
+ GC_printf("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
@@ -178,12 +177,12 @@ void GC_dump_regions()
++i;
end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
}
- GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
+ GC_printf("***Section from %p to %p\n", start, end);
for (p = start; p < end;) {
hhdr = HDR(p);
- GC_printf1("\t0x%lx ", (unsigned long)p);
+ GC_printf("\t%p ", p);
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- GC_printf1("Missing header!!(%ld)\n", hhdr);
+ GC_printf("Missing header!!(%d)\n", hhdr);
p += HBLKSIZE;
continue;
}
@@ -192,25 +191,25 @@ void GC_dump_regions()
divHBLKSZ(hhdr -> hb_sz));
int actual_index;
- GC_printf1("\tfree block of size 0x%lx bytes",
- (unsigned long)(hhdr -> hb_sz));
+ GC_printf("\tfree block of size 0x%lx bytes",
+ (unsigned long)(hhdr -> hb_sz));
if (IS_MAPPED(hhdr)) {
- GC_printf0("\n");
+ GC_printf("\n");
} else {
- GC_printf0("(unmapped)\n");
+ GC_printf("(unmapped)\n");
}
actual_index = free_list_index_of(hhdr);
if (-1 == actual_index) {
- GC_printf1("\t\tBlock not on free list %ld!!\n",
- correct_index);
+ GC_printf("\t\tBlock not on free list %d!!\n",
+ correct_index);
} else if (correct_index != actual_index) {
- GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
- actual_index, correct_index);
+ GC_printf("\t\tBlock on list %d, should be on %d!!\n",
+ actual_index, correct_index);
}
p += hhdr -> hb_sz;
} else {
- GC_printf1("\tused for blocks of size 0x%lx bytes\n",
- (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
+ GC_printf("\tused for blocks of size 0x%lx bytes\n",
+ (unsigned long)(hhdr -> hb_sz));
p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
}
}
@@ -222,25 +221,56 @@ void GC_dump_regions()
/* Initialize hdr for a block containing the indicated size and */
/* kind of objects. */
/* Return FALSE on failure. */
-static GC_bool setup_header(hhdr, sz, kind, flags)
-register hdr * hhdr;
-word sz; /* object size in words */
-int kind;
-unsigned char flags;
+static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
+ int kind, unsigned char flags)
{
- register word descr;
+ word descr;
+ size_t granules;
- /* Add description of valid object pointers */
- if (!GC_add_map_entry(sz)) return(FALSE);
- hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];
-
/* Set size, kind and mark proc fields */
- hhdr -> hb_sz = sz;
+ hhdr -> hb_sz = byte_sz;
hhdr -> hb_obj_kind = kind;
hhdr -> hb_flags = flags;
+ hhdr -> hb_block = block;
descr = GC_obj_kinds[kind].ok_descriptor;
- if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
+ if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
hhdr -> hb_descr = descr;
+
+# ifdef MARK_BIT_PER_OBJ
+ /* Set hb_inv_sz as portably as possible. */
+ /* We set it to the smallest value such that sz * inv_sz > 2**32 */
+ /* This may be more precision than necessary. */
+ if (byte_sz > MAXOBJBYTES) {
+ hhdr -> hb_inv_sz = LARGE_INV_SZ;
+ } else {
+ word inv_sz;
+
+# if CPP_WORDSZ == 64
+ inv_sz = ((word)1 << 32)/byte_sz;
+ if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
+# else /* 32 bit words */
+ GC_ASSERT(byte_sz >= 4);
+ inv_sz = ((unsigned)1 << 31)/byte_sz;
+ inv_sz *= 2;
+ while (inv_sz*byte_sz > byte_sz) ++inv_sz;
+# endif
+ hhdr -> hb_inv_sz = inv_sz;
+ }
+# else /* MARK_BIT_PER_GRANULE */
+ hhdr -> hb_large_block = (byte_sz > MAXOBJBYTES);
+ granules = BYTES_TO_GRANULES(byte_sz);
+ if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
+ /* Make it look like a valid block. */
+ hhdr -> hb_sz = HBLKSIZE;
+ hhdr -> hb_descr = 0;
+ hhdr -> hb_large_block = TRUE;
+ hhdr -> hb_map = 0;
+ return FALSE;
+ } else {
+ int index = (hhdr -> hb_large_block? 0 : granules);
+ hhdr -> hb_map = GC_obj_map[index];
+ }
+# endif /* MARK_BIT_PER_GRANULE */
/* Clear mark bits */
GC_clear_hdr_marks(hhdr);
@@ -355,7 +385,7 @@ hdr * hhdr;
GET_HDR(second, second_hdr);
second_hdr -> hb_prev = h;
}
- GC_invalidate_map(hhdr);
+ hhdr -> hb_flags |= FREE_BLK;
}
#ifdef USE_MUNMAP
@@ -471,7 +501,7 @@ int index;
rest = (struct hblk *)((word)h + bytes);
rest_hdr = GC_install_header(rest);
if (0 == rest_hdr) {
- /* This may be very bad news ... */
+ /* FIXME: This is likely to be very bad news ... */
WARN("Header allocation failed: Dropping block.\n", 0);
return(0);
}
@@ -479,7 +509,7 @@ int index;
rest_hdr -> hb_flags = 0;
# ifdef GC_ASSERTIONS
/* Mark h not free, to avoid assertion about adjacent free blocks. */
- hhdr -> hb_map = 0;
+ hhdr -> hb_flags &= ~FREE_BLK;
# endif
GC_add_to_fl(rest, rest_hdr);
return h;
@@ -525,7 +555,8 @@ int index; /* Index of free list */
INCR_FREE_BYTES(index, -(signed_word)h_size);
FREE_ASSERT(GC_free_bytes[index] > 0);
# ifdef GC_ASSERTIONS
- nhdr -> hb_map = 0; /* Don't fail test for consecutive */
+ nhdr -> hb_flags &= ~FREE_BLK;
+ /* Don't fail test for consecutive */
/* free blocks in GC_add_to_fl. */
# endif
# ifdef USE_MUNMAP
@@ -533,30 +564,31 @@ int index; /* Index of free list */
# endif
hhdr -> hb_sz = h_size;
GC_add_to_fl(h, hhdr);
- GC_invalidate_map(nhdr);
+ nhdr -> hb_flags |= FREE_BLK;
}
-struct hblk * GC_allochblk_nth();
+struct hblk *
+GC_allochblk_nth(word sz/* bytes */, int kind, unsigned char flags, int n);
/*
* Allocate (and return pointer to) a heap block
- * for objects of size sz words, searching the nth free list.
+ * for objects of size sz bytes, searching the nth free list.
*
* NOTE: We set obj_map field in header correctly.
* Caller is responsible for building an object freelist in block.
*
- * Unlike older versions of the collectors, the client is responsible
- * for clearing the block, if necessary.
+ * The client is responsible for clearing the block, if necessary.
*/
struct hblk *
-GC_allochblk(sz, kind, flags)
-word sz;
-int kind;
-unsigned flags; /* IGNORE_OFF_PAGE or 0 */
+GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
- word blocks = OBJ_SZ_TO_BLOCKS(sz);
- int start_list = GC_hblk_fl_from_blocks(blocks);
+ word blocks;
+ int start_list;
int i;
+
+ GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
+ blocks = OBJ_SZ_TO_BLOCKS(sz);
+ start_list = GC_hblk_fl_from_blocks(blocks);
for (i = start_list; i <= N_HBLK_FLS; ++i) {
struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
if (0 != result) {
@@ -567,18 +599,18 @@ unsigned flags; /* IGNORE_OFF_PAGE or 0 */
}
/*
* The same, but with search restricted to nth free list.
+ * Flags is IGNORE_OFF_PAGE or zero.
+ * Unlike the above, sz is in bytes.
*/
struct hblk *
-GC_allochblk_nth(sz, kind, flags, n)
-word sz;
-int kind;
-unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
-int n;
+GC_allochblk_nth(word sz, int kind, unsigned char flags, int n)
{
- register struct hblk *hbp;
- register hdr * hhdr; /* Header corr. to hbp */
- register struct hblk *thishbp;
- register hdr * thishdr; /* Header corr. to hbp */
+ struct hblk *hbp;
+ hdr * hhdr; /* Header corr. to hbp */
+ /* Initialized after loop if hbp !=0 */
+ /* Gcc uninitialized use warning is bogus. */
+ struct hblk *thishbp;
+ hdr * thishdr; /* Header corr. to hbp */
signed_word size_needed; /* number of bytes in requested objects */
signed_word size_avail; /* bytes available in this block */
@@ -609,8 +641,7 @@ int n;
/* If we are deallocating lots of memory from */
/* finalizers, fail and collect sooner rather */
/* than later. */
- if (WORDS_TO_BYTES(GC_finalizer_mem_freed)
- > (GC_heapsize >> 4)) {
+ if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
continue;
}
# endif /* !USE_MUNMAP */
@@ -700,14 +731,14 @@ int n;
struct hblk * h;
struct hblk * prev = hhdr -> hb_prev;
- GC_words_wasted += BYTES_TO_WORDS(total_size);
+ GC_bytes_wasted += total_size;
GC_large_free_bytes -= total_size;
GC_remove_from_fl(hhdr, n);
for (h = hbp; h < limit; h++) {
if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
(void) setup_header(
- hhdr,
- BYTES_TO_WORDS(HBLKSIZE),
+ hhdr, h,
+ HBLKSIZE,
PTRFREE, 0); /* Cant fail */
if (GC_debugging_started) {
BZERO(h, HBLKSIZE);
@@ -745,7 +776,7 @@ int n;
/* This leaks memory under very rare conditions. */
/* Set up header */
- if (!setup_header(hhdr, sz, kind, flags)) {
+ if (!setup_header(hhdr, hbp, sz, kind, flags)) {
GC_remove_counts(hbp, (word)size_needed);
return(0); /* ditto */
}
@@ -779,8 +810,7 @@ struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
* All mark words are assumed to be cleared.
*/
void
-GC_freehblk(hbp)
-struct hblk *hbp;
+GC_freehblk(struct hblk *hbp)
{
struct hblk *next, *prev;
hdr *hhdr, *prevhdr, *nexthdr;
@@ -798,13 +828,12 @@ signed_word size;
/* Check for duplicate deallocation in the easy case */
if (HBLK_IS_FREE(hhdr)) {
- GC_printf1("Duplicate large block deallocation of 0x%lx\n",
- (unsigned long) hbp);
+ GC_printf("Duplicate large block deallocation of %p\n", hbp);
ABORT("Duplicate large block deallocation");
}
GC_ASSERT(IS_MAPPED(hhdr));
- GC_invalidate_map(hhdr);
+ hhdr -> hb_flags |= FREE_BLK;
next = (struct hblk *)((word)hbp + size);
GET_HDR(next, nexthdr);
prev = GC_free_block_ending_at(hbp);
diff --git a/alloc.c b/alloc.c
index 53afa0d9..834e8a0f 100644
--- a/alloc.c
+++ b/alloc.c
@@ -2,7 +2,7 @@
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
* Copyright (c) 1998 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -26,7 +26,7 @@
/*
* Separate free lists are maintained for different sized objects
- * up to MAXOBJSZ.
+ * up to MAXOBJBYTES.
* The call GC_allocobj(i,k) ensures that the freelist for
* kind k objects of size i points to a non-empty
* free list. It returns a pointer to the first entry on the free list.
@@ -94,7 +94,7 @@ char * GC_copyright[] =
/* some more variables */
-extern signed_word GC_mem_found; /* Number of reclaimed longwords */
+extern signed_word GC_bytes_found; /* Number of reclaimed bytes */
/* after garbage collection */
GC_bool GC_dont_expand = 0;
@@ -104,7 +104,7 @@ word GC_free_space_divisor = 3;
extern GC_bool GC_collection_in_progress();
/* Collection is in progress, or was abandoned. */
-int GC_never_stop_func GC_PROTO((void)) { return(0); }
+int GC_never_stop_func (void) { return(0); }
unsigned long GC_time_limit = TIME_LIMIT;
@@ -117,7 +117,7 @@ int GC_n_attempts = 0; /* Number of attempts at finishing */
#if defined(SMALL_CONFIG) || defined(NO_CLOCK)
# define GC_timeout_stop_func GC_never_stop_func
#else
- int GC_timeout_stop_func GC_PROTO((void))
+ int GC_timeout_stop_func (void)
{
CLOCK_TYPE current_time;
static unsigned count = 0;
@@ -127,13 +127,11 @@ int GC_n_attempts = 0; /* Number of attempts at finishing */
GET_TIME(current_time);
time_diff = MS_TIME_DIFF(current_time,GC_start_time);
if (time_diff >= GC_time_limit) {
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf0("Abandoning stopped marking after ");
- GC_printf1("%lu msecs", (unsigned long)time_diff);
- GC_printf1("(attempt %ld)\n", (unsigned long) GC_n_attempts);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Abandoning stopped marking after ");
+ GC_log_printf("%lu msecs", time_diff);
+ GC_log_printf("(attempt %d)\n", GC_n_attempts);
+ }
return(1);
}
return(0);
@@ -142,14 +140,14 @@ int GC_n_attempts = 0; /* Number of attempts at finishing */
/* Return the minimum number of words that must be allocated between */
/* collections to amortize the collection cost. */
-static word min_words_allocd()
+static word min_bytes_allocd()
{
# ifdef THREADS
/* We punt, for now. */
register signed_word stack_size = 10000;
# else
int dummy;
- register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
+ signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
# endif
word total_root_size; /* includes double stack size, */
/* since the stack is expensive */
@@ -159,10 +157,8 @@ static word min_words_allocd()
if (stack_size < 0) stack_size = -stack_size;
total_root_size = 2 * stack_size + GC_root_size;
- scan_size = BYTES_TO_WORDS(GC_heapsize - GC_large_free_bytes
- + (GC_large_free_bytes >> 2)
- /* use a bit more of large empty heap */
- + total_root_size);
+ scan_size = 2 * GC_composite_in_use + GC_atomic_in_use
+ + total_root_size;
if (TRUE_INCREMENTAL) {
return scan_size / (2 * GC_free_space_divisor);
} else {
@@ -173,40 +169,39 @@ static word min_words_allocd()
/* Return the number of words allocated, adjusted for explicit storage */
/* management, etc.. This number is used in deciding when to trigger */
/* collections. */
-word GC_adj_words_allocd()
+word GC_adj_bytes_allocd(void)
{
register signed_word result;
register signed_word expl_managed =
- BYTES_TO_WORDS((long)GC_non_gc_bytes
- - (long)GC_non_gc_bytes_at_gc);
+ (long)GC_non_gc_bytes - (long)GC_non_gc_bytes_at_gc;
/* Don't count what was explicitly freed, or newly allocated for */
/* explicit management. Note that deallocating an explicitly */
/* managed object should not alter result, assuming the client */
/* is playing by the rules. */
- result = (signed_word)GC_words_allocd
- - (signed_word)GC_mem_freed
- + (signed_word)GC_finalizer_mem_freed - expl_managed;
- if (result > (signed_word)GC_words_allocd) {
- result = GC_words_allocd;
+ result = (signed_word)GC_bytes_allocd
+ - (signed_word)GC_bytes_freed
+ + (signed_word)GC_finalizer_bytes_freed
+ - expl_managed;
+ if (result > (signed_word)GC_bytes_allocd) {
+ result = GC_bytes_allocd;
/* probably client bug or unfortunate scheduling */
}
- result += GC_words_finalized;
+ result += GC_bytes_finalized;
/* We count objects enqueued for finalization as though they */
/* had been reallocated this round. Finalization is user */
/* visible progress. And if we don't count this, we have */
/* stability problems for programs that finalize all objects. */
- if ((GC_words_wasted >> 3) < result)
- result += GC_words_wasted;
+ result += GC_bytes_wasted;
/* This doesn't reflect useful work. But if there is lots of */
/* new fragmentation, the same is probably true of the heap, */
/* and the collection will be correspondingly cheaper. */
- if (result < (signed_word)(GC_words_allocd >> 3)) {
+ if (result < (signed_word)(GC_bytes_allocd >> 3)) {
/* Always count at least 1/8 of the allocations. We don't want */
/* to collect too infrequently, since that would inhibit */
/* coalescing of free storage blocks. */
/* This also makes us partially robust against client bugs. */
- return(GC_words_allocd >> 3);
+ return(GC_bytes_allocd >> 3);
} else {
return(result);
}
@@ -232,16 +227,16 @@ void GC_clear_a_few_frames()
static word GC_collect_at_heapsize = (word)(-1);
/* Have we allocated enough to amortize a collection? */
-GC_bool GC_should_collect()
+GC_bool GC_should_collect(void)
{
- return(GC_adj_words_allocd() >= min_words_allocd()
+ return(GC_adj_bytes_allocd() >= min_bytes_allocd()
|| GC_heapsize >= GC_collect_at_heapsize);
}
-void GC_notify_full_gc()
+void GC_notify_full_gc(void)
{
- if (GC_start_call_back != (void (*) GC_PROTO((void)))0) {
+ if (GC_start_call_back != (void (*) (void))0) {
(*GC_start_call_back)();
}
}
@@ -254,7 +249,7 @@ GC_bool GC_is_full_gc = FALSE;
* between partial, full, and stop-world collections.
* Assumes lock held, signals disabled.
*/
-void GC_maybe_gc()
+void GC_maybe_gc(void)
{
static int n_partial_gcs = 0;
@@ -268,14 +263,12 @@ void GC_maybe_gc()
GC_wait_for_reclaim();
# endif
if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf2(
+ if (GC_print_stats) {
+ GC_log_printf(
"***>Full mark for collection %lu after %ld allocd bytes\n",
- (unsigned long) GC_gc_no+1,
- (long)WORDS_TO_BYTES(GC_words_allocd));
- }
-# endif
+ (unsigned long)GC_gc_no+1,
+ (long)GC_bytes_allocd);
+ }
GC_promote_black_lists();
(void)GC_reclaim_all((GC_stop_func)0, TRUE);
GC_clear_marks();
@@ -313,20 +306,15 @@ void GC_maybe_gc()
* If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
* Return TRUE if we successfully completed the collection.
*/
-GC_bool GC_try_to_collect_inner(stop_func)
-GC_stop_func stop_func;
+GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
-# ifdef CONDPRINT
- CLOCK_TYPE start_time, current_time;
-# endif
+ CLOCK_TYPE start_time, current_time;
if (GC_dont_gc) return FALSE;
if (GC_incremental && GC_collection_in_progress()) {
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf0(
+ if (GC_print_stats) {
+ GC_log_printf(
"GC_try_to_collect_inner: finishing collection in progress\n");
- }
-# endif /* CONDPRINT */
+ }
/* Just finish collection already in progress. */
while(GC_collection_in_progress()) {
if (stop_func()) return(FALSE);
@@ -334,15 +322,12 @@ GC_stop_func stop_func;
}
}
if (stop_func == GC_never_stop_func) GC_notify_full_gc();
-# ifdef CONDPRINT
- if (GC_print_stats) {
- if (GC_print_stats) GET_TIME(start_time);
- GC_printf2(
+ if (GC_print_stats) {
+ GET_TIME(start_time);
+ GC_log_printf(
"Initiating full world-stop collection %lu after %ld allocd bytes\n",
- (unsigned long) GC_gc_no+1,
- (long)WORDS_TO_BYTES(GC_words_allocd));
- }
-# endif
+ (unsigned long)GC_gc_no+1, (long)GC_bytes_allocd);
+ }
GC_promote_black_lists();
/* Make sure all blocks have been reclaimed, so sweep routines */
/* don't see cleared mark bits. */
@@ -375,13 +360,11 @@ GC_stop_func stop_func;
return(FALSE);
}
GC_finish_collection();
-# if defined(CONDPRINT)
- if (GC_print_stats) {
+ if (GC_print_stats) {
GET_TIME(current_time);
- GC_printf1("Complete collection took %lu msecs\n",
- MS_TIME_DIFF(current_time,start_time));
- }
-# endif
+ GC_log_printf("Complete collection took %lu msecs\n",
+ MS_TIME_DIFF(current_time,start_time));
+ }
return(TRUE);
}
@@ -404,10 +387,9 @@ GC_stop_func stop_func;
int GC_deficit = 0; /* The number of extra calls to GC_mark_some */
/* that we have made. */
-void GC_collect_a_little_inner(n)
-int n;
+void GC_collect_a_little_inner(int n)
{
- register int i;
+ int i;
if (GC_dont_gc) return;
if (GC_incremental && GC_collection_in_progress()) {
@@ -441,17 +423,15 @@ int n;
}
}
-int GC_collect_a_little GC_PROTO(())
+int GC_collect_a_little(void)
{
int result;
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
GC_collect_a_little_inner(1);
result = (int)GC_collection_in_progress();
UNLOCK();
- ENABLE_SIGNALS();
if (!result && GC_debugging_started) GC_print_all_smashed();
return(result);
}
@@ -462,35 +442,27 @@ int GC_collect_a_little GC_PROTO(())
* If stop_func() ever returns TRUE, we may fail and return FALSE.
* Increment GC_gc_no if we succeed.
*/
-GC_bool GC_stopped_mark(stop_func)
-GC_stop_func stop_func;
+GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
- register int i;
+ unsigned i;
int dummy;
-# if defined(PRINTTIMES) || defined(CONDPRINT)
- CLOCK_TYPE start_time, current_time;
-# endif
+ CLOCK_TYPE start_time, current_time;
-# ifdef PRINTTIMES
+ if (GC_print_stats)
GET_TIME(start_time);
-# endif
-# if defined(CONDPRINT) && !defined(PRINTTIMES)
- if (GC_print_stats) GET_TIME(start_time);
-# endif
+
# if defined(REGISTER_LIBRARIES_EARLY)
GC_cond_register_dynamic_libraries();
# endif
STOP_WORLD();
IF_THREADS(GC_world_stopped = TRUE);
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("--> Marking for collection %lu ",
- (unsigned long) GC_gc_no + 1);
- GC_printf2("after %lu allocd bytes + %lu wasted bytes\n",
- (unsigned long) WORDS_TO_BYTES(GC_words_allocd),
- (unsigned long) WORDS_TO_BYTES(GC_words_wasted));
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("--> Marking for collection %lu ",
+ (unsigned long)GC_gc_no + 1);
+ GC_log_printf("after %lu allocd bytes + %lu wasted bytes\n",
+ (unsigned long) GC_bytes_allocd,
+ (unsigned long) GC_bytes_wasted);
+ }
# ifdef MAKE_BACK_GRAPH
if (GC_print_back_height) {
GC_build_back_graph();
@@ -504,13 +476,10 @@ GC_stop_func stop_func;
GC_initiate_gc();
for(i = 0;;i++) {
if ((*stop_func)()) {
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf0("Abandoned stopped marking after ");
- GC_printf1("%lu iterations\n",
- (unsigned long)i);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Abandoned stopped marking after ");
+ GC_log_printf("%u iterations\n", i);
+ }
GC_deficit = i; /* Give the mutator a chance. */
IF_THREADS(GC_world_stopped = FALSE);
START_WORLD();
@@ -520,26 +489,16 @@ GC_stop_func stop_func;
}
GC_gc_no++;
-# ifdef PRINTSTATS
- GC_printf2("Collection %lu reclaimed %ld bytes",
- (unsigned long) GC_gc_no - 1,
- (long)WORDS_TO_BYTES(GC_mem_found));
-# else
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Collection %lu finished", (unsigned long) GC_gc_no - 1);
- }
-# endif
-# endif /* !PRINTSTATS */
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1(" ---> heapsize = %lu bytes\n",
- (unsigned long) GC_heapsize);
+ if (GC_print_stats) {
+ GC_log_printf("Collection %lu reclaimed %ld bytes",
+ (unsigned long)GC_gc_no - 1,
+ (long)GC_bytes_found);
+ GC_log_printf(" ---> heapsize = %lu bytes\n",
+ (unsigned long) GC_heapsize);
/* Printf arguments may be pushed in funny places. Clear the */
/* space. */
- GC_printf0("");
- }
-# endif /* CONDPRINT */
+ GC_log_printf("");
+ }
/* Check all debugged objects for consistency */
if (GC_debugging_started) {
@@ -548,90 +507,103 @@ GC_stop_func stop_func;
IF_THREADS(GC_world_stopped = FALSE);
START_WORLD();
-# ifdef PRINTTIMES
- GET_TIME(current_time);
- GC_printf1("World-stopped marking took %lu msecs\n",
- MS_TIME_DIFF(current_time,start_time));
-# else
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GET_TIME(current_time);
- GC_printf1("World-stopped marking took %lu msecs\n",
- MS_TIME_DIFF(current_time,start_time));
- }
-# endif
-# endif
+ if (GC_print_stats) {
+ GET_TIME(current_time);
+ GC_log_printf("World-stopped marking took %lu msecs\n",
+ MS_TIME_DIFF(current_time,start_time));
+ }
return(TRUE);
}
/* Set all mark bits for the free list whose first entry is q */
-#ifdef __STDC__
- void GC_set_fl_marks(ptr_t q)
-#else
- void GC_set_fl_marks(q)
- ptr_t q;
-#endif
+void GC_set_fl_marks(ptr_t q)
{
ptr_t p;
struct hblk * h, * last_h = 0;
- hdr *hhdr;
- int word_no;
+ hdr *hhdr; /* gcc "might be uninitialized" warning is bogus. */
+ IF_PER_OBJ(size_t sz;)
+ int bit_no;
for (p = q; p != 0; p = obj_link(p)){
h = HBLKPTR(p);
if (h != last_h) {
last_h = h;
hhdr = HDR(h);
+ IF_PER_OBJ(sz = hhdr->hb_sz;)
}
- word_no = (((word *)p) - ((word *)h));
- set_mark_bit_from_hdr(hhdr, word_no);
+ bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, sz);
+ if (!mark_bit_from_hdr(hhdr, bit_no)) {
+ set_mark_bit_from_hdr(hhdr, bit_no);
+ ++hhdr -> hb_n_marks;
+ }
}
}
-/* Clear all mark bits for the free list whose first entry is q */
-/* Decrement GC_mem_found by number of words on free list. */
-#ifdef __STDC__
- void GC_clear_fl_marks(ptr_t q)
-#else
- void GC_clear_fl_marks(q)
- ptr_t q;
+#ifdef GC_ASSERTIONS
+/* Check that all mark bits for the free list whose first entry is q */
+/* are set. */
+void GC_check_fl_marks(ptr_t q)
+{
+ ptr_t p;
+
+ for (p = q; p != 0; p = obj_link(p)){
+ if (!GC_is_marked(p)) {
+ GC_err_printf("Unmarked object %p on list %p\n", p, q);
+ ABORT("Unmarked local free list entry.");
+ }
+ }
+}
#endif
+
+/* Clear all mark bits for the free list whose first entry is q */
+/* Decrement GC_bytes_found by number of bytes on free list. */
+void GC_clear_fl_marks(ptr_t q)
{
ptr_t p;
struct hblk * h, * last_h = 0;
hdr *hhdr;
- int word_no;
+ size_t sz;
+ int bit_no;
for (p = q; p != 0; p = obj_link(p)){
h = HBLKPTR(p);
if (h != last_h) {
last_h = h;
hhdr = HDR(h);
+ sz = hhdr->hb_sz; /* Normally set only once. */
}
- word_no = (((word *)p) - ((word *)h));
- clear_mark_bit_from_hdr(hhdr, word_no);
-# ifdef GATHERSTATS
- GC_mem_found -= hhdr -> hb_sz;
-# endif
+ bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, sz);
+ if (mark_bit_from_hdr(hhdr, bit_no)) {
+ clear_mark_bit_from_hdr(hhdr, bit_no);
+ --hhdr -> hb_n_marks;
+ }
+ GC_bytes_found -= sz;
}
}
+#if defined(GC_ASSERTIONS) && defined(GC_LINUX_THREADS)
+extern void GC_check_tls(void);
+#endif
+
/* Finish up a collection. Assumes lock is held, signals are disabled, */
/* but the world is otherwise running. */
void GC_finish_collection()
{
-# ifdef PRINTTIMES
- CLOCK_TYPE start_time;
- CLOCK_TYPE finalize_time;
- CLOCK_TYPE done_time;
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE finalize_time;
+ CLOCK_TYPE done_time;
- GET_TIME(start_time);
- finalize_time = start_time;
+# if defined(GC_ASSERTIONS) && defined(GC_LINUX_THREADS) \
+ && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
+ /* Check that we marked some of our own data. */
+ /* FIXME: Add more checks. */
+ GC_check_tls();
# endif
-# ifdef GATHERSTATS
- GC_mem_found = 0;
-# endif
+ if (GC_print_stats)
+ GET_TIME(start_time);
+
+ GC_bytes_found = 0;
# if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
if (getenv("GC_PRINT_ADDRESS_MAP") != 0) {
GC_print_address_map();
@@ -647,7 +619,7 @@ void GC_finish_collection()
ptr_t q;
for (kind = 0; kind < GC_n_kinds; kind++) {
- for (size = 1; size <= MAXOBJSZ; size++) {
+ for (size = 1; size <= MAXOBJGRANULES; size++) {
q = GC_obj_kinds[kind].ok_freelist[size];
if (q != 0) GC_set_fl_marks(q);
}
@@ -662,24 +634,23 @@ void GC_finish_collection()
GC_clean_changing_list();
# endif
-# ifdef PRINTTIMES
+ if (GC_print_stats)
GET_TIME(finalize_time);
-# endif
if (GC_print_back_height) {
# ifdef MAKE_BACK_GRAPH
GC_traverse_back_graph();
# else
# ifndef SMALL_CONFIG
- GC_err_printf0("Back height not available: "
- "Rebuild collector with -DMAKE_BACK_GRAPH\n");
+ GC_err_printf("Back height not available: "
+ "Rebuild collector with -DMAKE_BACK_GRAPH\n");
# endif
# endif
}
/* Clear free list mark bits, in case they got accidentally marked */
/* (or GC_find_leak is set and they were intentionally marked). */
- /* Also subtract memory remaining from GC_mem_found count. */
+ /* Also subtract memory remaining from GC_bytes_found count. */
/* Note that composite objects on free list are cleared. */
/* Thus accidentally marking a free list is not a problem; only */
/* objects on the list itself will be marked, and that's fixed here. */
@@ -689,7 +660,7 @@ void GC_finish_collection()
int kind;
for (kind = 0; kind < GC_n_kinds; kind++) {
- for (size = 1; size <= MAXOBJSZ; size++) {
+ for (size = 1; size <= MAXOBJGRANULES; size++) {
q = GC_obj_kinds[kind].ok_freelist[size];
if (q != 0) GC_clear_fl_marks(q);
}
@@ -697,70 +668,67 @@ void GC_finish_collection()
}
-# ifdef PRINTSTATS
- GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
- (long)WORDS_TO_BYTES(GC_mem_found));
-# endif
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Bytes recovered before sweep - f.l. count = %ld\n",
+ (long)GC_bytes_found);
+
/* Reconstruct free lists to contain everything not marked */
GC_start_reclaim(FALSE);
+ if (GC_print_stats) {
+ GC_log_printf("Heap contains %lu pointer-containing "
+ "+ %lu pointer-free reachable bytes\n",
+ (unsigned long)GC_composite_in_use,
+ (unsigned long)GC_atomic_in_use);
+ }
if (GC_is_full_gc) {
GC_used_heap_size_after_full = USED_HEAP_SIZE;
GC_need_full_gc = FALSE;
} else {
GC_need_full_gc =
- BYTES_TO_WORDS(USED_HEAP_SIZE - GC_used_heap_size_after_full)
- > min_words_allocd();
+ USED_HEAP_SIZE - GC_used_heap_size_after_full
+ > min_bytes_allocd();
}
-# ifdef PRINTSTATS
- GC_printf2(
+ if (GC_print_stats == VERBOSE) {
+ GC_log_printf(
"Immediately reclaimed %ld bytes in heap of size %lu bytes",
- (long)WORDS_TO_BYTES(GC_mem_found),
+ (long)GC_bytes_found,
(unsigned long)GC_heapsize);
# ifdef USE_MUNMAP
- GC_printf1("(%lu unmapped)", GC_unmapped_bytes);
+ GC_log_printf("(%lu unmapped)", (unsigned long)GC_unmapped_bytes);
# endif
- GC_printf2(
- "\n%lu (atomic) + %lu (composite) collectable bytes in use\n",
- (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
- (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
-# endif
+ GC_log_printf("\n");
+ }
+ /* Reset or increment counters for next cycle */
GC_n_attempts = 0;
GC_is_full_gc = FALSE;
- /* Reset or increment counters for next cycle */
- GC_words_allocd_before_gc += GC_words_allocd;
+ GC_bytes_allocd_before_gc += GC_bytes_allocd;
GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
- GC_words_allocd = 0;
- GC_words_wasted = 0;
- GC_mem_freed = 0;
- GC_finalizer_mem_freed = 0;
+ GC_bytes_allocd = 0;
+ GC_bytes_wasted = 0;
+ GC_bytes_freed = 0;
+ GC_finalizer_bytes_freed = 0;
# ifdef USE_MUNMAP
GC_unmap_old();
# endif
-# ifdef PRINTTIMES
+ if (GC_print_stats) {
GET_TIME(done_time);
- GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
- MS_TIME_DIFF(finalize_time,start_time),
- MS_TIME_DIFF(done_time,finalize_time));
-# endif
+ GC_log_printf("Finalize + initiate sweep took %lu + %lu msecs\n",
+ MS_TIME_DIFF(finalize_time,start_time),
+ MS_TIME_DIFF(done_time,finalize_time));
+ }
}
/* Externally callable routine to invoke full, stop-world collection */
-# if defined(__STDC__) || defined(__cplusplus)
- int GC_try_to_collect(GC_stop_func stop_func)
-# else
- int GC_try_to_collect(stop_func)
- GC_stop_func stop_func;
-# endif
+int GC_try_to_collect(GC_stop_func stop_func)
{
int result;
DCL_LOCK_STATE;
if (GC_debugging_started) GC_print_all_smashed();
GC_INVOKE_FINALIZERS();
- DISABLE_SIGNALS();
LOCK();
ENTER_GC();
if (!GC_is_initialized) GC_init_inner();
@@ -769,7 +737,6 @@ void GC_finish_collection()
result = (int)GC_try_to_collect_inner(stop_func);
EXIT_GC();
UNLOCK();
- ENABLE_SIGNALS();
if(result) {
if (GC_debugging_started) GC_print_all_smashed();
GC_INVOKE_FINALIZERS();
@@ -777,7 +744,7 @@ void GC_finish_collection()
return(result);
}
-void GC_gcollect GC_PROTO(())
+void GC_gcollect(void)
{
(void)GC_try_to_collect(GC_never_stop_func);
if (GC_have_errors) GC_print_all_errors();
@@ -789,11 +756,8 @@ word GC_n_heap_sects = 0; /* Number of sections currently in heap. */
* Use the chunk of memory starting at p of size bytes as part of the heap.
* Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
*/
-void GC_add_to_heap(p, bytes)
-struct hblk *p;
-word bytes;
+void GC_add_to_heap(struct hblk *p, size_t bytes)
{
- word words;
hdr * phdr;
if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
@@ -809,69 +773,60 @@ word bytes;
GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
GC_n_heap_sects++;
- words = BYTES_TO_WORDS(bytes);
- phdr -> hb_sz = words;
- phdr -> hb_map = (unsigned char *)1; /* A value != GC_invalid_map */
+ phdr -> hb_sz = bytes;
phdr -> hb_flags = 0;
GC_freehblk(p);
GC_heapsize += bytes;
if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr
|| GC_least_plausible_heap_addr == 0) {
- GC_least_plausible_heap_addr = (GC_PTR)((ptr_t)p - sizeof(word));
+ GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
/* Making it a little smaller than necessary prevents */
/* us from getting a false hit from the variable */
/* itself. There's some unintentional reflection */
/* here. */
}
if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {
- GC_greatest_plausible_heap_addr = (GC_PTR)((ptr_t)p + bytes);
+ GC_greatest_plausible_heap_addr = (void *)((ptr_t)p + bytes);
}
}
# if !defined(NO_DEBUGGING)
-void GC_print_heap_sects()
+void GC_print_heap_sects(void)
{
register unsigned i;
- GC_printf1("Total heap size: %lu\n", (unsigned long) GC_heapsize);
+ GC_printf("Total heap size: %lu\n", (unsigned long) GC_heapsize);
for (i = 0; i < GC_n_heap_sects; i++) {
- unsigned long start = (unsigned long) GC_heap_sects[i].hs_start;
+ ptr_t start = GC_heap_sects[i].hs_start;
unsigned long len = (unsigned long) GC_heap_sects[i].hs_bytes;
struct hblk *h;
unsigned nbl = 0;
- GC_printf3("Section %ld from 0x%lx to 0x%lx ", (unsigned long)i,
+ GC_printf("Section %d from %p to %p ", i,
start, (unsigned long)(start + len));
for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
}
- GC_printf2("%lu/%lu blacklisted\n", (unsigned long)nbl,
+ GC_printf("%lu/%lu blacklisted\n", (unsigned long)nbl,
(unsigned long)(len/HBLKSIZE));
}
}
# endif
-GC_PTR GC_least_plausible_heap_addr = (GC_PTR)ONES;
-GC_PTR GC_greatest_plausible_heap_addr = 0;
+void * GC_least_plausible_heap_addr = (void *)ONES;
+void * GC_greatest_plausible_heap_addr = 0;
-ptr_t GC_max(x,y)
-ptr_t x, y;
+static INLINE ptr_t GC_max(ptr_t x, ptr_t y)
{
return(x > y? x : y);
}
-ptr_t GC_min(x,y)
-ptr_t x, y;
+static INLINE ptr_t GC_min(ptr_t x, ptr_t y)
{
return(x < y? x : y);
}
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_set_max_heap_size(GC_word n)
-# else
- void GC_set_max_heap_size(n)
- GC_word n;
-# endif
+void GC_set_max_heap_size(GC_word n)
{
GC_max_heapsize = n;
}
@@ -885,8 +840,7 @@ GC_word GC_max_retries = 0;
* Tiny values of n are rounded up.
* Returns FALSE on failure.
*/
-GC_bool GC_expand_hp_inner(n)
-word n;
+GC_bool GC_expand_hp_inner(word n)
{
word bytes;
struct hblk * space;
@@ -908,37 +862,28 @@ word n;
}
space = GET_MEM(bytes);
if( space == 0 ) {
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Failed to expand heap by %ld bytes\n",
- (unsigned long)bytes);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Failed to expand heap by %ld bytes\n",
+ (unsigned long)bytes);
+ }
return(FALSE);
}
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf2("Increasing heap size by %lu after %lu allocated bytes\n",
- (unsigned long)bytes,
- (unsigned long)WORDS_TO_BYTES(GC_words_allocd));
-# ifdef UNDEFINED
- GC_printf1("Root size = %lu\n", GC_root_size);
- GC_print_block_list(); GC_print_hblkfreelist();
- GC_printf0("\n");
-# endif
- }
-# endif
- expansion_slop = WORDS_TO_BYTES(min_words_allocd()) + 4*MAXHINCR*HBLKSIZE;
- if (GC_last_heap_addr == 0 && !((word)space & SIGNB)
+ if (GC_print_stats) {
+ GC_log_printf("Increasing heap size by %lu after %lu allocated bytes\n",
+ (unsigned long)bytes,
+ (unsigned long)GC_bytes_allocd);
+ }
+ expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
+ if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
|| (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
/* Assume the heap is growing up */
GC_greatest_plausible_heap_addr =
- (GC_PTR)GC_max((ptr_t)GC_greatest_plausible_heap_addr,
+ (void *)GC_max((ptr_t)GC_greatest_plausible_heap_addr,
(ptr_t)space + bytes + expansion_slop);
} else {
/* Heap is growing down */
GC_least_plausible_heap_addr =
- (GC_PTR)GC_min((ptr_t)GC_least_plausible_heap_addr,
+ (void *)GC_min((ptr_t)GC_least_plausible_heap_addr,
(ptr_t)space - expansion_slop);
}
# if defined(LARGE_CONFIG)
@@ -954,33 +899,26 @@ word n;
GC_add_to_heap(space, bytes);
/* Force GC before we are likely to allocate past expansion_slop */
GC_collect_at_heapsize =
- GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
+ GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
# if defined(LARGE_CONFIG)
if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
- GC_collect_at_heapsize = (word)(-1);
+ GC_collect_at_heapsize = (word)(-1);
# endif
return(TRUE);
}
/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Arguments is in bytes. */
-# if defined(__STDC__) || defined(__cplusplus)
- int GC_expand_hp(size_t bytes)
-# else
- int GC_expand_hp(bytes)
- size_t bytes;
-# endif
+int GC_expand_hp(size_t bytes)
{
int result;
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
if (!GC_is_initialized) GC_init_inner();
result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
if (result) GC_requested_heapsize += bytes;
UNLOCK();
- ENABLE_SIGNALS();
return(result);
}
@@ -988,12 +926,10 @@ unsigned GC_fail_count = 0;
/* How many consecutive GC/expansion failures? */
/* Reset by GC_allochblk. */
-GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
-word needed_blocks;
-GC_bool ignore_off_page;
+GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page)
{
if (!GC_incremental && !GC_dont_gc &&
- ((GC_dont_expand && GC_words_allocd > 0) || GC_should_collect())) {
+ ((GC_dont_expand && GC_bytes_allocd > 0) || GC_should_collect())) {
GC_gcollect_inner();
} else {
word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
@@ -1029,41 +965,37 @@ GC_bool ignore_off_page;
return(FALSE);
}
} else {
-# ifdef CONDPRINT
- if (GC_fail_count && GC_print_stats) {
- GC_printf0("Memory available again ...\n");
- }
-# endif
+ if (GC_fail_count && GC_print_stats) {
+ GC_printf("Memory available again ...\n");
+ }
}
}
return(TRUE);
}
/*
- * Make sure the object free list for sz is not empty.
+ * Make sure the object free list for size gran (in granules) is not empty.
* Return a pointer to the first object on the free list.
* The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
* Assumes we hold the allocator lock and signals are disabled.
*
*/
-ptr_t GC_allocobj(sz, kind)
-word sz;
-int kind;
+ptr_t GC_allocobj(size_t gran, int kind)
{
- ptr_t * flh = &(GC_obj_kinds[kind].ok_freelist[sz]);
+ void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
GC_bool tried_minor = FALSE;
- if (sz == 0) return(0);
+ if (gran == 0) return(0);
while (*flh == 0) {
ENTER_GC();
/* Do our share of marking work */
if(TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
/* Sweep blocks for objects of this size */
- GC_continue_reclaim(sz, kind);
+ GC_continue_reclaim(gran, kind);
EXIT_GC();
if (*flh == 0) {
- GC_new_hblk(sz, kind);
+ GC_new_hblk(gran, kind);
}
if (*flh == 0) {
ENTER_GC();
diff --git a/backgraph.c b/backgraph.c
index 0c512e2c..b4dbbb0f 100644
--- a/backgraph.c
+++ b/backgraph.c
@@ -254,12 +254,12 @@ static void add_edge(ptr_t p, ptr_t q)
}
}
-typedef void (*per_object_func)(ptr_t p, word n_words, word gc_descr);
+typedef void (*per_object_func)(ptr_t p, word n_bytes, word gc_descr);
static void per_object_helper(struct hblk *h, word fn)
{
hdr * hhdr = HDR(h);
- word sz = hhdr -> hb_sz;
+ size_t sz = hhdr -> hb_sz;
word descr = hhdr -> hb_descr;
per_object_func f = (per_object_func)fn;
int i = 0;
@@ -275,7 +275,7 @@ void GC_apply_to_each_object(per_object_func f)
GC_apply_to_all_blocks(per_object_helper, (word)f);
}
-static void reset_back_edge(ptr_t p, word n_words, word gc_descr)
+static void reset_back_edge(ptr_t p, word n_bytes, word gc_descr)
{
/* Skip any free list links, or dropped blocks */
if (GC_HAS_DEBUG_INFO(p)) {
@@ -311,20 +311,20 @@ static void reset_back_edge(ptr_t p, word n_words, word gc_descr)
}
}
-static void add_back_edges(ptr_t p, word n_words, word gc_descr)
+static void add_back_edges(ptr_t p, size_t n_bytes, word gc_descr)
{
word *currentp = (word *)(p + sizeof(oh));
/* For now, fix up non-length descriptors conservatively. */
if((gc_descr & GC_DS_TAGS) != GC_DS_LENGTH) {
- gc_descr = WORDS_TO_BYTES(n_words);
+ gc_descr = n_bytes;
}
while (currentp < (word *)(p + gc_descr)) {
word current = *currentp++;
FIXUP_POINTER(current);
if (current >= (word)GC_least_plausible_heap_addr &&
current <= (word)GC_greatest_plausible_heap_addr) {
- ptr_t target = GC_base((GC_PTR)current);
+ ptr_t target = GC_base((void *)current);
if (0 != target) {
add_edge(p, target);
}
@@ -369,7 +369,7 @@ static word backwards_height(ptr_t p)
word this_height;
if (GC_is_marked(q) && !(FLAG_MANY & (word)GET_OH_BG_PTR(p))) {
if (GC_print_stats)
- GC_printf2("Found bogus pointer from 0x%lx to 0x%lx\n", q, p);
+ GC_log_printf("Found bogus pointer from 0x%lx to 0x%lx\n", q, p);
/* Reachable object "points to" unreachable one. */
/* Could be caused by our lax treatment of GC descriptors. */
this_height = 1;
@@ -392,7 +392,7 @@ ptr_t GC_deepest_obj;
/* next GC. */
/* Set GC_max_height to be the maximum height we encounter, and */
/* GC_deepest_obj to be the corresponding object. */
-static void update_max_height(ptr_t p, word n_words, word gc_descr)
+static void update_max_height(ptr_t p, word n_bytes, word gc_descr)
{
if (GC_is_marked(p) && GC_HAS_DEBUG_INFO(p)) {
int i;
@@ -457,8 +457,8 @@ void GC_print_back_graph_stats(void)
GC_print_heap_obj(GC_deepest_obj);
}
if (GC_print_stats) {
- GC_printf1("Needed max total of %ld back-edge structs\n",
- GC_n_back_edge_structs);
+ GC_log_printf("Needed max total of %ld back-edge structs\n",
+ GC_n_back_edge_structs);
}
GC_apply_to_each_object(reset_back_edge);
GC_deepest_obj = 0;
diff --git a/blacklst.c b/blacklst.c
index ae2f95cb..686893d2 100644
--- a/blacklst.c
+++ b/blacklst.c
@@ -50,40 +50,34 @@ word GC_total_stack_black_listed;
word GC_black_list_spacing = MINHINCR*HBLKSIZE; /* Initial rough guess */
-void GC_clear_bl();
+void GC_clear_bl(word *);
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_default_print_heap_obj_proc(ptr_t p)
-# else
- void GC_default_print_heap_obj_proc(p)
- ptr_t p;
-# endif
+void GC_default_print_heap_obj_proc(ptr_t p)
{
ptr_t base = GC_base(p);
- GC_err_printf2("start: 0x%lx, appr. length: %ld", base, GC_size(base));
+ GC_err_printf("start: %p, appr. length: %ld", base,
+ (unsigned long)GC_size(base));
}
-void (*GC_print_heap_obj) GC_PROTO((ptr_t p)) =
- GC_default_print_heap_obj_proc;
+void (*GC_print_heap_obj) (ptr_t p) = GC_default_print_heap_obj_proc;
-void GC_print_source_ptr(p)
-ptr_t p;
+void GC_print_source_ptr(ptr_t p)
{
ptr_t base = GC_base(p);
if (0 == base) {
if (0 == p) {
- GC_err_printf0("in register");
+ GC_err_printf("in register");
} else {
- GC_err_printf0("in root set");
+ GC_err_printf("in root set");
}
} else {
- GC_err_printf0("in object at ");
+ GC_err_printf("in object at ");
(*GC_print_heap_obj)(base);
}
}
-void GC_bl_init()
+void GC_bl_init(void)
{
if (!GC_all_interior_pointers) {
GC_old_normal_bl = (word *)
@@ -91,7 +85,7 @@ void GC_bl_init()
GC_incomplete_normal_bl = (word *)GC_scratch_alloc
((word)(sizeof(page_hash_table)));
if (GC_old_normal_bl == 0 || GC_incomplete_normal_bl == 0) {
- GC_err_printf0("Insufficient memory for black list\n");
+ GC_err_printf("Insufficient memory for black list\n");
EXIT();
}
GC_clear_bl(GC_old_normal_bl);
@@ -101,30 +95,28 @@ void GC_bl_init()
GC_incomplete_stack_bl = (word *)GC_scratch_alloc
((word)(sizeof(page_hash_table)));
if (GC_old_stack_bl == 0 || GC_incomplete_stack_bl == 0) {
- GC_err_printf0("Insufficient memory for black list\n");
+ GC_err_printf("Insufficient memory for black list\n");
EXIT();
}
GC_clear_bl(GC_old_stack_bl);
GC_clear_bl(GC_incomplete_stack_bl);
}
-void GC_clear_bl(doomed)
-word *doomed;
+void GC_clear_bl(word *doomed)
{
BZERO(doomed, sizeof(page_hash_table));
}
-void GC_copy_bl(old, new)
-word *new, *old;
+void GC_copy_bl(word *old, word *new)
{
BCOPY(old, new, sizeof(page_hash_table));
}
-static word total_stack_black_listed();
+static word total_stack_black_listed(void);
/* Signal the completion of a collection. Turn the incomplete black */
/* lists into new black lists, etc. */
-void GC_promote_black_lists()
+void GC_promote_black_lists(void)
{
word * very_old_normal_bl = GC_old_normal_bl;
word * very_old_stack_bl = GC_old_stack_bl;
@@ -138,10 +130,9 @@ void GC_promote_black_lists()
GC_incomplete_normal_bl = very_old_normal_bl;
GC_incomplete_stack_bl = very_old_stack_bl;
GC_total_stack_black_listed = total_stack_black_listed();
-# ifdef PRINTSTATS
- GC_printf1("%ld bytes in heap blacklisted for interior pointers\n",
- (unsigned long)GC_total_stack_black_listed);
-# endif
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("%ld bytes in heap blacklisted for interior pointers\n",
+ (unsigned long)GC_total_stack_black_listed);
if (GC_total_stack_black_listed != 0) {
GC_black_list_spacing =
HBLKSIZE*(GC_heapsize/GC_total_stack_black_listed);
@@ -158,7 +149,7 @@ void GC_promote_black_lists()
}
}
-void GC_unpromote_black_lists()
+void GC_unpromote_black_lists(void)
{
if (!GC_all_interior_pointers) {
GC_copy_bl(GC_old_normal_bl, GC_incomplete_normal_bl);
@@ -170,12 +161,10 @@ void GC_unpromote_black_lists()
/* the plausible heap bounds. */
/* Add it to the normal incomplete black list if appropriate. */
#ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_normal(p, source)
- ptr_t source;
+ void GC_add_to_black_list_normal(word p, ptr_t source)
#else
- void GC_add_to_black_list_normal(p)
+ void GC_add_to_black_list_normal(word p)
#endif
-word p;
{
if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
{
@@ -184,9 +173,9 @@ word p;
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
# ifdef PRINT_BLACK_LIST
if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
- GC_err_printf2(
- "Black listing (normal) 0x%lx referenced from 0x%lx ",
- (unsigned long) p, (unsigned long) source);
+ GC_err_printf(
+ "Black listing (normal) %p referenced from %p ",
+ (ptr_t) p, source);
GC_print_source_ptr(source);
GC_err_puts("\n");
}
@@ -199,21 +188,20 @@ word p;
/* And the same for false pointers from the stack. */
#ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_stack(p, source)
+ void GC_add_to_black_list_stack(word p, ptr_t source)
ptr_t source;
#else
- void GC_add_to_black_list_stack(p)
+ void GC_add_to_black_list_stack(word p)
#endif
-word p;
{
register int index = PHT_HASH(p);
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
# ifdef PRINT_BLACK_LIST
if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
- GC_err_printf2(
- "Black listing (stack) 0x%lx referenced from 0x%lx ",
- (unsigned long)p, (unsigned long)source);
+ GC_err_printf(
+ "Black listing (stack) %p referenced from %p ",
+ (ptr_t)p, source);
GC_print_source_ptr(source);
GC_err_puts("\n");
}
@@ -230,9 +218,7 @@ word p;
* If (h,len) is not black listed, return 0.
* Knows about the structure of the black list hash tables.
*/
-struct hblk * GC_is_black_listed(h, len)
-struct hblk * h;
-word len;
+struct hblk * GC_is_black_listed(struct hblk *h, word len)
{
register int index = PHT_HASH((word)h);
register word i;
@@ -267,8 +253,7 @@ word len;
/* Return the number of blacklisted blocks in a given range. */
/* Used only for statistical purposes. */
/* Looks only at the GC_incomplete_stack_bl. */
-word GC_number_stack_black_listed(start, endp1)
-struct hblk *start, *endp1;
+word GC_number_stack_black_listed(struct hblk *start, struct hblk *endp1)
{
register struct hblk * h;
word result = 0;
@@ -283,7 +268,7 @@ struct hblk *start, *endp1;
/* Return the total number of (stack) black-listed bytes. */
-static word total_stack_black_listed()
+static word total_stack_black_listed(void)
{
register unsigned i;
word total = 0;
diff --git a/checksums.c b/checksums.c
index 57a6ebc2..0942acb4 100644
--- a/checksums.c
+++ b/checksums.c
@@ -50,14 +50,14 @@ struct hblk *h;
# ifdef STUBBORN_ALLOC
/* Check whether a stubborn object from the given block appears on */
/* the appropriate free list. */
-GC_bool GC_on_free_list(h)
+GC_bool GC_on_free_list(struct hblk *h)
struct hblk *h;
{
- register hdr * hhdr = HDR(h);
- register int sz = hhdr -> hb_sz;
+ hdr * hhdr = HDR(h);
+ int sz = BYTES_TO_WORDS(hhdr -> hb_sz);
ptr_t p;
- if (sz > MAXOBJSZ) return(FALSE);
+ if (sz > MAXOBJWORDS) return(FALSE);
for (p = GC_sobjfreelist[sz]; p != 0; p = obj_link(p)) {
if (HBLKPTR(p) == h) return(TRUE);
}
@@ -70,9 +70,7 @@ int GC_n_changed_errors;
int GC_n_clean;
int GC_n_dirty;
-void GC_update_check_page(h, index)
-struct hblk *h;
-int index;
+void GC_update_check_page(struct hblk *h, int index)
{
page_entry *pe = GC_sums + index;
register hdr * hhdr = HDR(h);
@@ -83,8 +81,7 @@ int index;
pe -> new_sum = GC_checksum(h);
# if !defined(MSWIN32) && !defined(MSWINCE)
if (pe -> new_sum != 0x80000000 && !GC_page_was_ever_dirty(h)) {
- GC_printf1("GC_page_was_ever_dirty(0x%lx) is wrong\n",
- (unsigned long)h);
+ GC_printf("GC_page_was_ever_dirty(%p) is wrong\n", h);
}
# endif
if (GC_page_was_dirty(h)) {
@@ -104,7 +101,7 @@ int index;
/* Set breakpoint here */GC_n_dirty_errors++;
}
# ifdef STUBBORN_ALLOC
- if ( hhdr -> hb_map != GC_invalid_map
+ if (!HBLK_IS_FREE(hhdr)
&& hhdr -> hb_obj_kind == STUBBORN
&& !GC_page_was_changed(h)
&& !GC_on_free_list(h)) {
@@ -118,14 +115,14 @@ int index;
pe -> block = h + OFFSET;
}
-word GC_bytes_in_used_blocks;
+unsigned long GC_bytes_in_used_blocks;
void GC_add_block(h, dummy)
struct hblk *h;
word dummy;
{
- register hdr * hhdr = HDR(h);
- register bytes = WORDS_TO_BYTES(hhdr -> hb_sz);
+ hdr * hhdr = HDR(h);
+ bytes = hhdr -> hb_sz;
bytes += HBLKSIZE-1;
bytes &= ~(HBLKSIZE-1);
@@ -134,15 +131,15 @@ word dummy;
void GC_check_blocks()
{
- word bytes_in_free_blocks = GC_large_free_bytes;
+ unsigned long bytes_in_free_blocks = GC_large_free_bytes;
GC_bytes_in_used_blocks = 0;
GC_apply_to_all_blocks(GC_add_block, (word)0);
- GC_printf2("GC_bytes_in_used_blocks = %ld, bytes_in_free_blocks = %ld ",
- GC_bytes_in_used_blocks, bytes_in_free_blocks);
- GC_printf1("GC_heapsize = %ld\n", GC_heapsize);
+ GC_printf("GC_bytes_in_used_blocks = %lu, bytes_in_free_blocks = %lu ",
+ GC_bytes_in_used_blocks, bytes_in_free_blocks);
+ GC_printf("GC_heapsize = %lu\n", (unsigned long)GC_heapsize);
if (GC_bytes_in_used_blocks + bytes_in_free_blocks != GC_heapsize) {
- GC_printf0("LOST SOME BLOCKS!!\n");
+ GC_printf("LOST SOME BLOCKS!!\n");
}
}
@@ -173,18 +170,18 @@ void GC_check_dirty()
}
}
out:
- GC_printf2("Checked %lu clean and %lu dirty pages\n",
+ GC_printf("Checked %lu clean and %lu dirty pages\n",
(unsigned long) GC_n_clean, (unsigned long) GC_n_dirty);
if (GC_n_dirty_errors > 0) {
- GC_printf1("Found %lu dirty bit errors\n",
- (unsigned long)GC_n_dirty_errors);
+ GC_printf("Found %lu dirty bit errors\n",
+ (unsigned long)GC_n_dirty_errors);
}
if (GC_n_changed_errors > 0) {
- GC_printf1("Found %lu changed bit errors\n",
- (unsigned long)GC_n_changed_errors);
- GC_printf0("These may be benign (provoked by nonpointer changes)\n");
+ GC_printf("Found %lu changed bit errors\n",
+ (unsigned long)GC_n_changed_errors);
+ GC_printf("These may be benign (provoked by nonpointer changes)\n");
# ifdef THREADS
- GC_printf0(
+ GC_printf(
"Also expect 1 per thread currently allocating a stubborn obj.\n");
# endif
}
diff --git a/configure b/configure
index 3ea15df2..adc02d48 100755
--- a/configure
+++ b/configure
@@ -1,7 +1,7 @@
#! /bin/sh
# From configure.in Revision: 1.2 .
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.59 for gc 6.4.
+# Generated by GNU Autoconf 2.59 for gc 7.0alpha1.
#
# Report bugs to <Hans.Boehm@hp.com>.
#
@@ -429,8 +429,8 @@ SHELL=${CONFIG_SHELL-/bin/sh}
# Identity of this package.
PACKAGE_NAME='gc'
PACKAGE_TARNAME='gc'
-PACKAGE_VERSION='6.4'
-PACKAGE_STRING='gc 6.4'
+PACKAGE_VERSION='7.0alpha1'
+PACKAGE_STRING='gc 7.0alpha1'
PACKAGE_BUGREPORT='Hans.Boehm@hp.com'
ac_unique_file="gcj_mlc.c"
@@ -471,7 +471,7 @@ ac_includes_default="\
# include <unistd.h>
#endif"
-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO AMTAR install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM AWK SET_MAKE GC_VERSION CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE CXX CXXFLAGS ac_ct_CXX CXXDEPMODE CCAS CCASFLAGS AR ac_ct_AR RANLIB ac_ct_RANLIB MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT GC_CFLAGS THREADDLLIBS POWERPC_DARWIN_TRUE POWERPC_DARWIN_FALSE EXTRA_TEST_LIBS target_all CPLUSPLUS_TRUE CPLUSPLUS_FALSE INCLUDES CXXINCLUDES addobjs addincludes addlibs addtests LN_S ECHO CPP EGREP LIBTOOL MY_CFLAGS UNWINDLIBS USE_LIBDIR_TRUE USE_LIBDIR_FALSE LIBOBJS LTLIBOBJS'
+ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS build build_cpu build_vendor build_os host host_cpu host_vendor host_os target target_cpu target_vendor target_os INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO AMTAR install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM AWK SET_MAKE GC_VERSION CC CFLAGS LDFLAGS CPPFLAGS ac_ct_CC EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CCDEPMODE CXX CXXFLAGS ac_ct_CXX CXXDEPMODE CCAS CCASFLAGS AR ac_ct_AR RANLIB ac_ct_RANLIB MAINTAINER_MODE_TRUE MAINTAINER_MODE_FALSE MAINT GC_CFLAGS THREADLIBS POWERPC_DARWIN_TRUE POWERPC_DARWIN_FALSE EXTRA_TEST_LIBS target_all CPLUSPLUS_TRUE CPLUSPLUS_FALSE INCLUDES CXXINCLUDES addobjs addincludes addlibs addtests LN_S ECHO CPP EGREP LIBTOOL MY_CFLAGS UNWINDLIBS USE_LIBDIR_TRUE USE_LIBDIR_FALSE LIBOBJS LTLIBOBJS'
ac_subst_files=''
# Initialize some variables set by options.
@@ -948,7 +948,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures gc 6.4 to adapt to many kinds of systems.
+\`configure' configures gc 7.0alpha1 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1015,7 +1015,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of gc 6.4:";;
+ short | recursive ) echo "Configuration of gc 7.0alpha1:";;
esac
cat <<\_ACEOF
@@ -1033,7 +1033,7 @@ Optional Features:
--enable-static=PKGS build static libraries default=yes
--enable-fast-install=PKGS optimize for fast installation default=yes
--disable-libtool-lock avoid locking (might break parallel builds)
- --enable-full-debug include full support for pointer backtracing etc.
+ --enable-gc-debug include full support for pointer backtracing etc.
--enable-redirect-malloc redirect malloc and friends to GC routines
--enable-gc-assertions collector-internal assertion checking
@@ -1150,14 +1150,14 @@ esac
else
echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
fi
- cd "$ac_popdir"
+ cd $ac_popdir
done
fi
test -n "$ac_init_help" && exit 0
if $ac_init_version; then
cat <<\_ACEOF
-gc configure 6.4
+gc configure 7.0alpha1
generated by GNU Autoconf 2.59
Copyright (C) 2003 Free Software Foundation, Inc.
@@ -1171,7 +1171,7 @@ cat >&5 <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by gc $as_me 6.4, which was
+It was created by gc $as_me 7.0alpha1, which was
generated by GNU Autoconf 2.59. Invocation command line was
$ $0 $@
@@ -1892,7 +1892,7 @@ fi
# Define the identity of the package.
PACKAGE=gc
- VERSION=6.4
+ VERSION=7.0alpha1
cat >>confdefs.h <<_ACEOF
@@ -2590,7 +2590,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -2648,7 +2649,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -2764,7 +2766,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -2818,7 +2821,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -2863,7 +2867,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -2907,7 +2912,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -3233,7 +3239,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_cxx_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -3291,7 +3298,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_cxx_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -3362,7 +3370,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_cxx_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -3406,7 +3415,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_cxx_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_cxx_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -3821,15 +3831,14 @@ if test "${enable_cplusplus+set}" = set; then
fi;
INCLUDES=-I${srcdir}/include
-THREADDLLIBS=
-## Libraries needed to support dynamic loading and/or threads.
+THREADLIBS=
case "$THREADS" in
no | none | single)
THREADS=none
;;
posix | pthreads)
THREADS=posix
- THREADDLLIBS=-lpthread
+ THREADLIBS=-lpthread
case "$host" in
x86-*-linux* | ia64-*-linux* | i586-*-linux* | i686-*-linux* | x86_64-*-linux* | alpha-*-linux*)
cat >>confdefs.h <<\_ACEOF
@@ -3892,7 +3901,7 @@ _ACEOF
#define THREAD_LOCAL_ALLOC 1
_ACEOF
- THREADDLLIBS="-lpthread -lrt"
+ THREADLIBS="-lpthread -lrt"
;;
*-*-freebsd*)
{ echo "$as_me:$LINENO: WARNING: \"FreeBSD does not yet fully support threads with Boehm GC.\"" >&5
@@ -3902,7 +3911,7 @@ echo "$as_me: WARNING: \"FreeBSD does not yet fully support threads with Boehm G
_ACEOF
INCLUDES="$INCLUDES -pthread"
- THREADDLLIBS=-pthread
+ THREADLIBS=-pthread
;;
*-*-solaris*)
cat >>confdefs.h <<\_ACEOF
@@ -3960,12 +3969,7 @@ _ACEOF
# Measurements havent yet been done.
fi
INCLUDES="$INCLUDES -pthread"
- THREADDLLIBS="-lpthread -lrt"
- ;;
- *)
- { { echo "$as_me:$LINENO: error: \"Pthreads not supported by the GC on this platform.\"" >&5
-echo "$as_me: error: \"Pthreads not supported by the GC on this platform.\"" >&2;}
- { (exit 1); exit 1; }; }
+ THREADLIBS="-lpthread -lrt"
;;
esac
;;
@@ -3981,10 +3985,10 @@ _ACEOF
;;
dgux386)
THREADS=dgux386
- echo "$as_me:$LINENO: result: $THREADDLLIBS" >&5
-echo "${ECHO_T}$THREADDLLIBS" >&6
+ echo "$as_me:$LINENO: result: $THREADLIBS" >&5
+echo "${ECHO_T}$THREADLIBS" >&6
# Use pthread GCC switch
- THREADDLLIBS=-pthread
+ THREADLIBS=-pthread
if test "${enable_parallel_mark}" = yes; then
cat >>confdefs.h <<\_ACEOF
#define PARALLEL_MARK 1
@@ -4008,7 +4012,7 @@ _ACEOF
;;
aix)
THREADS=posix
- THREADDLLIBS=-lpthread
+ THREADLIBS=-lpthread
cat >>confdefs.h <<\_ACEOF
#define GC_AIX_THREADS 1
_ACEOF
@@ -4091,7 +4095,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -4117,7 +4122,7 @@ fi
echo "$as_me:$LINENO: result: $ac_cv_lib_dl_dlopen" >&5
echo "${ECHO_T}$ac_cv_lib_dl_dlopen" >&6
if test $ac_cv_lib_dl_dlopen = yes; then
- THREADDLLIBS="$THREADDLLIBS -ldl"
+ EXTRA_TEST_LIBS="$EXTRA_TEST_LIBS -ldl"
fi
;;
@@ -4272,7 +4277,7 @@ _ACEOF
_ACEOF
;;
- sparc*-sun-solaris2.*)
+ sparc-sun-solaris2.*)
machdep="sparc_mach_dep.lo"
;;
ia64-*-*)
@@ -5174,7 +5179,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -5344,7 +5350,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -5411,7 +5418,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -5845,7 +5853,7 @@ test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
case $host in
*-*-irix6*)
# Find out which ABI we are using.
- echo '#line 5848 "configure"' > conftest.$ac_ext
+ echo '#line 5856 "configure"' > conftest.$ac_ext
if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
(eval $ac_compile) 2>&5
ac_status=$?
@@ -5907,7 +5915,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -6249,7 +6258,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -6345,7 +6355,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -6404,7 +6415,7 @@ chmod -w .
save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS -o out/conftest2.$ac_objext"
compiler_c_o=no
-if { (eval echo configure:6407: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>out/conftest.err; } && test -s out/conftest2.$ac_objext; then
+if { (eval echo configure:6418: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>out/conftest.err; } && test -s out/conftest2.$ac_objext; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings
if test -s out/conftest.err; then
@@ -6468,7 +6479,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -6561,7 +6573,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -7863,7 +7876,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -7928,7 +7942,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -8021,7 +8036,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -8086,7 +8102,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -8152,7 +8169,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -8218,7 +8236,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -8293,7 +8312,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<EOF
-#line 8296 "configure"
+#line 8315 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -8391,7 +8410,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<EOF
-#line 8394 "configure"
+#line 8413 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -9150,7 +9169,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -9302,7 +9322,7 @@ fi
echo "$as_me:$LINENO: checking whether Solaris gcc optimization fix is necessary" >&5
echo $ECHO_N "checking whether Solaris gcc optimization fix is necessary... $ECHO_C" >&6
case "$host" in
- *aix*)
+ sparc-sun-solaris2*|*aix*)
if test "$GCC" = yes; then
echo "$as_me:$LINENO: result: yes" >&5
echo "${ECHO_T}yes" >&6
@@ -9330,14 +9350,6 @@ MY_CFLAGS="$CFLAGS"
cat >>confdefs.h <<\_ACEOF
-#define SILENT 1
-_ACEOF
-
-cat >>confdefs.h <<\_ACEOF
-#define NO_SIGNALS 1
-_ACEOF
-
-cat >>confdefs.h <<\_ACEOF
#define NO_EXECUTE_PERMISSION 1
_ACEOF
@@ -9379,10 +9391,10 @@ _ACEOF
fi
UNWINDLIBS=
-# Check whether --enable-full-debug or --disable-full-debug was given.
-if test "${enable_full_debug+set}" = set; then
- enableval="$enable_full_debug"
- if test "$enable_full_debug" = "yes"; then
+# Check whether --enable-gc-debug or --disable-gc-debug was given.
+if test "${enable_gc_debug+set}" = set; then
+ enableval="$enable_gc_debug"
+ if test "$enable_gc_debug" = "yes"; then
{ echo "$as_me:$LINENO: WARNING: \"Should define GC_DEBUG and use debug alloc. in clients.\"" >&5
echo "$as_me: WARNING: \"Should define GC_DEBUG and use debug alloc. in clients.\"" >&2;}
cat >>confdefs.h <<\_ACEOF
@@ -9441,7 +9453,8 @@ if { (eval echo "$as_me:$LINENO: \"$ac_link\"") >&5
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
- { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { ac_try='test -z "$ac_c_werror_flag"
+ || test ! -s conftest.err'
{ (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
(eval $ac_try) 2>&5
ac_status=$?
@@ -9984,7 +9997,7 @@ _ASBOX
} >&5
cat >&5 <<_CSEOF
-This file was extended by gc $as_me 6.4, which was
+This file was extended by gc $as_me 7.0alpha1, which was
generated by GNU Autoconf 2.59. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -10042,7 +10055,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF
ac_cs_version="\\
-gc config.status 6.4
+gc config.status 7.0alpha1
configured by $0, generated by GNU Autoconf 2.59,
with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
@@ -10305,7 +10318,7 @@ s,@MAINTAINER_MODE_TRUE@,$MAINTAINER_MODE_TRUE,;t t
s,@MAINTAINER_MODE_FALSE@,$MAINTAINER_MODE_FALSE,;t t
s,@MAINT@,$MAINT,;t t
s,@GC_CFLAGS@,$GC_CFLAGS,;t t
-s,@THREADDLLIBS@,$THREADDLLIBS,;t t
+s,@THREADLIBS@,$THREADLIBS,;t t
s,@POWERPC_DARWIN_TRUE@,$POWERPC_DARWIN_TRUE,;t t
s,@POWERPC_DARWIN_FALSE@,$POWERPC_DARWIN_FALSE,;t t
s,@EXTRA_TEST_LIBS@,$EXTRA_TEST_LIBS,;t t
@@ -10495,6 +10508,11 @@ esac
*) ac_INSTALL=$ac_top_builddir$INSTALL ;;
esac
+ if test x"$ac_file" != x-; then
+ { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+ rm -f "$ac_file"
+ fi
# Let's still pretend it is `configure' which instantiates (i.e., don't
# use $as_me), people would be surprised to read:
# /* config.h. Generated by config.status. */
@@ -10533,12 +10551,6 @@ echo "$as_me: error: cannot find input file: $f" >&2;}
fi;;
esac
done` || { (exit 1); exit 1; }
-
- if test x"$ac_file" != x-; then
- { echo "$as_me:$LINENO: creating $ac_file" >&5
-echo "$as_me: creating $ac_file" >&6;}
- rm -f "$ac_file"
- fi
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF
sed "$ac_vpsub
diff --git a/configure.in b/configure.in
index c2a6ea95..798a8359 100644
--- a/configure.in
+++ b/configure.in
@@ -17,7 +17,7 @@ dnl Process this file with autoconf to produce configure.
# Initialization
# ==============
-AC_INIT(gc,6.4,Hans.Boehm@hp.com)
+AC_INIT(gc,7.0alpha1,Hans.Boehm@hp.com)
## version must conform to [0-9]+[.][0-9]+(alpha[0-9]+)?
AC_CONFIG_SRCDIR(gcj_mlc.c)
AC_CANONICAL_TARGET
@@ -68,15 +68,14 @@ AC_ARG_ENABLE(cplusplus,
)
INCLUDES=-I${srcdir}/include
-THREADDLLIBS=
-## Libraries needed to support dynamic loading and/or threads.
+THREADLIBS=
case "$THREADS" in
no | none | single)
THREADS=none
;;
posix | pthreads)
THREADS=posix
- THREADDLLIBS=-lpthread
+ THREADLIBS=-lpthread
case "$host" in
x86-*-linux* | ia64-*-linux* | i586-*-linux* | i686-*-linux* | x86_64-*-linux* | alpha-*-linux*)
AC_DEFINE(GC_LINUX_THREADS)
@@ -102,13 +101,13 @@ case "$THREADS" in
AC_DEFINE(PARALLEL_MARK)
fi
AC_DEFINE(THREAD_LOCAL_ALLOC)
- THREADDLLIBS="-lpthread -lrt"
+ THREADLIBS="-lpthread -lrt"
;;
*-*-freebsd*)
AC_MSG_WARN("FreeBSD does not yet fully support threads with Boehm GC.")
AC_DEFINE(GC_FREEBSD_THREADS)
INCLUDES="$INCLUDES -pthread"
- THREADDLLIBS=-pthread
+ THREADLIBS=-pthread
;;
*-*-solaris*)
AC_DEFINE(GC_SOLARIS_THREADS)
@@ -136,10 +135,7 @@ case "$THREADS" in
# Measurements havent yet been done.
fi
INCLUDES="$INCLUDES -pthread"
- THREADDLLIBS="-lpthread -lrt"
- ;;
- *)
- AC_MSG_ERROR("Pthreads not supported by the GC on this platform.")
+ THREADLIBS="-lpthread -lrt"
;;
esac
;;
@@ -150,9 +146,9 @@ case "$THREADS" in
;;
dgux386)
THREADS=dgux386
- AC_MSG_RESULT($THREADDLLIBS)
+ AC_MSG_RESULT($THREADLIBS)
# Use pthread GCC switch
- THREADDLLIBS=-pthread
+ THREADLIBS=-pthread
if test "${enable_parallel_mark}" = yes; then
AC_DEFINE(PARALLEL_MARK)
fi
@@ -164,7 +160,7 @@ case "$THREADS" in
;;
aix)
THREADS=posix
- THREADDLLIBS=-lpthread
+ THREADLIBS=-lpthread
AC_DEFINE(GC_AIX_THREADS)
AC_DEFINE(_REENTRANT)
;;
@@ -175,7 +171,7 @@ case "$THREADS" in
AC_MSG_ERROR($THREADS is an unknown thread package)
;;
esac
-AC_SUBST(THREADDLLIBS)
+AC_SUBST(THREADLIBS)
case "$host" in
powerpc-*-darwin*)
@@ -189,7 +185,7 @@ AM_CONDITIONAL(POWERPC_DARWIN,test x$powerpc_darwin = xtrue)
case "$host" in
*-*-darwin*) ;;
*)
- AC_CHECK_LIB(dl, dlopen, THREADDLLIBS="$THREADDLLIBS -ldl")
+ AC_CHECK_LIB(dl, dlopen, EXTRA_TEST_LIBS="$EXTRA_TEST_LIBS -ldl")
;;
esac
@@ -290,7 +286,7 @@ case "$host" in
machdep="sparc_mach_dep.lo"
AC_DEFINE(SUNOS53_SHARED_LIB)
;;
- sparc*-sun-solaris2.*)
+ sparc-sun-solaris2.*)
machdep="sparc_mach_dep.lo"
;;
ia64-*-*)
@@ -349,10 +345,10 @@ fi
dnl As of 4.13a2, the collector will not properly work on Solaris when
dnl built with gcc and -O. So we remove -O in the appropriate case.
-dnl Not needed anymore on Solaris.
+dnl
AC_MSG_CHECKING(whether Solaris gcc optimization fix is necessary)
case "$host" in
- *aix*)
+ sparc-sun-solaris2*|*aix*)
if test "$GCC" = yes; then
AC_MSG_RESULT(yes)
new_CFLAGS=
@@ -379,8 +375,6 @@ AC_SUBST(MY_CFLAGS)
dnl Include defines that have become de facto standard.
dnl ALL_INTERIOR_POINTERS can be overridden in startup code.
-AC_DEFINE(SILENT)
-AC_DEFINE(NO_SIGNALS)
AC_DEFINE(NO_EXECUTE_PERMISSION)
AC_DEFINE(ALL_INTERIOR_POINTERS)
@@ -400,9 +394,9 @@ if test -n "${with_cross_host}"; then
fi
UNWINDLIBS=
-AC_ARG_ENABLE(full-debug,
-[ --enable-full-debug include full support for pointer backtracing etc.],
-[ if test "$enable_full_debug" = "yes"; then
+AC_ARG_ENABLE(gc-debug,
+[ --enable-gc-debug include full support for pointer backtracing etc.],
+[ if test "$enable_gc_debug" = "yes"; then
AC_MSG_WARN("Should define GC_DEBUG and use debug alloc. in clients.")
AC_DEFINE(KEEP_BACK_PTRS)
AC_DEFINE(DBG_HDRS_ALL)
diff --git a/darwin_stop_world.c b/darwin_stop_world.c
index cb81e7c9..3fc5bee6 100644
--- a/darwin_stop_world.c
+++ b/darwin_stop_world.c
@@ -34,7 +34,7 @@ unsigned int FindTopOfStack(unsigned int stack_start) {
}
# ifdef DEBUG_THREADS
- /* GC_printf1("FindTopOfStack start at sp = %p\n", frame); */
+ /* GC_printf("FindTopOfStack start at sp = %p\n", frame); */
# endif
do {
if (frame->savedSP == NULL) break;
@@ -50,7 +50,7 @@ unsigned int FindTopOfStack(unsigned int stack_start) {
} while (1);
# ifdef DEBUG_THREADS
- /* GC_printf1("FindTopOfStack finish at sp = %p\n", frame); */
+ /* GC_printf("FindTopOfStack finish at sp = %p\n", frame); */
# endif
return (unsigned int)frame;
@@ -146,11 +146,9 @@ void GC_push_all_stacks() {
# endif /* !POWERPC */
}
# if DEBUG_THREADS
- GC_printf3("Darwin: Stack for thread 0x%lx = [%lx,%lx)\n",
- (unsigned long) thread,
- (unsigned long) lo,
- (unsigned long) hi
- );
+ GC_printf("Darwin: Stack for thread 0x%lx = [%p,%p)\n",
+ (unsigned long) thread, lo, hi
+ );
# endif
GC_push_all_stack(lo, hi);
} /* for(p=GC_threads[i]...) */
@@ -183,7 +181,7 @@ int GC_suspend_thread_list(thread_act_array_t act_list, int count,
for(i = 0; i < count; i++) {
thread_act_t thread = act_list[i];
# if DEBUG_THREADS
- GC_printf1("Attempting to suspend thread %p\n", thread);
+ GC_printf("Attempting to suspend thread %p\n", thread);
# endif
/* find the current thread in the old list */
int found = 0;
@@ -221,7 +219,8 @@ int GC_suspend_thread_list(thread_act_array_t act_list, int count,
continue;
}
# if DEBUG_THREADS
- GC_printf2("Thread state for 0x%lx = %d\n", thread, info.run_state);
+ GC_printf("Thread state for 0x%lx = %d\n",
+ (unsigned long)thread, info.run_state);
# endif
if (!found) {
GC_mach_threads[GC_mach_threads_count].already_suspended = info.suspend_count;
@@ -229,7 +228,7 @@ int GC_suspend_thread_list(thread_act_array_t act_list, int count,
if (info.suspend_count) continue;
# if DEBUG_THREADS
- GC_printf1("Suspending 0x%lx\n", thread);
+ GC_printf("Suspending 0x%lx\n", (unsigned long)thread);
# endif
/* Suspend the thread */
kern_result = thread_suspend(thread);
@@ -261,7 +260,8 @@ void GC_stop_world()
mach_msg_type_number_t listcount, prevcount;
# if DEBUG_THREADS
- GC_printf1("Stopping the world from 0x%lx\n", mach_thread_self());
+ GC_printf("Stopping the world from 0x%lx\n",
+ (unsigned long)mach_thread_self());
# endif
/* clear out the mach threads list table */
@@ -310,7 +310,7 @@ void GC_stop_world()
GC_release_mark_lock();
# endif
#if DEBUG_THREADS
- GC_printf1("World stopped from 0x%lx\n", my_thread);
+ GC_printf("World stopped from 0x%lx\n", (unsigned long)my_thread);
#endif
}
@@ -328,7 +328,7 @@ void GC_start_world()
mach_msg_type_number_t outCount = THREAD_INFO_MAX;
# if DEBUG_THREADS
- GC_printf0("World starting\n");
+ GC_printf("World starting\n");
# endif
# ifdef MPROTECT_VDB
@@ -348,7 +348,7 @@ void GC_start_world()
if (thread == GC_mach_threads[j].thread) {
if (GC_mach_threads[j].already_suspended) {
# if DEBUG_THREADS
- GC_printf1("Not resuming already suspended thread %p\n", thread);
+ GC_printf("Not resuming already suspended thread %p\n", thread);
# endif
continue;
}
@@ -356,9 +356,9 @@ void GC_start_world()
(thread_info_t)&info, &outCount);
if(kern_result != KERN_SUCCESS) ABORT("thread_info failed");
# if DEBUG_THREADS
- GC_printf2("Thread state for 0x%lx = %d\n", thread,
+ GC_printf("Thread state for 0x%lx = %d\n", (unsigned long)thread,
info.run_state);
- GC_printf1("Resuming 0x%lx\n", thread);
+ GC_printf("Resuming 0x%lx\n", (unsigned long)thread);
# endif
/* Resume the thread */
kern_result = thread_resume(thread);
@@ -368,7 +368,7 @@ void GC_start_world()
}
}
# if DEBUG_THREADS
- GC_printf0("World started\n");
+ GC_printf("World started\n");
# endif
}
diff --git a/dbg_mlc.c b/dbg_mlc.c
index aacbb7a1..643f0e2d 100644
--- a/dbg_mlc.c
+++ b/dbg_mlc.c
@@ -18,8 +18,8 @@
void GC_default_print_heap_obj_proc();
GC_API void GC_register_finalizer_no_order
- GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
- GC_finalization_proc *ofn, GC_PTR *ocd));
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
#ifndef SHORT_DBG_HDRS
@@ -32,8 +32,7 @@ GC_API void GC_register_finalizer_no_order
/* on free lists may not have debug information set. Thus it's */
/* not always safe to return TRUE, even if the client does */
/* its part. */
-GC_bool GC_has_other_debug_info(p)
-ptr_t p;
+GC_bool GC_has_other_debug_info(ptr_t p)
{
register oh * ohdr = (oh *)p;
register ptr_t body = (ptr_t)(ohdr + 1);
@@ -59,7 +58,7 @@ ptr_t p;
# include <stdlib.h>
-# if defined(LINUX) || defined(SUNOS4) || defined(SUNOS5) \
+# if defined(LINUX) || defined(SUNOS5) \
|| defined(HPUX) || defined(IRIX5) || defined(OSF1)
# define RANDOM() random()
# else
@@ -180,34 +179,34 @@ ptr_t p;
void *base;
GC_print_heap_obj(GC_base(current));
- GC_err_printf0("\n");
+ GC_err_printf("\n");
for (i = 0; ; ++i) {
source = GC_get_back_ptr_info(current, &base, &offset);
if (GC_UNREFERENCED == source) {
- GC_err_printf0("Reference could not be found\n");
+ GC_err_printf("Reference could not be found\n");
goto out;
}
if (GC_NO_SPACE == source) {
- GC_err_printf0("No debug info in object: Can't find reference\n");
+ GC_err_printf("No debug info in object: Can't find reference\n");
goto out;
}
- GC_err_printf1("Reachable via %d levels of pointers from ",
+ GC_err_printf("Reachable via %d levels of pointers from ",
(unsigned long)i);
switch(source) {
case GC_REFD_FROM_ROOT:
- GC_err_printf1("root at 0x%lx\n\n", (unsigned long)base);
+ GC_err_printf("root at %p\n\n", base);
goto out;
case GC_REFD_FROM_REG:
- GC_err_printf0("root in register\n\n");
+ GC_err_printf("root in register\n\n");
goto out;
case GC_FINALIZER_REFD:
- GC_err_printf0("list of finalizable objects\n\n");
+ GC_err_printf("list of finalizable objects\n\n");
goto out;
case GC_REFD_FROM_HEAP:
- GC_err_printf1("offset %ld in object:\n", (unsigned long)offset);
+ GC_err_printf("offset %ld in object:\n", (unsigned long)offset);
/* Take GC_base(base) to get real base, i.e. header. */
GC_print_heap_obj(GC_base(base));
- GC_err_printf0("\n");
+ GC_err_printf("\n");
break;
}
current = base;
@@ -221,7 +220,7 @@ ptr_t p;
{
void * current;
current = GC_generate_random_valid_address();
- GC_printf1("\n****Chose address 0x%lx in object\n", (unsigned long)current);
+ GC_printf("\n****Chose address %p in object\n", current);
GC_print_backtrace(current);
}
@@ -237,11 +236,7 @@ ptr_t p;
(((word)(p + sizeof(oh) + sz - 1) ^ (word)p) >= HBLKSIZE)
/* Store debugging info into p. Return displaced pointer. */
/* Assumes we don't hold allocation lock. */
-ptr_t GC_store_debug_info(p, sz, string, integer)
-register ptr_t p; /* base pointer */
-word sz; /* bytes */
-GC_CONST char * string;
-word integer;
+ptr_t GC_store_debug_info(ptr_t p, word sz, const char *string, word integer)
{
register word * result = (word *)((oh *)p + 1);
DCL_LOCK_STATE;
@@ -273,11 +268,7 @@ word integer;
#ifdef DBG_HDRS_ALL
/* Store debugging info into p. Return displaced pointer. */
/* This version assumes we do hold the allocation lock. */
-ptr_t GC_store_debug_info_inner(p, sz, string, integer)
-register ptr_t p; /* base pointer */
-word sz; /* bytes */
-char * string;
-word integer;
+ptr_t GC_store_debug_info_inner(ptr_t p, word sz, char *string, word integer)
{
register word * result = (word *)((oh *)p + 1);
@@ -375,7 +366,8 @@ ptr_t p;
GC_err_puts("STUBBORN");
break;
default:
- GC_err_printf2("kind %ld, descr 0x%lx", kind, hhdr -> hb_descr);
+ GC_err_printf("kind %d, descr 0x%lx", kind,
+ (unsigned long)(hhdr -> hb_descr));
}
}
}
@@ -388,12 +380,12 @@ ptr_t p;
register oh * ohdr = (oh *)GC_base(p);
GC_ASSERT(!I_HOLD_LOCK());
- GC_err_printf1("0x%lx (", ((unsigned long)ohdr + sizeof(oh)));
+ GC_err_printf("%p (", ((ptr_t)ohdr + sizeof(oh)));
GC_err_puts(ohdr -> oh_string);
# ifdef SHORT_DBG_HDRS
- GC_err_printf1(":%ld, ", (unsigned long)(ohdr -> oh_int));
+ GC_err_printf(":%ld, ", (unsigned long)(ohdr -> oh_int));
# else
- GC_err_printf2(":%ld, sz=%ld, ", (unsigned long)(ohdr -> oh_int),
+ GC_err_printf(":%ld, sz=%ld, ", (unsigned long)(ohdr -> oh_int),
(unsigned long)(ohdr -> oh_sz));
# endif
GC_print_type((ptr_t)(ohdr + 1));
@@ -401,12 +393,7 @@ ptr_t p;
PRINT_CALL_CHAIN(ohdr);
}
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_debug_print_heap_obj_proc(ptr_t p)
-# else
- void GC_debug_print_heap_obj_proc(p)
- ptr_t p;
-# endif
+void GC_debug_print_heap_obj_proc(ptr_t p)
{
GC_ASSERT(!I_HOLD_LOCK());
if (GC_HAS_DEBUG_INFO(p)) {
@@ -417,17 +404,15 @@ ptr_t p;
}
#ifndef SHORT_DBG_HDRS
-void GC_print_smashed_obj(p, clobbered_addr)
-ptr_t p, clobbered_addr;
+void GC_print_smashed_obj(ptr_t p, ptr_t clobbered_addr)
{
register oh * ohdr = (oh *)GC_base(p);
GC_ASSERT(!I_HOLD_LOCK());
- GC_err_printf2("0x%lx in object at 0x%lx(", (unsigned long)clobbered_addr,
- (unsigned long)p);
+ GC_err_printf("%p in object at %p(", clobbered_addr, p);
if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
|| ohdr -> oh_string == 0) {
- GC_err_printf1("<smashed>, appr. sz = %ld)\n",
+ GC_err_printf("<smashed>, appr. sz = %ld)\n",
(GC_size((ptr_t)ohdr) - DEBUG_BYTES));
} else {
if (ohdr -> oh_string[0] == '\0') {
@@ -435,20 +420,20 @@ ptr_t p, clobbered_addr;
} else {
GC_err_puts(ohdr -> oh_string);
}
- GC_err_printf2(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
+ GC_err_printf(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
(unsigned long)(ohdr -> oh_sz));
PRINT_CALL_CHAIN(ohdr);
}
}
#endif
-void GC_check_heap_proc GC_PROTO((void));
+void GC_check_heap_proc (void);
-void GC_print_all_smashed_proc GC_PROTO((void));
+void GC_print_all_smashed_proc (void);
-void GC_do_nothing() {}
+void GC_do_nothing(void) {}
-void GC_start_debugging()
+void GC_start_debugging(void)
{
# ifndef SHORT_DBG_HDRS
GC_check_heap = GC_check_heap_proc;
@@ -464,36 +449,21 @@ void GC_start_debugging()
size_t GC_debug_header_size = sizeof(oh);
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_debug_register_displacement(GC_word offset)
-# else
- void GC_debug_register_displacement(offset)
- GC_word offset;
-# endif
+void GC_debug_register_displacement(size_t offset)
{
GC_register_displacement(offset);
GC_register_displacement((word)sizeof(oh) + offset);
}
-# ifdef __STDC__
- GC_PTR GC_debug_malloc(size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_malloc(lb, s, i)
- size_t lb;
- char * s;
- int i;
-# ifdef GC_ADD_CALLER
- --> GC_ADD_CALLER not implemented for K&R C
-# endif
-# endif
+void * GC_debug_malloc(size_t lb, GC_EXTRA_PARAMS)
{
- GC_PTR result = GC_malloc(lb + DEBUG_BYTES);
+ void * result = GC_malloc(lb + DEBUG_BYTES);
if (result == 0) {
- GC_err_printf1("GC_debug_malloc(%ld) returning NIL (",
- (unsigned long) lb);
+ GC_err_printf("GC_debug_malloc(%lu) returning NIL (",
+ (unsigned long) lb);
GC_err_puts(s);
- GC_err_printf1(":%ld)\n", (unsigned long)i);
+ GC_err_printf(":%ld)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
@@ -503,25 +473,15 @@ size_t GC_debug_header_size = sizeof(oh);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
-# ifdef __STDC__
- GC_PTR GC_debug_malloc_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_malloc_ignore_off_page(lb, s, i)
- size_t lb;
- char * s;
- int i;
-# ifdef GC_ADD_CALLER
- --> GC_ADD_CALLER not implemented for K&R C
-# endif
-# endif
+void * GC_debug_malloc_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
{
- GC_PTR result = GC_malloc_ignore_off_page(lb + DEBUG_BYTES);
+ void * result = GC_malloc_ignore_off_page(lb + DEBUG_BYTES);
if (result == 0) {
- GC_err_printf1("GC_debug_malloc_ignore_off_page(%ld) returning NIL (",
+ GC_err_printf("GC_debug_malloc_ignore_off_page(%lu) returning NIL (",
(unsigned long) lb);
GC_err_puts(s);
- GC_err_printf1(":%ld)\n", (unsigned long)i);
+ GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
@@ -531,25 +491,15 @@ size_t GC_debug_header_size = sizeof(oh);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
-# ifdef __STDC__
- GC_PTR GC_debug_malloc_atomic_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_malloc_atomic_ignore_off_page(lb, s, i)
- size_t lb;
- char * s;
- int i;
-# ifdef GC_ADD_CALLER
- --> GC_ADD_CALLER not implemented for K&R C
-# endif
-# endif
+void * GC_debug_malloc_atomic_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
{
- GC_PTR result = GC_malloc_atomic_ignore_off_page(lb + DEBUG_BYTES);
+ void * result = GC_malloc_atomic_ignore_off_page(lb + DEBUG_BYTES);
if (result == 0) {
- GC_err_printf1("GC_debug_malloc_atomic_ignore_off_page(%ld)"
+ GC_err_printf("GC_debug_malloc_atomic_ignore_off_page(%lu)"
" returning NIL (", (unsigned long) lb);
GC_err_puts(s);
- GC_err_printf1(":%ld)\n", (unsigned long)i);
+ GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
@@ -568,12 +518,12 @@ size_t GC_debug_header_size = sizeof(oh);
* We assume debugging was started in collector initialization,
* and we already hold the GC lock.
*/
- GC_PTR GC_debug_generic_malloc_inner(size_t lb, int k)
+ void * GC_debug_generic_malloc_inner(size_t lb, int k)
{
- GC_PTR result = GC_generic_malloc_inner(lb + DEBUG_BYTES, k);
+ void * result = GC_generic_malloc_inner(lb + DEBUG_BYTES, k);
if (result == 0) {
- GC_err_printf1("GC internal allocation (%ld bytes) returning NIL\n",
+ GC_err_printf("GC internal allocation (%lu bytes) returning NIL\n",
(unsigned long) lb);
return(0);
}
@@ -581,13 +531,13 @@ size_t GC_debug_header_size = sizeof(oh);
return (GC_store_debug_info_inner(result, (word)lb, "INTERNAL", (word)0));
}
- GC_PTR GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, int k)
+ void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
- GC_PTR result = GC_generic_malloc_inner_ignore_off_page(
+ void * result = GC_generic_malloc_inner_ignore_off_page(
lb + DEBUG_BYTES, k);
if (result == 0) {
- GC_err_printf1("GC internal allocation (%ld bytes) returning NIL\n",
+ GC_err_printf("GC internal allocation (%lu bytes) returning NIL\n",
(unsigned long) lb);
return(0);
}
@@ -597,22 +547,15 @@ size_t GC_debug_header_size = sizeof(oh);
# endif
#ifdef STUBBORN_ALLOC
-# ifdef __STDC__
- GC_PTR GC_debug_malloc_stubborn(size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_malloc_stubborn(lb, s, i)
- size_t lb;
- char * s;
- int i;
-# endif
+void * GC_debug_malloc_stubborn(size_t lb, GC_EXTRA_PARAMS)
{
- GC_PTR result = GC_malloc_stubborn(lb + DEBUG_BYTES);
+ void * result = GC_malloc_stubborn(lb + DEBUG_BYTES);
if (result == 0) {
- GC_err_printf1("GC_debug_malloc(%ld) returning NIL (",
- (unsigned long) lb);
+ GC_err_printf("GC_debug_malloc(%lu) returning NIL (",
+ (unsigned long) lb);
GC_err_puts(s);
- GC_err_printf1(":%ld)\n", (unsigned long)i);
+ GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
@@ -622,41 +565,35 @@ size_t GC_debug_header_size = sizeof(oh);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
-void GC_debug_change_stubborn(p)
-GC_PTR p;
+void GC_debug_change_stubborn(void *p)
{
- register GC_PTR q = GC_base(p);
- register hdr * hhdr;
+ void * q = GC_base(p);
+ hdr * hhdr;
if (q == 0) {
- GC_err_printf1("Bad argument: 0x%lx to GC_debug_change_stubborn\n",
- (unsigned long) p);
+ GC_err_printf("Bad argument: %p to GC_debug_change_stubborn\n", p);
ABORT("GC_debug_change_stubborn: bad arg");
}
hhdr = HDR(q);
if (hhdr -> hb_obj_kind != STUBBORN) {
- GC_err_printf1("GC_debug_change_stubborn arg not stubborn: 0x%lx\n",
- (unsigned long) p);
+ GC_err_printf("GC_debug_change_stubborn arg not stubborn: %p\n", p);
ABORT("GC_debug_change_stubborn: arg not stubborn");
}
GC_change_stubborn(q);
}
-void GC_debug_end_stubborn_change(p)
-GC_PTR p;
+void GC_debug_end_stubborn_change(void *p)
{
- register GC_PTR q = GC_base(p);
+ register void * q = GC_base(p);
register hdr * hhdr;
if (q == 0) {
- GC_err_printf1("Bad argument: 0x%lx to GC_debug_end_stubborn_change\n",
- (unsigned long) p);
+ GC_err_printf("Bad argument: %p to GC_debug_end_stubborn_change\n", p);
ABORT("GC_debug_end_stubborn_change: bad arg");
}
hhdr = HDR(q);
if (hhdr -> hb_obj_kind != STUBBORN) {
- GC_err_printf1("debug_end_stubborn_change arg not stubborn: 0x%lx\n",
- (unsigned long) p);
+ GC_err_printf("debug_end_stubborn_change arg not stubborn: %p\n", p);
ABORT("GC_debug_end_stubborn_change: arg not stubborn");
}
GC_end_stubborn_change(q);
@@ -664,46 +601,32 @@ GC_PTR p;
#else /* !STUBBORN_ALLOC */
-# ifdef __STDC__
- GC_PTR GC_debug_malloc_stubborn(size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_malloc_stubborn(lb, s, i)
- size_t lb;
- char * s;
- int i;
-# endif
+void * GC_debug_malloc_stubborn(size_t lb, GC_EXTRA_PARAMS)
{
return GC_debug_malloc(lb, OPT_RA s, i);
}
void GC_debug_change_stubborn(p)
-GC_PTR p;
+void * p;
{
}
void GC_debug_end_stubborn_change(p)
-GC_PTR p;
+void * p;
{
}
#endif /* !STUBBORN_ALLOC */
-# ifdef __STDC__
- GC_PTR GC_debug_malloc_atomic(size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_malloc_atomic(lb, s, i)
- size_t lb;
- char * s;
- int i;
-# endif
+void * GC_debug_malloc_atomic(size_t lb, GC_EXTRA_PARAMS)
{
- GC_PTR result = GC_malloc_atomic(lb + DEBUG_BYTES);
+ void * result = GC_malloc_atomic(lb + DEBUG_BYTES);
if (result == 0) {
- GC_err_printf1("GC_debug_malloc_atomic(%ld) returning NIL (",
+ GC_err_printf("GC_debug_malloc_atomic(%lu) returning NIL (",
(unsigned long) lb);
GC_err_puts(s);
- GC_err_printf1(":%ld)\n", (unsigned long)i);
+ GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
@@ -713,22 +636,15 @@ GC_PTR p;
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
-# ifdef __STDC__
- GC_PTR GC_debug_malloc_uncollectable(size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_malloc_uncollectable(lb, s, i)
- size_t lb;
- char * s;
- int i;
-# endif
+void * GC_debug_malloc_uncollectable(size_t lb, GC_EXTRA_PARAMS)
{
- GC_PTR result = GC_malloc_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
+ void * result = GC_malloc_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
if (result == 0) {
- GC_err_printf1("GC_debug_malloc_uncollectable(%ld) returning NIL (",
+ GC_err_printf("GC_debug_malloc_uncollectable(%lu) returning NIL (",
(unsigned long) lb);
GC_err_puts(s);
- GC_err_printf1(":%ld)\n", (unsigned long)i);
+ GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
@@ -739,24 +655,17 @@ GC_PTR p;
}
#ifdef ATOMIC_UNCOLLECTABLE
-# ifdef __STDC__
- GC_PTR GC_debug_malloc_atomic_uncollectable(size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_malloc_atomic_uncollectable(lb, s, i)
- size_t lb;
- char * s;
- int i;
-# endif
+void * GC_debug_malloc_atomic_uncollectable(size_t lb, GC_EXTRA_PARAMS)
{
- GC_PTR result =
+ void * result =
GC_malloc_atomic_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
if (result == 0) {
- GC_err_printf1(
- "GC_debug_malloc_atomic_uncollectable(%ld) returning NIL (",
+ GC_err_printf(
+ "GC_debug_malloc_atomic_uncollectable(%lu) returning NIL (",
(unsigned long) lb);
GC_err_puts(s);
- GC_err_printf1(":%ld)\n", (unsigned long)i);
+ GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
@@ -767,36 +676,29 @@ GC_PTR p;
}
#endif /* ATOMIC_UNCOLLECTABLE */
-# ifdef __STDC__
- void GC_debug_free(GC_PTR p)
-# else
- void GC_debug_free(p)
- GC_PTR p;
-# endif
+void GC_debug_free(void * p)
{
- register GC_PTR base;
- register ptr_t clobbered;
+ ptr_t base;
+ ptr_t clobbered;
if (0 == p) return;
base = GC_base(p);
if (base == 0) {
- GC_err_printf1("Attempt to free invalid pointer %lx\n",
- (unsigned long)p);
+ GC_err_printf("Attempt to free invalid pointer %p\n", p);
ABORT("free(invalid pointer)");
}
if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
- GC_err_printf1(
- "GC_debug_free called on pointer %lx wo debugging info\n",
- (unsigned long)p);
+ GC_err_printf(
+ "GC_debug_free called on pointer %p wo debugging info\n", p);
} else {
# ifndef SHORT_DBG_HDRS
clobbered = GC_check_annotated_obj((oh *)base);
if (clobbered != 0) {
if (((oh *)base) -> oh_sz == GC_size(base)) {
- GC_err_printf0(
+ GC_err_printf(
"GC_debug_free: found previously deallocated (?) object at ");
} else {
- GC_err_printf0("GC_debug_free: found smashed location at ");
+ GC_err_printf("GC_debug_free: found smashed location at ");
}
GC_print_smashed_obj(p, clobbered);
}
@@ -807,7 +709,7 @@ GC_PTR p;
if (GC_find_leak) {
GC_free(base);
} else {
- register hdr * hhdr = HDR(p);
+ hdr * hhdr = HDR(p);
GC_bool uncollectable = FALSE;
if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
@@ -822,52 +724,42 @@ GC_PTR p;
GC_free(base);
} else {
size_t i;
- size_t obj_sz = hhdr -> hb_sz - BYTES_TO_WORDS(sizeof(oh));
+ size_t obj_sz = BYTES_TO_WORDS(hhdr -> hb_sz - sizeof(oh));
for (i = 0; i < obj_sz; ++i) ((word *)p)[i] = 0xdeadbeef;
- GC_ASSERT((word *)p + i == (word *)base + hhdr -> hb_sz);
+ GC_ASSERT((word *)p + i == (word *)(base + hhdr -> hb_sz));
}
} /* !GC_find_leak */
}
#ifdef THREADS
-extern void GC_free_inner(GC_PTR p);
+extern void GC_free_inner(void * p);
/* Used internally; we assume it's called correctly. */
-void GC_debug_free_inner(GC_PTR p)
+void GC_debug_free_inner(void * p)
{
GC_free_inner(GC_base(p));
}
#endif
-# ifdef __STDC__
- GC_PTR GC_debug_realloc(GC_PTR p, size_t lb, GC_EXTRA_PARAMS)
-# else
- GC_PTR GC_debug_realloc(p, lb, s, i)
- GC_PTR p;
- size_t lb;
- char *s;
- int i;
-# endif
+void * GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
{
- register GC_PTR base = GC_base(p);
- register ptr_t clobbered;
- register GC_PTR result;
- register size_t copy_sz = lb;
- register size_t old_sz;
- register hdr * hhdr;
+ void * base = GC_base(p);
+ ptr_t clobbered;
+ void * result;
+ size_t copy_sz = lb;
+ size_t old_sz;
+ hdr * hhdr;
if (p == 0) return(GC_debug_malloc(lb, OPT_RA s, i));
if (base == 0) {
- GC_err_printf1(
- "Attempt to reallocate invalid pointer %lx\n", (unsigned long)p);
+ GC_err_printf("Attempt to reallocate invalid pointer %p\n", p);
ABORT("realloc(invalid pointer)");
}
if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
- GC_err_printf1(
- "GC_debug_realloc called on pointer %lx wo debugging info\n",
- (unsigned long)p);
+ GC_err_printf(
+ "GC_debug_realloc called on pointer %p wo debugging info\n", p);
return(GC_realloc(p, lb));
}
hhdr = HDR(base);
@@ -892,7 +784,7 @@ void GC_debug_free_inner(GC_PTR p)
break;
# endif
default:
- GC_err_printf0("GC_debug_realloc: encountered bad kind\n");
+ GC_err_printf("GC_debug_realloc: encountered bad kind\n");
ABORT("bad kind");
}
# ifdef SHORT_DBG_HDRS
@@ -900,7 +792,7 @@ void GC_debug_free_inner(GC_PTR p)
# else
clobbered = GC_check_annotated_obj((oh *)base);
if (clobbered != 0) {
- GC_err_printf0("GC_debug_realloc: found smashed location at ");
+ GC_err_printf("GC_debug_realloc: found smashed location at ");
GC_print_smashed_obj(p, clobbered);
}
old_sz = ((oh *)base) -> oh_sz;
@@ -922,12 +814,7 @@ void GC_debug_free_inner(GC_PTR p)
ptr_t GC_smashed[MAX_SMASHED];
unsigned GC_n_smashed = 0;
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_add_smashed(ptr_t smashed)
-# else
- void GC_add_smashed(smashed)
- ptr_t smashed;
-#endif
+void GC_add_smashed(ptr_t smashed)
{
GC_ASSERT(GC_is_marked(GC_base(smashed)));
GC_smashed[GC_n_smashed] = smashed;
@@ -938,13 +825,13 @@ unsigned GC_n_smashed = 0;
}
/* Print all objects on the list. Clear the list. */
-void GC_print_all_smashed_proc ()
+void GC_print_all_smashed_proc(void)
{
unsigned i;
GC_ASSERT(!I_HOLD_LOCK());
if (GC_n_smashed == 0) return;
- GC_err_printf0("GC_check_heap_block: found smashed heap objects:\n");
+ GC_err_printf("GC_check_heap_block: found smashed heap objects:\n");
for (i = 0; i < GC_n_smashed; ++i) {
GC_print_smashed_obj(GC_base(GC_smashed[i]), GC_smashed[i]);
GC_smashed[i] = 0;
@@ -952,37 +839,32 @@ void GC_print_all_smashed_proc ()
GC_n_smashed = 0;
}
-/* Check all marked objects in the given block for validity */
+/* Check all marked objects in the given block for validity */
+/* Avoid GC_apply_to_each_object for performance reasons. */
/*ARGSUSED*/
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_check_heap_block(register struct hblk *hbp, word dummy)
-# else
- void GC_check_heap_block(hbp, dummy)
- register struct hblk *hbp; /* ptr to current heap block */
- word dummy;
-# endif
+void GC_check_heap_block(struct hblk *hbp, word dummy)
{
- register struct hblkhdr * hhdr = HDR(hbp);
- register word sz = hhdr -> hb_sz;
- register int word_no;
- register word *p, *plim;
+ struct hblkhdr * hhdr = HDR(hbp);
+ size_t sz = hhdr -> hb_sz;
+ int bit_no;
+ unsigned char *p, *plim;
- p = (word *)(hbp->hb_body);
- word_no = 0;
- if (sz > MAXOBJSZ) {
+ p = hbp->hb_body;
+ bit_no = 0;
+ if (sz > MAXOBJBYTES) {
plim = p;
} else {
- plim = (word *)((((word)hbp) + HBLKSIZE) - WORDS_TO_BYTES(sz));
+ plim = hbp->hb_body + HBLKSIZE - sz;
}
/* go through all words in block */
while( p <= plim ) {
- if( mark_bit_from_hdr(hhdr, word_no)
+ if( mark_bit_from_hdr(hhdr, bit_no)
&& GC_HAS_DEBUG_INFO((ptr_t)p)) {
ptr_t clobbered = GC_check_annotated_obj((oh *)p);
if (clobbered != 0) GC_add_smashed(clobbered);
}
- word_no += sz;
+ bit_no += MARK_BIT_OFFSET(sz);
p += sz;
}
}
@@ -990,14 +872,12 @@ void GC_print_all_smashed_proc ()
/* This assumes that all accessible objects are marked, and that */
/* I hold the allocation lock. Normally called by collector. */
-void GC_check_heap_proc()
+void GC_check_heap_proc(void)
{
# ifndef SMALL_CONFIG
-# ifdef ALIGN_DOUBLE
- GC_STATIC_ASSERT((sizeof(oh) & (2 * sizeof(word) - 1)) == 0);
-# else
- GC_STATIC_ASSERT((sizeof(oh) & (sizeof(word) - 1)) == 0);
-# endif
+ /* Ignore gcc no effect warning on the following. */
+ GC_STATIC_ASSERT((sizeof(oh) & (GRANULE_BYTES - 1)) == 0);
+ /* FIXME: Should we check for twice that alignment? */
# endif
GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
}
@@ -1006,16 +886,10 @@ void GC_check_heap_proc()
struct closure {
GC_finalization_proc cl_fn;
- GC_PTR cl_data;
+ void * cl_data;
};
-# ifdef __STDC__
- void * GC_make_closure(GC_finalization_proc fn, void * data)
-# else
- GC_PTR GC_make_closure(fn, data)
- GC_finalization_proc fn;
- GC_PTR data;
-# endif
+void * GC_make_closure(GC_finalization_proc fn, void * data)
{
struct closure * result =
# ifdef DBG_HDRS_ALL
@@ -1027,34 +901,25 @@ struct closure {
result -> cl_fn = fn;
result -> cl_data = data;
- return((GC_PTR)result);
+ return((void *)result);
}
-# ifdef __STDC__
- void GC_debug_invoke_finalizer(void * obj, void * data)
-# else
- void GC_debug_invoke_finalizer(obj, data)
- char * obj;
- char * data;
-# endif
+void GC_debug_invoke_finalizer(void * obj, void * data)
{
register struct closure * cl = (struct closure *) data;
- (*(cl -> cl_fn))((GC_PTR)((char *)obj + sizeof(oh)), cl -> cl_data);
+ (*(cl -> cl_fn))((void *)((char *)obj + sizeof(oh)), cl -> cl_data);
}
/* Set ofn and ocd to reflect the values we got back. */
-static void store_old (obj, my_old_fn, my_old_cd, ofn, ocd)
-GC_PTR obj;
-GC_finalization_proc my_old_fn;
-struct closure * my_old_cd;
-GC_finalization_proc *ofn;
-GC_PTR *ocd;
+static void store_old (void *obj, GC_finalization_proc my_old_fn,
+ struct closure *my_old_cd, GC_finalization_proc *ofn,
+ void **ocd)
{
if (0 != my_old_fn) {
if (my_old_fn != GC_debug_invoke_finalizer) {
- GC_err_printf1("Debuggable object at 0x%lx had non-debug finalizer.\n",
- obj);
+ GC_err_printf("Debuggable object at %p had non-debug finalizer.\n",
+ obj);
/* This should probably be fatal. */
} else {
if (ofn) *ofn = my_old_cd -> cl_fn;
@@ -1066,26 +931,17 @@ GC_PTR *ocd;
}
}
-# ifdef __STDC__
- void GC_debug_register_finalizer(GC_PTR obj, GC_finalization_proc fn,
- GC_PTR cd, GC_finalization_proc *ofn,
- GC_PTR *ocd)
-# else
- void GC_debug_register_finalizer(obj, fn, cd, ofn, ocd)
- GC_PTR obj;
- GC_finalization_proc fn;
- GC_PTR cd;
- GC_finalization_proc *ofn;
- GC_PTR *ocd;
-# endif
+void GC_debug_register_finalizer(void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd)
{
GC_finalization_proc my_old_fn;
- GC_PTR my_old_cd;
+ void * my_old_cd;
ptr_t base = GC_base(obj);
if (0 == base) return;
if ((ptr_t)obj - base != sizeof(oh)) {
- GC_err_printf1(
- "GC_debug_register_finalizer called with non-base-pointer 0x%lx\n",
+ GC_err_printf(
+ "GC_debug_register_finalizer called with non-base-pointer %p\n",
obj);
}
if (0 == fn) {
@@ -1097,28 +953,19 @@ GC_PTR *ocd;
store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
}
-# ifdef __STDC__
- void GC_debug_register_finalizer_no_order
- (GC_PTR obj, GC_finalization_proc fn,
- GC_PTR cd, GC_finalization_proc *ofn,
- GC_PTR *ocd)
-# else
- void GC_debug_register_finalizer_no_order
- (obj, fn, cd, ofn, ocd)
- GC_PTR obj;
- GC_finalization_proc fn;
- GC_PTR cd;
- GC_finalization_proc *ofn;
- GC_PTR *ocd;
-# endif
+void GC_debug_register_finalizer_no_order
+ (void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd)
{
GC_finalization_proc my_old_fn;
- GC_PTR my_old_cd;
+ void * my_old_cd;
ptr_t base = GC_base(obj);
if (0 == base) return;
if ((ptr_t)obj - base != sizeof(oh)) {
- GC_err_printf1(
- "GC_debug_register_finalizer_no_order called with non-base-pointer 0x%lx\n",
+ GC_err_printf(
+ "GC_debug_register_finalizer_no_order called with "
+ "non-base-pointer %p\n",
obj);
}
if (0 == fn) {
@@ -1131,29 +978,19 @@ GC_PTR *ocd;
store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
}
-# ifdef __STDC__
- void GC_debug_register_finalizer_ignore_self
- (GC_PTR obj, GC_finalization_proc fn,
- GC_PTR cd, GC_finalization_proc *ofn,
- GC_PTR *ocd)
-# else
- void GC_debug_register_finalizer_ignore_self
- (obj, fn, cd, ofn, ocd)
- GC_PTR obj;
- GC_finalization_proc fn;
- GC_PTR cd;
- GC_finalization_proc *ofn;
- GC_PTR *ocd;
-# endif
+void GC_debug_register_finalizer_ignore_self
+ (void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd)
{
GC_finalization_proc my_old_fn;
- GC_PTR my_old_cd;
+ void * my_old_cd;
ptr_t base = GC_base(obj);
if (0 == base) return;
if ((ptr_t)obj - base != sizeof(oh)) {
- GC_err_printf1(
- "GC_debug_register_finalizer_ignore_self called with non-base-pointer 0x%lx\n",
- obj);
+ GC_err_printf(
+ "GC_debug_register_finalizer_ignore_self called with "
+ "non-base-pointer %p\n", obj);
}
if (0 == fn) {
GC_register_finalizer_ignore_self(base, 0, 0, &my_old_fn, &my_old_cd);
@@ -1171,15 +1008,12 @@ GC_PTR *ocd;
# define RA
#endif
-GC_PTR GC_debug_malloc_replacement(lb)
-size_t lb;
+void * GC_debug_malloc_replacement(size_t lb)
{
return GC_debug_malloc(lb, RA "unknown", 0);
}
-GC_PTR GC_debug_realloc_replacement(p, lb)
-GC_PTR p;
-size_t lb;
+void * GC_debug_realloc_replacement(void *p, size_t lb)
{
return GC_debug_realloc(p, lb, RA "unknown", 0);
}
diff --git a/digimars.mak b/digimars.mak
index 9778feee..41178fdc 100644
--- a/digimars.mak
+++ b/digimars.mak
@@ -3,7 +3,7 @@
# Written by Walter Bright
-DEFINES=-DNDEBUG -DSILENT -DGC_BUILD -D_WINDOWS -DGC_DLL -DALL_INTERIOR_POINTERS -D__STDC__ -DWIN32_THREADS
+DEFINES=-DNDEBUG -DGC_BUILD -D_WINDOWS -DGC_DLL -DALL_INTERIOR_POINTERS -D__STDC__ -DWIN32_THREADS
CFLAGS=-Iinclude $(DEFINES) -wx -g
LFLAGS=/ma/implib/co
CC=sc
@@ -61,7 +61,7 @@ gctest.exe : gc.lib tests\test.obj
sc -ogctest.exe tests\test.obj gc.lib
tests\test.obj : tests\test.c
- $(CC) -c -g -DNDEBUG -DSILENT -DGC_BUILD -D_WINDOWS -DGC_DLL \
+ $(CC) -c -g -DNDEBUG -DGC_BUILD -D_WINDOWS -DGC_DLL \
-DALL_INTERIOR_POINTERS -DWIN32_THREADS \
-Iinclude tests\test.c -otests\test.obj
diff --git a/doc/Makefile.in b/doc/Makefile.in
index bf7d9a5d..708fd51b 100644
--- a/doc/Makefile.in
+++ b/doc/Makefile.in
@@ -105,7 +105,7 @@ OBJDUMP = @OBJDUMP@
PACKAGE = @PACKAGE@
RANLIB = @RANLIB@
STRIP = @STRIP@
-THREADDLLIBS = @THREADDLLIBS@
+THREADLIBS = @THREADLIBS@
UNWINDLIBS = @UNWINDLIBS@
VERSION = @VERSION@
addincludes = @addincludes@
diff --git a/doc/README b/doc/README
index b7820d5e..ff77113e 100644
--- a/doc/README
+++ b/doc/README
@@ -28,7 +28,7 @@ are GPL'ed, but with an exception that should cover all uses in the
collector. (If you are concerned about such things, I recommend you look
at the notice in config.guess or ltmain.sh.)
-This is version 6.4 of a conservative garbage collector for C and C++.
+This is version 6.3 of a conservative garbage collector for C and C++.
You might find a more recent version of this at
diff --git a/doc/README.changes b/doc/README.changes
index 265e06ca..78a4e06c 100644
--- a/doc/README.changes
+++ b/doc/README.changes
@@ -2077,16 +2077,13 @@ Since 6.3alpha5:
Adrian Bunk for the patches).
- Integrated NetBSD/OpenBSD patches from Marc Recht and Matthias Drochner.
-Since gc6.3alpha6:
+Since 6.3alpha6:
- Compile test_cpp.cc with CXXCOMPILE instead of COMPILE.
- Very large allocations could cause a collector hang. Correct
calculation of GC_collect_at_heapsize.
- GC_print_hblkfreelist printed some bogus results if USE_MUNMAP
was defined.
- - The generic GC_THREADS macro didn't work correctly on Solaris,
- since the implementation failed to include gc_config_macros.h
- before deciding whether or not to compile the rest of the file.
- - Threadlibs.c failed to expand the generic GC_THREADS macro.
+ - Include gc_config_macros.h in threadlibs.c.
- Correct MacOSX thread stop code. (Thanks to Dick Porter.)
- SMALL_OBJ definition was off by one. This could cause crashes
at startup. (Thanks to Zoltan Varga for narrowing this down to
@@ -2096,12 +2093,12 @@ Since gc6.3alpha6:
- Changed X86_64 implementation to use SA_SIGINFO in the MPROTECT_VDB
implementation. The old approach appears to have been broken by
recent kernels.
- - Add GC_ATTR_UNUSED to eliminate a warning in gc_allocator.h. (Thanks
+ - Added GC_ATTR_UNUSED to eliminate a warning in gc_allocator.h (Thanks
to Andrew Begel.)
- Fix GC_task_self declaration in os_dep.c. (Thanks to Andrew Pinski.)
- Increase INITIAL_BUF_SZ in os_dep.c for Solaris /proc reads.
-Since 6.3:
+Since gc6.3:
- Merge gcconfig.h changes from gcc tree.
- Unconditionally include gc_priv.h in solaris_pthreads.c, win32_threads.h,
aix_irix_threads.c, and solaris_threads.c to get thread definitions.
@@ -2111,7 +2108,7 @@ Since 6.3:
- Go ahead and split large blocks in GC_allochblk_nth if GC_dont_gc
is set. (Thanks to Alexander Petrossian.)
- GC_PRINT_BACK_HEIGHT would deadlock with thread support.
- - Let in_progress_space in backgraph.s grow dynamically.
+ - Let in_progress_space in backgraph.s grow dyanmically.
- Fix README.solaris2. The GC_thr_init() hack doesn't work anymore.
- Convert GC_finalizer_mem_freed to bytes in allchblk.c.
- Add missing declaration for GC_generic_malloc_words_small_inner.
@@ -2131,46 +2128,91 @@ Since 6.3:
(Thanks to Peter Colson.)
- Changed "int stack_size" declaration in pthread_support.c to use
size_t. (Only mattered with GC_ASSERTIONS enabled.)
- - Added CRIS (etrax) support. (Thanks to Simon Posnjak and
- Hans-Peter Nilsson.)
- - Removed GC_IGNORE_FB frame buffer recognition, and replaced
- it with a check that the mapping type is MEM_IMAGE.
- In theory, this should work much better, but it is a high
- risk change for win32. (Thanks to Ashley Bone for the crucial
- experimental data behind this, and to Rutger Ovidus for
- some further experiments.)
- - Fixed print_block_list to print the correct kind number for
- STUBBORN. (Thanks to Rutger Ovidus.)
- - GC_allochblk_nth incremented GC_words_wasted by bytes rather than
- words.
- - Consider GC_words_wasted in GC_adj_words_allocd only if it is within
- reason. (A hack to avoid some extremely unlikely scenarios in which
- we manage to allocate only "wasted" space. 7.0 has a better fix.)
- - Changed PowerPC GC_clear implementation to use lwsync instead of
- eieio, since the documentation recommends against eieio, and
- it seems to be incorrect if the preceding memory op is a load.
- - Fixed print_block_list to print the correct kind number for
- STUBBORN. (Thanks to Rutger Ovidus.)
- - GC_allochblk_nth incremented GC_words_wasted by bytes rather than
- words.
- - Have configure.in generate an error if it is asked to support
- pthreads, but doesn't know how to.
- - Added Kazuhiro Inaoka's patch for Renesas M32R support.
- - Have the GNU build mechanism link with -ldl. Rename THREADLIBS
- to THREADDLLIBS to reflect this. (Thanks to Sven Verdoolaege.)
- - Added Hannes Mehnert's patch for FreeBSD/SPARC support.
- - Merged some FreeBSD specific patches to threadlibs.c and dyn_load.c.
- (Thanks tp John Merryweather Cooper.)
- - Define MPROTECT_VDB on MACOSX only if threads are being used, since the
- dirty page tracking mechanism uses threads. (This avoids an undefined
- reference to _GC_darwin_register_mach_handler_thread.)
- - By popular demand, use __libc symbols only if we are built with
- USE_LIBC_PRIVATES, which is off by default, and not otherwise documented.
- - Ignore GC_enable_incremental() requests when KEEP_BACK_PTRS is set.
- The GC itself will dirty lots of pages in this cases, probably making
- it counterproductive on all platforms. And the DARWIN port crashes.
+
+
+Since gc6.4:
+ - Remove GC_PROTO, VOLATILE, GC_PTR, and GC_CONST. Assume ANSI C compiler
+ and use ANSI constructs unconditionally.
+ - Introduce #elif and #error in some of the appropriate places.
+ - Remove GC_printf cruft. Use stdargs.
+ - Remove separate Solaris threads support. Use the more generic Posix
+ implementation.
+ - Use atomic_ops for atomic operations and memory barriers.
+ - Clean up MPROTECT_VDB implementation. Use SA_SIGINFO wherever
+ possible.
+ - Remove broken SIGNALS stuff.
+ - Use size_t instead of word, where appropriate.
+ - Add .S.o rule to Makefile.am.
+ - Officially unsupport SunOS4, several old flavors of M68K (SunOS4,
+ A/UX, HP), IBM PC/RTs and RISCOS/Irix4. (I doubt the old code worked.
+ If anyone cares, these should be easy to resurrect.)
+ - Add EXPECT() in some critical places.
+ - Redefined hb_sz and hb_body to deal with bytes rather than words.
+ This affected a great deal of code. I would like to consistently use
+ byte offsets and sizes where there's not a convincing reason to do
+ otherwise.
+ - Redefined several other variables (GC_mem_found, GC_words_allocd)
+ etc. to use units of bytes. Most of these were also renamed to
+ reflect that fact.
+ - Killed as many "register" declarations as possible.
+ - Partially replaced stubborn allocation with manual write barrier.
+ It's currently broken.
+ - Restructured mark code, to allow mark bits to be kept either on
+ a per allocation granule or per object basis. The emphasis is
+ now on the -DUSE_MARK_BYTES option, since individual bits perform
+ quite badly on hyperthreaded P4s, and are probably suboptimal on
+ other architectures. -DUSE_MARK_BITS is currently broken, and may
+ be resurrected only for the single-threaded case. This significantly
+ reduced the cache footprint required by auxiliary GC data structures.
+ It also reduces space overhead for small heaps. It probably slows
+ things down slightly if interior pointers are very common.
+ - As part of the above, we now maintain an approximate count of set
+ mark bits in each heap block.
+ - As part of the above, the semantics of hb_map changed drastically.
+ For MARK_BIT_PER_OBJ, it doesn't exist. For MARK_BIT_PER_GRANULE,
+ it is purely a way to replace a mod instruction with a table lookup.
+ (Somewhat to my surprise, this still wins on modern hardware.)
+ - Removed PRINTSTATS, GATHERSTATS, and SILENT macros. Everything is
+ now controlled by GC_print_stats variable and GC_PRINT_STATS
+ and new GC_PRINT_VERBOSE_STATS environment variables.
+ - Add GC_log_printf and use it consistently for logging output.
+ - Unconditionally count the objects we reclaim in the sweep phase.
+ For thread local allocation, we need that anyway, and we expect
+ that's increasingly the only case that matters. And it simplifies
+ the code. In general expect minor performance hacks that benefit
+ only the single-threaded case to disappear.
+ - Remove GC_quiet from gc.h and elsewhere.
+ - Changed the heap expansion heuristic, and the definition of
+ GC_free_space_divisor, to refer to live data size, instead of total
+ heap size. I believe this is much more robust. It wasn't previously
+ possible, because we didn't have access to live data size.
+ - Thread local allocation added the extra byte in twice: Once in
+ thread_local_alloc, and once in malloc_many.
+ - Removed GC_malloc_words_small and GC_gcj_fast_malloc. A new
+ mechanism based on the thread local allocation data structures
+ is expected to be added instead. This should allow inlined code
+ that is both fast and doesn't rely on collector internals.
+ - Changed both free lists and reclaim lists to be indexed by granules
+ instead of words, norming halving their size.
+ - MERGE_SIZE is now the only option, and the macro was removed.
+ (Without it, we need a memory reference to GC_all_interior_pointers
+ anyway. Thus it costs us nothing.)
+ - Change GC_size_map to map to granules instead of words. Make sure
+ that every possible size up to TINY_FREELISTS is present.
+ - Split of macros need for fast inline allocation into gc_tiny_fl.h
+ in anticipation of a new inline allocator that doesn't rely on GC
+ internals.
+ - Changed thread local allocation to use GRANULE_BYTES and TINY_FREELISTS
+ in anticipation of a merge with the inline allocation code.
+ - Removed ALIGN_DOUBLE. This is mostly handled by GRANULE_BYTES.
+ - Make locking on most platforms conditional on GC_need_to_lock.
To do:
+ - Use thread-local allocation code by default.
+ - Fix USE_MARK_BITS.
+ - Fix stubborn allocation.
+ - function wrapping, conditional locking??
+ - Finish replacing stubborn allocation with manual write barrier??
- The USE_MUNMAP code should really use a separate data structure
indexed by physical page to keep track of time since last use of
a page. Using hblk headers means we lose track of ages when
diff --git a/doc/README.environment b/doc/README.environment
index 686e9482..f2ec428e 100644
--- a/doc/README.environment
+++ b/doc/README.environment
@@ -15,11 +15,11 @@ GC_LOOP_ON_ABORT - Causes the collector abort routine to enter a tight loop.
result in an infinite loop in a handler, allowing
similar debugging techniques.
-GC_PRINT_STATS - Turn on as much logging as is easily feasible without
- adding signifcant runtime overhead. Doesn't work if
- the collector is built with SMALL_CONFIG. Overridden
- by setting GC_quiet. On by default if the collector
- was built without -DSILENT.
+GC_PRINT_STATS - Turn on GC logging. Not functional with -DSMALL_CONFIG.
+
+GC_LOG_FILE - The name of the log file. Stderr by default.
+
+GC_PRINT_VERBOSE_STATS - Turn on even more logging.
GC_DUMP_REGULARLY - Generate a GC debugging dump GC_dump() on startup
and during every collection. Very verbose. Useful
@@ -47,7 +47,7 @@ GC_NPROCS=<n> - Linux w/threads only. Explicitly sets the number of processors
first spinning.
GC_MARKERS=<n> - Linux w/threads and parallel marker only. Set the number
- of marker threads. This is normaly set to the number of
+ of marker threads. This is normally set to the number of
processors. It is safer to adjust GC_MARKERS than GC_NPROCS,
since GC_MARKERS has no impact on the lock implementation.
@@ -115,7 +115,6 @@ GC_IGNORE_FB[=<n>] - (Win32 only.) Try to avoid treating a mapped
are never honored, eliminating this risk for most,
but not all, applications. This feature is likely to disappear
if/when we find a less disgusting "solution".
- IN VERSION 6.4 AND LATER, THIS SHOULD BE UNNECESSARY.
The following turn on runtime flags that are also program settable. Checked
only during initialization. We expect that they will usually be set through
@@ -149,3 +148,10 @@ GC_ALL_INTERIOR_POINTERS - Turns on GC_all_interior_pointers and thus interior
pointer recognition.
GC_DONT_GC - Turns off garbage collection. Use cautiously.
+
+GC_TRACE=addr - Intended for collector debugging. Requires that the collector
+ have been built with ENABLE_TRACE defined. Causes the debugger
+ to log information about the tracing of address ranges containing
+ addr. Typically addr is the address that contains a pointer to
+ an object that mysteriously failed to get marked. Addr must be
+ specified as a hexadecimal integer.
diff --git a/doc/README.solaris2 b/doc/README.solaris2
index 31e75003..6ed61dc8 100644
--- a/doc/README.solaris2
+++ b/doc/README.solaris2
@@ -43,7 +43,9 @@ can result in unpleasant heap growth. But it seems better than the
race/deadlock issues we had before.
If solaris_threads are used on an X86 processor with malloc redirected to
-GC_malloc a deadlock is likely to result.
+GC_malloc, it is necessary to call GC_thr_init explicitly before forking the
+first thread. (This avoids a deadlock arising from calling GC_thr_init
+with the allocation lock held.)
It appears that there is a problem in using gc_cpp.h in conjunction with
Solaris threads and Sun's C++ runtime. Apparently the overloaded new operator
diff --git a/dyn_load.c b/dyn_load.c
index fdb6b304..a35928d4 100644
--- a/dyn_load.c
+++ b/dyn_load.c
@@ -51,7 +51,7 @@
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE)) \
&& !defined(PCR)
-#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
+#if !defined(SUNOS5DL) && !defined(IRIX5) && \
!defined(MSWIN32) && !defined(MSWINCE) && \
!(defined(ALPHA) && defined(OSF1)) && \
!defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
@@ -70,15 +70,6 @@
# include <dlfcn.h>
# include <link.h>
#endif
-#ifdef SUNOS4
-# include <dlfcn.h>
-# include <link.h>
-# include <a.out.h>
- /* struct link_map field overrides */
-# define l_next lm_next
-# define l_addr lm_addr
-# define l_name lm_name
-#endif
#if defined(NETBSD)
# include <machine/elf_machdep.h>
@@ -96,25 +87,17 @@
/* Newer versions of GNU/Linux define this macro. We
* define it similarly for any ELF systems that don't. */
# ifndef ElfW
-# if defined(FREEBSD)
-# if __ELF_WORD_SIZE == 32
+# ifdef NETBSD
+# if ELFSIZE == 32
# define ElfW(type) Elf32_##type
# else
# define ElfW(type) Elf64_##type
# endif
# else
-# ifdef NETBSD
-# if ELFSIZE == 32
-# define ElfW(type) Elf32_##type
-# else
-# define ElfW(type) Elf64_##type
-# endif
+# if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
+# define ElfW(type) Elf32_##type
# else
-# if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
-# define ElfW(type) Elf32_##type
-# else
-# define ElfW(type) Elf64_##type
-# endif
+# define ElfW(type) Elf64_##type
# endif
# endif
# endif
@@ -172,47 +155,7 @@ GC_FirstDLOpenedLinkMap()
# define dlopen GC_dlopen
# endif
-#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)
-
-#ifdef LINT
- struct link_dynamic _DYNAMIC;
-#endif
-
-static struct link_map *
-GC_FirstDLOpenedLinkMap()
-{
- extern struct link_dynamic _DYNAMIC;
-
- if( &_DYNAMIC == 0) {
- return(0);
- }
- return(_DYNAMIC.ld_un.ld_1->ld_loaded);
-}
-
-/* Return the address of the ld.so allocated common symbol */
-/* with the least address, or 0 if none. */
-static ptr_t GC_first_common()
-{
- ptr_t result = 0;
- extern struct link_dynamic _DYNAMIC;
- struct rtc_symb * curr_symbol;
-
- if( &_DYNAMIC == 0) {
- return(0);
- }
- curr_symbol = _DYNAMIC.ldd -> ldd_cp;
- for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
- if (result == 0
- || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
- result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
- }
- }
- return(result);
-}
-
-#endif /* SUNOS4 ... */
-
-# if defined(SUNOS4) || defined(SUNOS5DL)
+# if defined(SUNOS5DL)
/* Add dynamic library data sections to the root set. */
# if !defined(PCR) && !defined(GC_SOLARIS_THREADS) && defined(THREADS)
# ifndef SRC_M3
@@ -229,15 +172,6 @@ void GC_register_dynamic_libraries()
for (lm = GC_FirstDLOpenedLinkMap();
lm != (struct link_map *) 0; lm = lm->l_next)
{
-# ifdef SUNOS4
- struct exec *e;
-
- e = (struct exec *) lm->lm_addr;
- GC_add_roots_inner(
- ((char *) (N_DATOFF(*e) + lm->lm_addr)),
- ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
- TRUE);
-# endif
# ifdef SUNOS5DL
ElfW(Ehdr) * e;
ElfW(Phdr) * p;
@@ -267,19 +201,6 @@ void GC_register_dynamic_libraries()
}
# endif
}
-# ifdef SUNOS4
- {
- static ptr_t common_start = 0;
- ptr_t common_end;
- extern ptr_t GC_find_limit();
-
- if (common_start == 0) common_start = GC_first_common();
- if (common_start != 0) {
- common_end = GC_find_limit(common_start, TRUE);
- GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
- }
- }
-# endif
}
# endif /* !USE_PROC ... */
@@ -501,12 +422,6 @@ GC_FirstDLOpenedLinkMap()
if( cachedResult == 0 ) {
int tag;
for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
- /* FIXME: The DT_DEBUG header is not mandated by the */
- /* ELF spec. This code appears to be dependent on */
- /* idiosynchracies of older GNU tool chains. If this code */
- /* fails for you, the real problem is probably that it is */
- /* being used at all. You should be getting the */
- /* dl_iterate_phdr version. */
if( tag == DT_DEBUG ) {
struct link_map *lm
= ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
@@ -610,7 +525,7 @@ void GC_register_dynamic_libraries()
}
}
if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
- GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
+ GC_err_printf("fd = %d, errno = %d\n", fd, errno);
ABORT("/proc PIOCNMAP ioctl failed");
}
if (needed_sz >= current_sz) {
@@ -620,7 +535,7 @@ void GC_register_dynamic_libraries()
(current_sz * sizeof(prmap_t)));
}
if (ioctl(fd, PIOCMAP, addr_map) < 0) {
- GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
+ GC_err_printf("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
fd, errno, needed_sz, addr_map);
ABORT("/proc PIOCMAP ioctl failed");
};
@@ -753,10 +668,6 @@ void GC_register_dynamic_libraries()
# define HAVE_REGISTER_MAIN_STATIC_DATA
- /* The frame buffer testing code is dead in this version. */
- /* We leave it here temporarily in case the switch to just */
- /* testing for MEM_IMAGE sections causes un expected */
- /* problems. */
GC_bool GC_warn_fb = TRUE; /* Warn about traced likely */
/* graphics memory. */
GC_bool GC_disallow_ignore_fb = FALSE;
@@ -771,27 +682,25 @@ void GC_register_dynamic_libraries()
/* Should [start, start+len) be treated as a frame buffer */
/* and ignored? */
- /* Unfortunately, we currently are not quite sure how to tell */
- /* this automatically, and rely largely on user input. */
- /* We expect that any mapping with type MEM_MAPPED (which */
- /* apparently excludes library data sections) can be safely */
- /* ignored. But we're too chicken to do that in this */
- /* version. */
+ /* Unfortunately, we currently have no real way to tell */
+ /* automatically, and rely largely on user input. */
+ /* FIXME: If we had more data on this phenomenon (e.g. */
+ /* is start aligned to a MB multiple?) we should be able to */
+ /* do better. */
/* Based on a very limited sample, it appears that: */
- /* - Frame buffer mappings appear as mappings of large */
- /* length, usually a bit less than a power of two. */
- /* - The definition of "a bit less" in the above cannot */
- /* be made more precise. */
- /* - Have a starting address at best 64K aligned. */
- /* - Have type == MEM_MAPPED. */
- static GC_bool is_frame_buffer(ptr_t start, size_t len, DWORD tp)
+ /* - Frame buffer mappings appear as mappings of length */
+ /* 2**n MB - 192K. (We guess the 192K can vary a bit.) */
+ /* - Have a starting address at best 64K aligned. */
+ /* I'd love more information about the mapping, since I */
+ /* can't reproduce the problem. */
+ static GC_bool is_frame_buffer(ptr_t start, size_t len)
{
static GC_bool initialized = FALSE;
# define MB (1024*1024)
# define DEFAULT_FB_MB 15
# define MIN_FB_MB 3
- if (GC_disallow_ignore_fb || tp != MEM_MAPPED) return FALSE;
+ if (GC_disallow_ignore_fb) return FALSE;
if (!initialized) {
char * ignore_fb_string = GETENV("GC_IGNORE_FB");
@@ -833,10 +742,10 @@ void GC_register_dynamic_libraries()
# ifdef DEBUG_VIRTUALQUERY
void GC_dump_meminfo(MEMORY_BASIC_INFORMATION *buf)
{
- GC_printf4("BaseAddress = %lx, AllocationBase = %lx, RegionSize = %lx(%lu)\n",
+ GC_printf("BaseAddress = %lx, AllocationBase = %lx, RegionSize = %lx(%lu)\n",
buf -> BaseAddress, buf -> AllocationBase, buf -> RegionSize,
buf -> RegionSize);
- GC_printf4("\tAllocationProtect = %lx, State = %lx, Protect = %lx, "
+ GC_printf("\tAllocationProtect = %lx, State = %lx, Protect = %lx, "
"Type = %lx\n",
buf -> AllocationProtect, buf -> State, buf -> Protect,
buf -> Type);
@@ -880,11 +789,7 @@ void GC_register_dynamic_libraries()
&& (protect == PAGE_EXECUTE_READWRITE
|| protect == PAGE_READWRITE)
&& !GC_is_heap_base(buf.AllocationBase)
- /* This used to check for
- * !is_frame_buffer(p, buf.RegionSize, buf.Type)
- * instead of just checking for MEM_IMAGE.
- * If something breaks, change it back. */
- && buf.Type == MEM_IMAGE) {
+ && !is_frame_buffer(p, buf.RegionSize)) {
# ifdef DEBUG_VIRTUALQUERY
GC_dump_meminfo(&buf);
# endif
@@ -940,15 +845,15 @@ void GC_register_dynamic_libraries()
/* Check status AFTER checking moduleid because */
/* of a bug in the non-shared ldr_next_module stub */
if (status != 0 ) {
- GC_printf1("dynamic_load: status = %ld\n", (long)status);
+ GC_printf("dynamic_load: status = %d\n", status);
{
extern char *sys_errlist[];
extern int sys_nerr;
extern int errno;
if (errno <= sys_nerr) {
- GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
+ GC_printf("dynamic_load: %s\n", sys_errlist[errno]);
} else {
- GC_printf1("dynamic_load: %d\n", (long)errno);
+ GC_printf("dynamic_load: %d\n", errno);
}
}
ABORT("ldr_next_module failed");
@@ -1039,9 +944,9 @@ void GC_register_dynamic_libraries()
break; /* Moved past end of shared library list --> finished */
} else {
if (errno <= sys_nerr) {
- GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
+ GC_printf("dynamic_load: %s\n", sys_errlist[errno]);
} else {
- GC_printf1("dynamic_load: %d\n", (long) errno);
+ GC_printf("dynamic_load: %d\n", errno);
}
ABORT("shl_get failed");
}
@@ -1049,16 +954,16 @@ void GC_register_dynamic_libraries()
}
# ifdef VERBOSE
- GC_printf0("---Shared library---\n");
- GC_printf1("\tfilename = \"%s\"\n", shl_desc->filename);
- GC_printf1("\tindex = %d\n", index);
- GC_printf1("\thandle = %08x\n",
+ GC_printf("---Shared library---\n");
+ GC_printf("\tfilename = \"%s\"\n", shl_desc->filename);
+ GC_printf("\tindex = %d\n", index);
+ GC_printf("\thandle = %08x\n",
(unsigned long) shl_desc->handle);
- GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
- GC_printf1("\ttext seg. end = %08x\n", shl_desc->tend);
- GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
- GC_printf1("\tdata seg. end = %08x\n", shl_desc->dend);
- GC_printf1("\tref. count = %lu\n", shl_desc->ref_count);
+ GC_printf("\ttext seg. start = %08x\n", shl_desc->tstart);
+ GC_printf("\ttext seg. end = %08x\n", shl_desc->tend);
+ GC_printf("\tdata seg. start = %08x\n", shl_desc->dstart);
+ GC_printf("\tdata seg. end = %08x\n", shl_desc->dend);
+ GC_printf("\tref. count = %lu\n", shl_desc->ref_count);
# endif
/* register shared library's data segment as a garbage collection root */
@@ -1143,18 +1048,18 @@ static void GC_dyld_image_add(struct mach_header* hdr, unsigned long slide) {
for(i=0;i<sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++) {
sec = getsectbynamefromheader(
hdr,GC_dyld_sections[i].seg,GC_dyld_sections[i].sect);
- if(sec == NULL || sec->size == 0) continue;
- start = slide + sec->addr;
- end = start + sec->size;
-# ifdef DARWIN_DEBUG
- GC_printf4("Adding section at %p-%p (%lu bytes) from image %s\n",
- start,end,sec->size,GC_dyld_name_for_hdr(hdr));
-# endif
- GC_add_roots((char*)start,(char*)end);
- }
+ if(sec == NULL || sec->size == 0) continue;
+ start = slide + sec->addr;
+ end = start + sec->size;
# ifdef DARWIN_DEBUG
- GC_print_static_roots();
+ GC_printf("Adding section at %p-%p (%lu bytes) from image %s\n",
+ start,end,sec->size,GC_dyld_name_for_hdr(hdr));
# endif
+ GC_add_roots((char*)start,(char*)end);
+ }
+# ifdef DARWIN_DEBUG
+ GC_print_static_roots();
+# endif
}
/* This should never be called by a thread holding the lock */
@@ -1167,15 +1072,15 @@ static void GC_dyld_image_remove(struct mach_header* hdr, unsigned long slide) {
if(sec == NULL || sec->size == 0) continue;
start = slide + sec->addr;
end = start + sec->size;
-# ifdef DARWIN_DEBUG
- GC_printf4("Removing section at %p-%p (%lu bytes) from image %s\n",
- start,end,sec->size,GC_dyld_name_for_hdr(hdr));
-# endif
- GC_remove_roots((char*)start,(char*)end);
- }
# ifdef DARWIN_DEBUG
- GC_print_static_roots();
+ GC_printf("Removing section at %p-%p (%lu bytes) from image %s\n",
+ start,end,sec->size,GC_dyld_name_for_hdr(hdr));
# endif
+ GC_remove_roots((char*)start,(char*)end);
+ }
+# ifdef DARWIN_DEBUG
+ GC_print_static_roots();
+# endif
}
void GC_register_dynamic_libraries() {
@@ -1196,7 +1101,7 @@ void GC_init_dyld() {
if(initialized) return;
# ifdef DARWIN_DEBUG
- GC_printf0("Registering dyld callbacks...\n");
+ GC_printf("Registering dyld callbacks...\n");
# endif
/* Apple's Documentation:
@@ -1220,7 +1125,7 @@ void GC_init_dyld() {
if (bind_fully_env == NULL) {
# ifdef DARWIN_DEBUG
- GC_printf0("Forcing full bind of GC code...\n");
+ GC_printf("Forcing full bind of GC code...\n");
# endif
if(!_dyld_bind_fully_image_containing_address((unsigned long*)GC_malloc))
diff --git a/finalize.c b/finalize.c
index 893f8259..e31b0de3 100644
--- a/finalize.c
+++ b/finalize.c
@@ -85,7 +85,7 @@ static signed_word log_fo_table_size = -1;
word GC_fo_entries = 0;
-void GC_push_finalizer_structures GC_PROTO((void))
+void GC_push_finalizer_structures(void)
{
GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
@@ -98,9 +98,8 @@ void GC_push_finalizer_structures GC_PROTO((void))
/* *table is a pointer to an array of hash headers. If we succeed, we */
/* update both *table and *log_size_ptr. */
/* Lock is held. Signals are disabled. */
-void GC_grow_table(table, log_size_ptr)
-struct hash_chain_entry ***table;
-signed_word * log_size_ptr;
+void GC_grow_table(struct hash_chain_entry ***table,
+ signed_word *log_size_ptr)
{
register word i;
register struct hash_chain_entry *p;
@@ -108,6 +107,7 @@ signed_word * log_size_ptr;
register int log_new_size = log_old_size + 1;
word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
register word new_size = 1 << log_new_size;
+ /* FIXME: Power of 2 size often gets rounded up to one more page. */
struct hash_chain_entry **new_table = (struct hash_chain_entry **)
GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
(size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
@@ -135,30 +135,17 @@ signed_word * log_size_ptr;
*table = new_table;
}
-# if defined(__STDC__) || defined(__cplusplus)
- int GC_register_disappearing_link(GC_PTR * link)
-# else
- int GC_register_disappearing_link(link)
- GC_PTR * link;
-# endif
+int GC_register_disappearing_link(void * * link)
{
ptr_t base;
- base = (ptr_t)GC_base((GC_PTR)link);
+ base = (ptr_t)GC_base((void *)link);
if (base == 0)
ABORT("Bad arg to GC_register_disappearing_link");
return(GC_general_register_disappearing_link(link, base));
}
-# if defined(__STDC__) || defined(__cplusplus)
- int GC_general_register_disappearing_link(GC_PTR * link,
- GC_PTR obj)
-# else
- int GC_general_register_disappearing_link(link, obj)
- GC_PTR * link;
- GC_PTR obj;
-# endif
-
+int GC_general_register_disappearing_link(void * * link, void * obj)
{
struct disappearing_link *curr_dl;
int index;
@@ -168,25 +155,16 @@ signed_word * log_size_ptr;
if ((word)link & (ALIGNMENT-1))
ABORT("Bad arg to GC_general_register_disappearing_link");
# ifdef THREADS
- DISABLE_SIGNALS();
LOCK();
# endif
if (log_dl_table_size == -1
|| GC_dl_entries > ((word)1 << log_dl_table_size)) {
-# ifndef THREADS
- DISABLE_SIGNALS();
-# endif
GC_grow_table((struct hash_chain_entry ***)(&dl_head),
&log_dl_table_size);
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Grew dl table to %lu entries\n",
- (unsigned long)(1 << log_dl_table_size));
- }
-# endif
-# ifndef THREADS
- ENABLE_SIGNALS();
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Grew dl table to %u entries\n",
+ (1 << log_dl_table_size));
+ }
}
index = HASH2(link, log_dl_table_size);
curr_dl = dl_head[index];
@@ -195,7 +173,6 @@ signed_word * log_size_ptr;
curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
return(1);
}
@@ -205,7 +182,6 @@ signed_word * log_size_ptr;
if (0 == new_dl) {
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
new_dl = (struct disappearing_link *)
GC_oom_fn(sizeof(struct disappearing_link));
@@ -215,7 +191,6 @@ signed_word * log_size_ptr;
}
/* It's not likely we'll make it here, but ... */
# ifdef THREADS
- DISABLE_SIGNALS();
LOCK();
# endif
}
@@ -226,23 +201,16 @@ signed_word * log_size_ptr;
GC_dl_entries++;
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
return(0);
}
-# if defined(__STDC__) || defined(__cplusplus)
- int GC_unregister_disappearing_link(GC_PTR * link)
-# else
- int GC_unregister_disappearing_link(link)
- GC_PTR * link;
-# endif
+int GC_unregister_disappearing_link(void * * link)
{
struct disappearing_link *curr_dl, *prev_dl;
int index;
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
index = HASH2(link, log_dl_table_size);
if (((unsigned long)link & (ALIGNMENT-1))) goto out;
@@ -256,11 +224,10 @@ signed_word * log_size_ptr;
}
GC_dl_entries--;
UNLOCK();
- ENABLE_SIGNALS();
# ifdef DBG_HDRS_ALL
dl_set_next(curr_dl, 0);
# else
- GC_free((GC_PTR)curr_dl);
+ GC_free((void *)curr_dl);
# endif
return(1);
}
@@ -269,32 +236,29 @@ signed_word * log_size_ptr;
}
out:
UNLOCK();
- ENABLE_SIGNALS();
return(0);
}
/* Possible finalization_marker procedures. Note that mark stack */
/* overflow is handled by the caller, and is not a disaster. */
-GC_API void GC_normal_finalize_mark_proc(p)
-ptr_t p;
+GC_API void GC_normal_finalize_mark_proc(ptr_t p)
{
hdr * hhdr = HDR(p);
- PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top,
+ PUSH_OBJ(p, hhdr, GC_mark_stack_top,
&(GC_mark_stack[GC_mark_stack_size]));
}
/* This only pays very partial attention to the mark descriptor. */
/* It does the right thing for normal and atomic objects, and treats */
/* most others as normal. */
-GC_API void GC_ignore_self_finalize_mark_proc(p)
-ptr_t p;
+GC_API void GC_ignore_self_finalize_mark_proc(ptr_t p)
{
hdr * hhdr = HDR(p);
word descr = hhdr -> hb_descr;
ptr_t q, r;
ptr_t scan_limit;
- ptr_t target_limit = p + WORDS_TO_BYTES(hhdr -> hb_sz) - 1;
+ ptr_t target_limit = p + hhdr -> hb_sz - 1;
if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
scan_limit = p + descr - sizeof(word);
@@ -304,14 +268,13 @@ ptr_t p;
for (q = p; q <= scan_limit; q += ALIGNMENT) {
r = *(ptr_t *)q;
if (r < p || r > target_limit) {
- GC_PUSH_ONE_HEAP((word)r, q);
+ GC_PUSH_ONE_HEAP(r, q);
}
}
}
/*ARGSUSED*/
-GC_API void GC_null_finalize_mark_proc(p)
-ptr_t p;
+GC_API void GC_null_finalize_mark_proc(ptr_t p)
{
}
@@ -325,13 +288,10 @@ ptr_t p;
/* marking for finalization ordering. Any objects marked */
/* by that procedure will be guaranteed to not have been */
/* finalized when this finalizer is invoked. */
-GC_API void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
-GC_PTR obj;
-GC_finalization_proc fn;
-GC_PTR cd;
-GC_finalization_proc * ofn;
-GC_PTR * ocd;
-finalization_mark_proc * mp;
+GC_API void GC_register_finalizer_inner(void * obj,
+ GC_finalization_proc fn, void *cd,
+ GC_finalization_proc *ofn, void **ocd,
+ finalization_mark_proc mp)
{
ptr_t base;
struct finalizable_object * curr_fo, * prev_fo;
@@ -341,25 +301,16 @@ finalization_mark_proc * mp;
DCL_LOCK_STATE;
# ifdef THREADS
- DISABLE_SIGNALS();
LOCK();
# endif
if (log_fo_table_size == -1
|| GC_fo_entries > ((word)1 << log_fo_table_size)) {
-# ifndef THREADS
- DISABLE_SIGNALS();
-# endif
GC_grow_table((struct hash_chain_entry ***)(&fo_head),
&log_fo_table_size);
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Grew fo table to %lu entries\n",
- (unsigned long)(1 << log_fo_table_size));
- }
-# endif
-# ifndef THREADS
- ENABLE_SIGNALS();
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Grew fo table to %u entries\n",
+ (1 << log_fo_table_size));
+ }
}
/* in the THREADS case signals are disabled and we hold allocation */
/* lock; otherwise neither is true. Proceed carefully. */
@@ -367,12 +318,13 @@ finalization_mark_proc * mp;
index = HASH2(base, log_fo_table_size);
prev_fo = 0; curr_fo = fo_head[index];
while (curr_fo != 0) {
+ GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
/* Interruption by a signal in the middle of this */
/* should be safe. The client may see only *ocd */
/* updated, but we'll declare that to be his */
/* problem. */
- if (ocd) *ocd = (GC_PTR) curr_fo -> fo_client_data;
+ if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
if (ofn) *ofn = curr_fo -> fo_fn;
/* Delete the structure for base. */
if (prev_fo == 0) {
@@ -386,7 +338,7 @@ finalization_mark_proc * mp;
/* estimate will only make the table larger than */
/* necessary. */
# if !defined(THREADS) && !defined(DBG_HDRS_ALL)
- GC_free((GC_PTR)curr_fo);
+ GC_free((void *)curr_fo);
# endif
} else {
curr_fo -> fo_fn = fn;
@@ -402,7 +354,6 @@ finalization_mark_proc * mp;
}
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
return;
}
@@ -414,7 +365,6 @@ finalization_mark_proc * mp;
if (fn == 0) {
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
return;
}
@@ -423,16 +373,15 @@ finalization_mark_proc * mp;
/* We won't collect it, hence finalizer wouldn't be run. */
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
return;
}
new_fo = (struct finalizable_object *)
GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
- if (0 == new_fo) {
+ GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
+ if (EXPECT(0 == new_fo, FALSE)) {
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
new_fo = (struct finalizable_object *)
GC_oom_fn(sizeof(struct finalizable_object));
@@ -442,7 +391,6 @@ finalization_mark_proc * mp;
}
/* It's not likely we'll make it here, but ... */
# ifdef THREADS
- DISABLE_SIGNALS();
LOCK();
# endif
}
@@ -456,63 +404,35 @@ finalization_mark_proc * mp;
fo_head[index] = new_fo;
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
}
-# if defined(__STDC__)
- void GC_register_finalizer(void * obj,
+void GC_register_finalizer(void * obj,
GC_finalization_proc fn, void * cd,
GC_finalization_proc *ofn, void ** ocd)
-# else
- void GC_register_finalizer(obj, fn, cd, ofn, ocd)
- GC_PTR obj;
- GC_finalization_proc fn;
- GC_PTR cd;
- GC_finalization_proc * ofn;
- GC_PTR * ocd;
-# endif
{
GC_register_finalizer_inner(obj, fn, cd, ofn,
ocd, GC_normal_finalize_mark_proc);
}
-# if defined(__STDC__)
- void GC_register_finalizer_ignore_self(void * obj,
+void GC_register_finalizer_ignore_self(void * obj,
GC_finalization_proc fn, void * cd,
GC_finalization_proc *ofn, void ** ocd)
-# else
- void GC_register_finalizer_ignore_self(obj, fn, cd, ofn, ocd)
- GC_PTR obj;
- GC_finalization_proc fn;
- GC_PTR cd;
- GC_finalization_proc * ofn;
- GC_PTR * ocd;
-# endif
{
GC_register_finalizer_inner(obj, fn, cd, ofn,
ocd, GC_ignore_self_finalize_mark_proc);
}
-# if defined(__STDC__)
- void GC_register_finalizer_no_order(void * obj,
+void GC_register_finalizer_no_order(void * obj,
GC_finalization_proc fn, void * cd,
GC_finalization_proc *ofn, void ** ocd)
-# else
- void GC_register_finalizer_no_order(obj, fn, cd, ofn, ocd)
- GC_PTR obj;
- GC_finalization_proc fn;
- GC_PTR cd;
- GC_finalization_proc * ofn;
- GC_PTR * ocd;
-# endif
{
GC_register_finalizer_inner(obj, fn, cd, ofn,
ocd, GC_null_finalize_mark_proc);
}
#ifndef NO_DEBUGGING
-void GC_dump_finalization()
+void GC_dump_finalization(void)
{
struct disappearing_link * curr_dl;
struct finalizable_object * curr_fo;
@@ -521,19 +441,19 @@ void GC_dump_finalization()
int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
int i;
- GC_printf0("Disappearing links:\n");
+ GC_printf("Disappearing links:\n");
for (i = 0; i < dl_size; i++) {
for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
- GC_printf2("Object: 0x%lx, Link:0x%lx\n", real_ptr, real_link);
+ GC_printf("Object: %p, Link:%p\n", real_ptr, real_link);
}
}
- GC_printf0("Finalizers:\n");
+ GC_printf("Finalizers:\n");
for (i = 0; i < fo_size; i++) {
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
- GC_printf1("Finalizable object: 0x%lx\n", real_ptr);
+ GC_printf("Finalizable object: %p\n", real_ptr);
}
}
}
@@ -541,7 +461,7 @@ void GC_dump_finalization()
/* Called with world stopped. Cause disappearing links to disappear, */
/* and invoke finalizers. */
-void GC_finalize()
+void GC_finalize(void)
{
struct disappearing_link * curr_dl, * prev_dl, * next_dl;
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
@@ -579,6 +499,7 @@ void GC_finalize()
GC_ASSERT(GC_mark_state == MS_NONE);
for (i = 0; i < fo_size; i++) {
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
+ GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
GC_MARKED_FOR_FINALIZATION(real_ptr);
@@ -591,7 +512,7 @@ void GC_finalize()
}
/* Enqueue for finalization all objects that are still */
/* unreachable. */
- GC_words_finalized = 0;
+ GC_bytes_finalized = 0;
for (i = 0; i < fo_size; i++) {
curr_fo = fo_head[i];
prev_fo = 0;
@@ -616,9 +537,9 @@ void GC_finalize()
/* see it. */
curr_fo -> fo_hidden_base =
(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
- GC_words_finalized +=
- ALIGNED_WORDS(curr_fo -> fo_object_size)
- + ALIGNED_WORDS(sizeof(struct finalizable_object));
+ GC_bytes_finalized +=
+ curr_fo -> fo_object_size
+ + sizeof(struct finalizable_object);
GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
curr_fo = next_fo;
} else {
@@ -671,7 +592,7 @@ void GC_finalize()
/* Enqueue all remaining finalizers to be run - Assumes lock is
* held, and signals are disabled */
-void GC_enqueue_all_finalizers()
+void GC_enqueue_all_finalizers(void)
{
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
ptr_t real_ptr;
@@ -679,7 +600,7 @@ void GC_enqueue_all_finalizers()
int fo_size;
fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
- GC_words_finalized = 0;
+ GC_bytes_finalized = 0;
for (i = 0; i < fo_size; i++) {
curr_fo = fo_head[i];
prev_fo = 0;
@@ -706,9 +627,8 @@ void GC_enqueue_all_finalizers()
curr_fo -> fo_hidden_base =
(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
- GC_words_finalized +=
- ALIGNED_WORDS(curr_fo -> fo_object_size)
- + ALIGNED_WORDS(sizeof(struct finalizable_object));
+ GC_bytes_finalized +=
+ curr_fo -> fo_object_size + sizeof(struct finalizable_object);
curr_fo = next_fo;
}
}
@@ -730,55 +650,49 @@ void GC_enqueue_all_finalizers()
* This routine is externally callable, so is called without
* the allocation lock.
*/
-GC_API void GC_finalize_all()
+GC_API void GC_finalize_all(void)
{
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
while (GC_fo_entries > 0) {
GC_enqueue_all_finalizers();
UNLOCK();
- ENABLE_SIGNALS();
GC_INVOKE_FINALIZERS();
- DISABLE_SIGNALS();
LOCK();
}
UNLOCK();
- ENABLE_SIGNALS();
}
#endif
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of `safe state' and */
/* getting into that safe state is expensive.) */
-int GC_should_invoke_finalizers GC_PROTO((void))
+int GC_should_invoke_finalizers(void)
{
return GC_finalize_now != 0;
}
/* Invoke finalizers for all objects that are ready to be finalized. */
/* Should be called without allocation lock. */
-int GC_invoke_finalizers()
+int GC_invoke_finalizers(void)
{
struct finalizable_object * curr_fo;
int count = 0;
- word mem_freed_before;
+ word bytes_freed_before;
DCL_LOCK_STATE;
while (GC_finalize_now != 0) {
# ifdef THREADS
- DISABLE_SIGNALS();
LOCK();
# endif
if (count == 0) {
- mem_freed_before = GC_mem_freed;
+ bytes_freed_before = GC_bytes_freed;
}
curr_fo = GC_finalize_now;
# ifdef THREADS
if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
UNLOCK();
- ENABLE_SIGNALS();
if (curr_fo == 0) break;
# else
GC_finalize_now = fo_next(curr_fo);
@@ -792,22 +706,22 @@ int GC_invoke_finalizers()
/* This is probably a bad idea. It throws off accounting if */
/* nearly all objects are finalizable. O.w. it shouldn't */
/* matter. */
- GC_free((GC_PTR)curr_fo);
+ GC_free((void *)curr_fo);
# endif
}
- if (count != 0 && mem_freed_before != GC_mem_freed) {
+ if (count != 0 && bytes_freed_before != GC_bytes_freed) {
LOCK();
- GC_finalizer_mem_freed += (GC_mem_freed - mem_freed_before);
+ GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
UNLOCK();
}
return count;
}
-void (* GC_finalizer_notifier)() = (void (*) GC_PROTO((void)))0;
+void (* GC_finalizer_notifier)() = (void (*) (void))0;
static GC_word last_finalizer_notification = 0;
-void GC_notify_or_invoke_finalizers GC_PROTO((void))
+void GC_notify_or_invoke_finalizers(void)
{
/* This is a convenient place to generate backtraces if appropriate, */
/* since that code is not callable with the allocation lock. */
@@ -816,10 +730,10 @@ void GC_notify_or_invoke_finalizers GC_PROTO((void))
if (GC_gc_no > last_back_trace_gc_no) {
word i;
-
+
# ifdef KEEP_BACK_PTRS
LOCK();
- /* Stops when GC_gc_no wraps; that's OK. */
+ /* Stops when GC_gc_no wraps; that's OK. */
last_back_trace_gc_no = (word)(-1); /* disable others. */
for (i = 0; i < GC_backtraces; ++i) {
/* FIXME: This tolerates concurrent heap mutation, */
@@ -847,28 +761,21 @@ void GC_notify_or_invoke_finalizers GC_PROTO((void))
# endif /* Otherwise GC can run concurrently and add more */
return;
}
- if (GC_finalizer_notifier != (void (*) GC_PROTO((void)))0
+ if (GC_finalizer_notifier != (void (*) (void))0
&& last_finalizer_notification != GC_gc_no) {
last_finalizer_notification = GC_gc_no;
GC_finalizer_notifier();
}
}
-# ifdef __STDC__
- GC_PTR GC_call_with_alloc_lock(GC_fn_type fn,
- GC_PTR client_data)
-# else
- GC_PTR GC_call_with_alloc_lock(fn, client_data)
- GC_fn_type fn;
- GC_PTR client_data;
-# endif
+void * GC_call_with_alloc_lock(GC_fn_type fn, void * client_data)
{
- GC_PTR result;
+ void * result;
DCL_LOCK_STATE;
# ifdef THREADS
- DISABLE_SIGNALS();
LOCK();
+ /* FIXME - This looks wrong!! */
SET_LOCK_HOLDER();
# endif
result = (*fn)(client_data);
@@ -877,22 +784,21 @@ void GC_notify_or_invoke_finalizers GC_PROTO((void))
UNSET_LOCK_HOLDER();
# endif /* o.w. UNLOCK() does it implicitly */
UNLOCK();
- ENABLE_SIGNALS();
# endif
return(result);
}
#if !defined(NO_DEBUGGING)
-void GC_print_finalization_stats()
+void GC_print_finalization_stats(void)
{
struct finalizable_object *fo = GC_finalize_now;
size_t ready = 0;
- GC_printf2("%lu finalization table entries; %lu disappearing links\n",
+ GC_printf("%u finalization table entries; %u disappearing links\n",
GC_fo_entries, GC_dl_entries);
for (; 0 != fo; fo = fo_next(fo)) ++ready;
- GC_printf1("%lu objects are eligible for immediate finalization\n", ready);
+ GC_printf("%u objects are eligible for immediate finalization\n", ready);
}
#endif /* NO_DEBUGGING */
diff --git a/gc.mak b/gc.mak
index 5f0b5462..73cad89f 100644
--- a/gc.mak
+++ b/gc.mak
@@ -114,8 +114,8 @@ CLEAN :
CPP=cl.exe
# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /c
-# ADD CPP /nologo /MD /W3 /GX /O2 /I include /D "NDEBUG" /D "SILENT" /D "GC_BUILD" /D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D "GC_WIN32_THREADS" /FR /YX /c
-CPP_PROJ=/nologo /MD /W3 /GX /O2 /I include /D "NDEBUG" /D "SILENT" /D "GC_BUILD" /D\
+# ADD CPP /nologo /MD /W3 /GX /O2 /I include /D "NDEBUG" /D "GC_BUILD" /D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D "GC_WIN32_THREADS" /FR /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /I include /D "NDEBUG" /D "GC_BUILD" /D\
"WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D\
"GC_WIN32_THREADS" /FR"$(INTDIR)/" /Fp"$(INTDIR)/gc.pch" /YX /Fo"$(INTDIR)/" /c
CPP_OBJS=.\Release/
@@ -296,8 +296,8 @@ CLEAN :
CPP=cl.exe
# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /c
-# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I include /D "_DEBUG" /D "SILENT" /D "GC_BUILD" /D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D "GC_WIN32_THREADS" /FR /YX /c
-CPP_PROJ=/nologo /MDd /W3 /Gm /GX /Zi /Od /I include /D "_DEBUG" /D "SILENT" /D "GC_BUILD"\
+# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I include /D "_DEBUG" /D "GC_BUILD" /D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D "GC_WIN32_THREADS" /FR /YX /c
+CPP_PROJ=/nologo /MDd /W3 /Gm /GX /Zi /Od /I include /D "_DEBUG" /D "GC_BUILD"\
/D "WIN32" /D "_WINDOWS" /D "ALL_INTERIOR_POINTERS" /D "__STDC__" /D\
"GC_WIN32_THREADS" /FR"$(INTDIR)/" /Fp"$(INTDIR)/gc.pch" /YX /Fo"$(INTDIR)/"\
/Fd"$(INTDIR)/" /c
diff --git a/gc_dlopen.c b/gc_dlopen.c
index 4c690edc..79aaeb40 100644
--- a/gc_dlopen.c
+++ b/gc_dlopen.c
@@ -65,9 +65,7 @@
#ifdef GC_USE_LD_WRAP
void * __wrap_dlopen(const char *path, int mode)
#else
- void * GC_dlopen(path, mode)
- GC_CONST char * path;
- int mode;
+ void * GC_dlopen(const char *path, int mode)
#endif
{
void * result;
diff --git a/gcc_support.c b/gcc_support.c
deleted file mode 100644
index e8a7b820..00000000
--- a/gcc_support.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/***************************************************************************
-
-Interface between g++ and Boehm GC
-
- Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
-
- THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
-
- Permission is hereby granted to copy this code for any purpose,
- provided the above notices are retained on all copies.
-
- Last modified on Sun Jul 16 23:21:14 PDT 1995 by ellis
-
-This module provides runtime support for implementing the
-Ellis/Detlefs GC proposal, "Safe, Efficient Garbage Collection for
-C++", within g++, using its -fgc-keyword extension. It defines
-versions of __builtin_new, __builtin_new_gc, __builtin_vec_new,
-__builtin_vec_new_gc, __builtin_delete, and __builtin_vec_delete that
-invoke the Bohem GC. It also implements the WeakPointer.h interface.
-
-This module assumes the following configuration options of the Boehm GC:
-
- -DALL_INTERIOR_POINTERS
- -DDONT_ADD_BYTE_AT_END
-
-This module adds its own required padding to the end of objects to
-support C/C++ "one-past-the-object" pointer semantics.
-
-****************************************************************************/
-
-#include <stddef.h>
-#include "gc.h"
-
-#if defined(__STDC__)
-# define PROTO( args ) args
-#else
-# define PROTO( args ) ()
-# endif
-
-#define BITSPERBYTE 8
- /* What's the portable way to do this? */
-
-
-typedef void (*vfp) PROTO(( void ));
-extern vfp __new_handler;
-extern void __default_new_handler PROTO(( void ));
-
-
-/* A destructor_proc is the compiler generated procedure representing a
-C++ destructor. The "flag" argument is a hidden argument following some
-compiler convention. */
-
-typedef (*destructor_proc) PROTO(( void* this, int flag ));
-
-
-/***************************************************************************
-
-A BI_header is the header the compiler adds to the front of
-new-allocated arrays of objects with destructors. The header is
-padded out to a double, because that's what the compiler does to
-ensure proper alignment of array elements on some architectures.
-
-int NUM_ARRAY_ELEMENTS (void* o)
- returns the number of array elements for array object o.
-
-char* FIRST_ELEMENT_P (void* o)
- returns the address of the first element of array object o.
-
-***************************************************************************/
-
-typedef struct BI_header {
- int nelts;
- char padding [sizeof( double ) - sizeof( int )];
- /* Better way to do this? */
-} BI_header;
-
-#define NUM_ARRAY_ELEMENTS( o ) \
- (((BI_header*) o)->nelts)
-
-#define FIRST_ELEMENT_P( o ) \
- ((char*) o + sizeof( BI_header ))
-
-
-/***************************************************************************
-
-The __builtin_new routines add a descriptor word to the end of each
-object. The descriptor serves two purposes.
-
-First, the descriptor acts as padding, implementing C/C++ pointer
-semantics. C and C++ allow a valid array pointer to be incremented
-one past the end of an object. The extra padding ensures that the
-collector will recognize that such a pointer points to the object and
-not the next object in memory.
-
-Second, the descriptor stores three extra pieces of information,
-whether an object has a registered finalizer (destructor), whether it
-may have any weak pointers referencing it, and for collectible arrays,
-the element size of the array. The element size is required for the
-array's finalizer to iterate through the elements of the array. (An
-alternative design would have the compiler generate a finalizer
-procedure for each different array type. But given the overhead of
-finalization, there isn't any efficiency to be gained by that.)
-
-The descriptor must be added to non-collectible as well as collectible
-objects, since the Ellis/Detlefs proposal allows "pointer to gc T" to
-be assigned to a "pointer to T", which could then be deleted. Thus,
-__builtin_delete must determine at runtime whether an object is
-collectible, whether it has weak pointers referencing it, and whether
-it may have a finalizer that needs unregistering. Though
-GC_REGISTER_FINALIZER doesn't care if you ask it to unregister a
-finalizer for an object that doesn't have one, it is a non-trivial
-procedure that does a hash look-up, etc. The descriptor trades a
-little extra space for a significant increase in time on the fast path
-through delete. (A similar argument applies to
-GC_UNREGISTER_DISAPPEARING_LINK).
-
-For non-array types, the space for the descriptor could be shrunk to a
-single byte for storing the "has finalizer" flag. But this would save
-space only on arrays of char (whose size is not a multiple of the word
-size) and structs whose largest member is less than a word in size
-(very infrequent). And it would require that programmers actually
-remember to call "delete[]" instead of "delete" (which they should,
-but there are probably lots of buggy programs out there). For the
-moment, the space savings seems not worthwhile, especially considering
-that the Boehm GC is already quite space competitive with other
-malloc's.
-
-
-Given a pointer o to the base of an object:
-
-Descriptor* DESCRIPTOR (void* o)
- returns a pointer to the descriptor for o.
-
-The implementation of descriptors relies on the fact that the GC
-implementation allocates objects in units of the machine's natural
-word size (e.g. 32 bits on a SPARC, 64 bits on an Alpha).
-
-**************************************************************************/
-
-typedef struct Descriptor {
- unsigned has_weak_pointers: 1;
- unsigned has_finalizer: 1;
- unsigned element_size: BITSPERBYTE * sizeof( unsigned ) - 2;
-} Descriptor;
-
-#define DESCRIPTOR( o ) \
- ((Descriptor*) ((char*)(o) + GC_size( o ) - sizeof( Descriptor )))
-
-
-/**************************************************************************
-
-Implementations of global operator new() and operator delete()
-
-***************************************************************************/
-
-
-void* __builtin_new( size )
- size_t size;
- /*
- For non-gc non-array types, the compiler generates calls to
- __builtin_new, which allocates non-collected storage via
- GC_MALLOC_UNCOLLECTABLE. This ensures that the non-collected
- storage will be part of the collector's root set, required by the
- Ellis/Detlefs semantics. */
-{
- vfp handler = __new_handler ? __new_handler : __default_new_handler;
-
- while (1) {
- void* o = GC_MALLOC_UNCOLLECTABLE( size + sizeof( Descriptor ) );
- if (o != 0) return o;
- (*handler) ();}}
-
-
-void* __builtin_vec_new( size )
- size_t size;
- /*
- For non-gc array types, the compiler generates calls to
- __builtin_vec_new. */
-{
- return __builtin_new( size );}
-
-
-void* __builtin_new_gc( size )
- size_t size;
- /*
- For gc non-array types, the compiler generates calls to
- __builtin_new_gc, which allocates collected storage via
- GC_MALLOC. */
-{
- vfp handler = __new_handler ? __new_handler : __default_new_handler;
-
- while (1) {
- void* o = GC_MALLOC( size + sizeof( Descriptor ) );
- if (o != 0) return o;
- (*handler) ();}}
-
-
-void* __builtin_new_gc_a( size )
- size_t size;
- /*
- For non-pointer-containing gc non-array types, the compiler
- generates calls to __builtin_new_gc_a, which allocates collected
- storage via GC_MALLOC_ATOMIC. */
-{
- vfp handler = __new_handler ? __new_handler : __default_new_handler;
-
- while (1) {
- void* o = GC_MALLOC_ATOMIC( size + sizeof( Descriptor ) );
- if (o != 0) return o;
- (*handler) ();}}
-
-
-void* __builtin_vec_new_gc( size )
- size_t size;
- /*
- For gc array types, the compiler generates calls to
- __builtin_vec_new_gc. */
-{
- return __builtin_new_gc( size );}
-
-
-void* __builtin_vec_new_gc_a( size )
- size_t size;
- /*
- For non-pointer-containing gc array types, the compiler generates
- calls to __builtin_vec_new_gc_a. */
-{
- return __builtin_new_gc_a( size );}
-
-
-static void call_destructor( o, data )
- void* o;
- void* data;
- /*
- call_destructor is the GC finalizer proc registered for non-array
- gc objects with destructors. Its client data is the destructor
- proc, which it calls with the magic integer 2, a special flag
- obeying the compiler convention for destructors. */
-{
- ((destructor_proc) data)( o, 2 );}
-
-
-void* __builtin_new_gc_dtor( o, d )
- void* o;
- destructor_proc d;
- /*
- The compiler generates a call to __builtin_new_gc_dtor to register
- the destructor "d" of a non-array gc object "o" as a GC finalizer.
- The destructor is registered via
- GC_REGISTER_FINALIZER_IGNORE_SELF, which causes the collector to
- ignore pointers from the object to itself when determining when
- the object can be finalized. This is necessary due to the self
- pointers used in the internal representation of multiply-inherited
- objects. */
-{
- Descriptor* desc = DESCRIPTOR( o );
-
- GC_REGISTER_FINALIZER_IGNORE_SELF( o, call_destructor, d, 0, 0 );
- desc->has_finalizer = 1;}
-
-
-static void call_array_destructor( o, data )
- void* o;
- void* data;
- /*
- call_array_destructor is the GC finalizer proc registered for gc
- array objects whose elements have destructors. Its client data is
- the destructor proc. It iterates through the elements of the
- array in reverse order, calling the destructor on each. */
-{
- int num = NUM_ARRAY_ELEMENTS( o );
- Descriptor* desc = DESCRIPTOR( o );
- size_t size = desc->element_size;
- char* first_p = FIRST_ELEMENT_P( o );
- char* p = first_p + (num - 1) * size;
-
- if (num > 0) {
- while (1) {
- ((destructor_proc) data)( p, 2 );
- if (p == first_p) break;
- p -= size;}}}
-
-
-void* __builtin_vec_new_gc_dtor( first_elem, d, element_size )
- void* first_elem;
- destructor_proc d;
- size_t element_size;
- /*
- The compiler generates a call to __builtin_vec_new_gc_dtor to
- register the destructor "d" of a gc array object as a GC
- finalizer. "first_elem" points to the first element of the array,
- *not* the beginning of the object (this makes the generated call
- to this function smaller). The elements of the array are of size
- "element_size". The destructor is registered as in
- _builtin_new_gc_dtor. */
-{
- void* o = (char*) first_elem - sizeof( BI_header );
- Descriptor* desc = DESCRIPTOR( o );
-
- GC_REGISTER_FINALIZER_IGNORE_SELF( o, call_array_destructor, d, 0, 0 );
- desc->element_size = element_size;
- desc->has_finalizer = 1;}
-
-
-void __builtin_delete( o )
- void* o;
- /*
- The compiler generates calls to __builtin_delete for operator
- delete(). The GC currently requires that any registered
- finalizers be unregistered before explicitly freeing an object.
- If the object has any weak pointers referencing it, we can't
- actually free it now. */
-{
- if (o != 0) {
- Descriptor* desc = DESCRIPTOR( o );
- if (desc->has_finalizer) GC_REGISTER_FINALIZER( o, 0, 0, 0, 0 );
- if (! desc->has_weak_pointers) GC_FREE( o );}}
-
-
-void __builtin_vec_delete( o )
- void* o;
- /*
- The compiler generates calls to __builitn_vec_delete for operator
- delete[](). */
-{
- __builtin_delete( o );}
-
-
-/**************************************************************************
-
-Implementations of the template class WeakPointer from WeakPointer.h
-
-***************************************************************************/
-
-typedef struct WeakPointer {
- void* pointer;
-} WeakPointer;
-
-
-void* _WeakPointer_New( t )
- void* t;
-{
- if (t == 0) {
- return 0;}
- else {
- void* base = GC_base( t );
- WeakPointer* wp =
- (WeakPointer*) GC_MALLOC_ATOMIC( sizeof( WeakPointer ) );
- Descriptor* desc = DESCRIPTOR( base );
-
- wp->pointer = t;
- desc->has_weak_pointers = 1;
- GC_general_register_disappearing_link( &wp->pointer, base );
- return wp;}}
-
-
-static void* PointerWithLock( wp )
- WeakPointer* wp;
-{
- if (wp == 0 || wp->pointer == 0) {
- return 0;}
- else {
- return (void*) wp->pointer;}}
-
-
-void* _WeakPointer_Pointer( wp )
- WeakPointer* wp;
-{
- return (void*) GC_call_with_alloc_lock( PointerWithLock, wp );}
-
-
-typedef struct EqualClosure {
- WeakPointer* wp1;
- WeakPointer* wp2;
-} EqualClosure;
-
-
-static void* EqualWithLock( ec )
- EqualClosure* ec;
-{
- if (ec->wp1 == 0 || ec->wp2 == 0) {
- return (void*) (ec->wp1 == ec->wp2);}
- else {
- return (void*) (ec->wp1->pointer == ec->wp2->pointer);}}
-
-
-int _WeakPointer_Equal( wp1, wp2 )
- WeakPointer* wp1;
- WeakPointer* wp2;
-{
- EqualClosure ec;
-
- ec.wp1 = wp1;
- ec.wp2 = wp2;
- return (int) GC_call_with_alloc_lock( EqualWithLock, &ec );}
-
-
-int _WeakPointer_Hash( wp )
- WeakPointer* wp;
-{
- return (int) _WeakPointer_Pointer( wp );}
-
-
-/**************************************************************************
-
-Implementations of the template class CleanUp from WeakPointer.h
-
-***************************************************************************/
-
-typedef struct Closure {
- void (*c) PROTO(( void* d, void* t ));
- ptrdiff_t t_offset;
- void* d;
-} Closure;
-
-
-static void _CleanUp_CallClosure( obj, data )
- void* obj;
- void* data;
-{
- Closure* closure = (Closure*) data;
- closure->c( closure->d, (char*) obj + closure->t_offset );}
-
-
-void _CleanUp_Set( t, c, d )
- void* t;
- void (*c) PROTO(( void* d, void* t ));
- void* d;
-{
- void* base = GC_base( t );
- Descriptor* desc = DESCRIPTOR( t );
-
- if (c == 0) {
- GC_REGISTER_FINALIZER_IGNORE_SELF( base, 0, 0, 0, 0 );
- desc->has_finalizer = 0;}
- else {
- Closure* closure = (Closure*) GC_MALLOC( sizeof( Closure ) );
- closure->c = c;
- closure->t_offset = (char*) t - (char*) base;
- closure->d = d;
- GC_REGISTER_FINALIZER_IGNORE_SELF( base, _CleanUp_CallClosure,
- closure, 0, 0 );
- desc->has_finalizer = 1;}}
-
-
-void _CleanUp_Call( t )
- void* t;
-{
- /* ? Aren't we supposed to deactivate weak pointers to t too?
- Why? */
- void* base = GC_base( t );
- void* d;
- GC_finalization_proc f;
-
- GC_REGISTER_FINALIZER( base, 0, 0, &f, &d );
- f( base, d );}
-
-
-typedef struct QueueElem {
- void* o;
- GC_finalization_proc f;
- void* d;
- struct QueueElem* next;
-} QueueElem;
-
-
-void* _CleanUp_Queue_NewHead()
-{
- return GC_MALLOC( sizeof( QueueElem ) );}
-
-
-static void _CleanUp_Queue_Enqueue( obj, data )
- void* obj;
- void* data;
-{
- QueueElem* q = (QueueElem*) data;
- QueueElem* head = q->next;
-
- q->o = obj;
- q->next = head->next;
- head->next = q;}
-
-
-void _CleanUp_Queue_Set( h, t )
- void* h;
- void* t;
-{
- QueueElem* head = (QueueElem*) h;
- void* base = GC_base( t );
- void* d;
- GC_finalization_proc f;
- QueueElem* q = (QueueElem*) GC_MALLOC( sizeof( QueueElem ) );
-
- GC_REGISTER_FINALIZER( base, _CleanUp_Queue_Enqueue, q, &f, &d );
- q->f = f;
- q->d = d;
- q->next = head;}
-
-
-int _CleanUp_Queue_Call( h )
- void* h;
-{
- QueueElem* head = (QueueElem*) h;
- QueueElem* q = head->next;
-
- if (q == 0) {
- return 0;}
- else {
- head->next = q->next;
- q->next = 0;
- if (q->f != 0) q->f( q->o, q->d );
- return 1;}}
-
-
-
diff --git a/gcj_mlc.c b/gcj_mlc.c
index 8b1da826..31aed25e 100644
--- a/gcj_mlc.c
+++ b/gcj_mlc.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -58,20 +58,16 @@ void GC_init_gcj_malloc(int mp_index, void * /* really GC_mark_proc */mp)
DCL_LOCK_STATE;
GC_init(); /* In case it's not already done. */
- DISABLE_SIGNALS();
LOCK();
if (GC_gcj_malloc_initialized) {
UNLOCK();
- ENABLE_SIGNALS();
return;
}
GC_gcj_malloc_initialized = TRUE;
ignore_gcj_info = (0 != GETENV("GC_IGNORE_GCJ_INFO"));
-# ifdef CONDPRINT
- if (GC_print_stats && ignore_gcj_info) {
- GC_printf0("Gcj-style type information is disabled!\n");
- }
-# endif
+ if (GC_print_stats && ignore_gcj_info) {
+ GC_log_printf("Gcj-style type information is disabled!\n");
+ }
GC_ASSERT(GC_mark_procs[mp_index] == (GC_mark_proc)0); /* unused */
GC_mark_procs[mp_index] = (GC_mark_proc)mp;
if (mp_index >= GC_n_mark_procs) ABORT("GC_init_gcj_malloc: bad index");
@@ -103,16 +99,15 @@ void GC_init_gcj_malloc(int mp_index, void * /* really GC_mark_proc */mp)
FALSE, TRUE);
}
UNLOCK();
- ENABLE_SIGNALS();
}
-ptr_t GC_clear_stack();
+void * GC_clear_stack(void *);
#define GENERAL_MALLOC(lb,k) \
- (GC_PTR)GC_clear_stack(GC_generic_malloc_inner((word)lb, k))
+ GC_clear_stack(GC_generic_malloc_inner((word)lb, k))
#define GENERAL_MALLOC_IOP(lb,k) \
- (GC_PTR)GC_clear_stack(GC_generic_malloc_inner_ignore_off_page(lb, k))
+ GC_clear_stack(GC_generic_malloc_inner_ignore_off_page(lb, k))
/* We need a mechanism to release the lock and invoke finalizers. */
/* We don't really have an opportunity to do this on a rarely executed */
@@ -138,18 +133,14 @@ static void maybe_finalize()
/* This adds a byte at the end of the object if GC_malloc would.*/
void * GC_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
{
-register ptr_t op;
-register ptr_t * opp;
-register word lw;
-DCL_LOCK_STATE;
+ ptr_t op;
+ ptr_t * opp;
+ word lg;
+ DCL_LOCK_STATE;
- if( EXPECT(SMALL_OBJ(lb), 1) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_gcjobjfreelist[lw]);
+ if(SMALL_OBJ(lb)) {
+ lg = GC_size_map[lb];
+ opp = &(GC_gcjobjfreelist[lg]);
LOCK();
op = *opp;
if(EXPECT(op == 0, 0)) {
@@ -159,12 +150,10 @@ DCL_LOCK_STATE;
UNLOCK();
return(GC_oom_fn(lb));
}
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb]; /* May have been uninitialized. */
-# endif
+ lg = GC_size_map[lb]; /* May have been uninitialized. */
} else {
*opp = obj_link(op);
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
}
*(void **)op = ptr_to_struct_containing_descr;
GC_ASSERT(((void **)op)[1] == 0);
@@ -180,15 +169,15 @@ DCL_LOCK_STATE;
*(void **)op = ptr_to_struct_containing_descr;
UNLOCK();
}
- return((GC_PTR) op);
+ return((void *) op);
}
/* Similar to GC_gcj_malloc, but add debug info. This is allocated */
/* with GC_gcj_debug_kind. */
-GC_PTR GC_debug_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr,
+void * GC_debug_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr,
GC_EXTRA_PARAMS)
{
- GC_PTR result;
+ void * result;
/* We're careful to avoid extra calls, which could */
/* confuse the backtrace. */
@@ -213,92 +202,25 @@ GC_PTR GC_debug_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr,
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
-/* Similar to GC_gcj_malloc, but the size is in words, and we don't */
-/* adjust it. The size is assumed to be such that it can be */
-/* allocated as a small object. */
-void * GC_gcj_fast_malloc(size_t lw, void * ptr_to_struct_containing_descr)
-{
-ptr_t op;
-ptr_t * opp;
-DCL_LOCK_STATE;
-
- opp = &(GC_gcjobjfreelist[lw]);
- LOCK();
- op = *opp;
- if( EXPECT(op == 0, 0) ) {
- maybe_finalize();
- op = (ptr_t)GC_clear_stack(
- GC_generic_malloc_words_small_inner(lw, GC_gcj_kind));
- if (0 == op) {
- UNLOCK();
- return GC_oom_fn(WORDS_TO_BYTES(lw));
- }
- } else {
- *opp = obj_link(op);
- GC_words_allocd += lw;
- }
- *(void **)op = ptr_to_struct_containing_descr;
- UNLOCK();
- return((GC_PTR) op);
-}
-
-/* And a debugging version of the above: */
-void * GC_debug_gcj_fast_malloc(size_t lw,
- void * ptr_to_struct_containing_descr,
- GC_EXTRA_PARAMS)
-{
- GC_PTR result;
- size_t lb = WORDS_TO_BYTES(lw);
-
- /* We clone the code from GC_debug_gcj_malloc, so that we */
- /* dont end up with extra frames on the stack, which could */
- /* confuse the backtrace. */
- LOCK();
- maybe_finalize();
- result = GC_generic_malloc_inner(lb + DEBUG_BYTES, GC_gcj_debug_kind);
- if (result == 0) {
- UNLOCK();
- GC_err_printf2("GC_debug_gcj_fast_malloc(%ld, 0x%lx) returning NIL (",
- (unsigned long) lw,
- (unsigned long) ptr_to_struct_containing_descr);
- GC_err_puts(s);
- GC_err_printf1(":%ld)\n", (unsigned long)i);
- return GC_oom_fn(WORDS_TO_BYTES(lw));
- }
- *((void **)((ptr_t)result + sizeof(oh))) = ptr_to_struct_containing_descr;
- UNLOCK();
- if (!GC_debugging_started) {
- GC_start_debugging();
- }
- ADD_CALL_CHAIN(result, ra);
- return (GC_store_debug_info(result, (word)lb, s, (word)i));
-}
-
void * GC_gcj_malloc_ignore_off_page(size_t lb,
void * ptr_to_struct_containing_descr)
{
-register ptr_t op;
-register ptr_t * opp;
-register word lw;
-DCL_LOCK_STATE;
+ ptr_t op;
+ ptr_t * opp;
+ word lg;
+ DCL_LOCK_STATE;
- if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_gcjobjfreelist[lw]);
+ if(SMALL_OBJ(lb)) {
+ lg = GC_size_map[lb];
+ opp = &(GC_gcjobjfreelist[lg]);
LOCK();
if( (op = *opp) == 0 ) {
maybe_finalize();
op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_gcj_kind);
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb]; /* May have been uninitialized. */
-# endif
+ lg = GC_size_map[lb]; /* May have been uninitialized. */
} else {
*opp = obj_link(op);
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
}
*(void **)op = ptr_to_struct_containing_descr;
UNLOCK();
@@ -311,7 +233,7 @@ DCL_LOCK_STATE;
}
UNLOCK();
}
- return((GC_PTR) op);
+ return((void *) op);
}
#else
diff --git a/headers.c b/headers.c
index b7be1d84..8b14b4be 100644
--- a/headers.c
+++ b/headers.c
@@ -33,17 +33,75 @@ bottom_index * GC_all_bottom_indices_end = 0;
/* bottom_index. */
/* Non-macro version of header location routine */
-hdr * GC_find_header(h)
-ptr_t h;
+hdr * GC_find_header(ptr_t h)
{
# ifdef HASH_TL
- register hdr * result;
+ hdr * result;
GET_HDR(h, result);
return(result);
# else
return(HDR_INNER(h));
# endif
}
+
+/* Handle a header cache miss. Returns a pointer to the */
+/* header corresponding to p, if p can possibly be a valid */
+/* object pointer, and 0 otherwise. */
+/* GUARANTEED to return 0 for a pointer past the first page */
+/* of an object unless both GC_all_interior_pointers is set */
+/* and p is in fact a valid object pointer. */
+#ifdef PRINT_BLACK_LIST
+ hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
+#else
+ hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
+#endif
+{
+ hdr *hhdr;
+ HC_MISS();
+ GET_HDR(p, hhdr);
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ if (GC_all_interior_pointers) {
+ if (hhdr != 0) {
+ ptr_t current = p;
+
+ current = (ptr_t)HBLKPTR(current);
+ do {
+ current = current - HBLKSIZE*(word)hhdr;
+ hhdr = HDR(current);
+ } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
+ /* current points to near the start of the large object */
+ if (hhdr -> hb_flags & IGNORE_OFF_PAGE
+ || HBLK_IS_FREE(hhdr))
+ return 0;
+ if (p - current >= (ptrdiff_t)(hhdr->hb_sz)) {
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+ /* Pointer past the end of the block */
+ return 0;
+ }
+ } else {
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+ }
+ return hhdr;
+ /* Pointers past the first page are probably too rare */
+ /* to add them to the cache. We don't. */
+ /* And correctness relies on the fact that we don't. */
+ } else {
+ if (hhdr == 0) {
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+ }
+ return 0;
+ }
+ } else {
+ if (HBLK_IS_FREE(hhdr)) {
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+ return 0;
+ } else {
+ hce -> block_addr = (word)(p) >> LOG_HBLKSIZE;
+ hce -> hce_hdr = hhdr;
+ return hhdr;
+ }
+ }
+}
/* Routines to dynamically allocate collector data structures that will */
/* never be freed. */
@@ -53,18 +111,12 @@ static ptr_t scratch_free_ptr = 0;
/* GC_scratch_last_end_ptr is end point of last obtained scratch area. */
/* GC_scratch_end_ptr is end point of current scratch area. */
-ptr_t GC_scratch_alloc(bytes)
-register word bytes;
+ptr_t GC_scratch_alloc(size_t bytes)
{
register ptr_t result = scratch_free_ptr;
-# ifdef ALIGN_DOUBLE
-# define GRANULARITY (2 * sizeof(word))
-# else
-# define GRANULARITY sizeof(word)
-# endif
- bytes += GRANULARITY-1;
- bytes &= ~(GRANULARITY-1);
+ bytes += GRANULE_BYTES-1;
+ bytes &= ~(GRANULE_BYTES-1);
scratch_free_ptr += bytes;
if (scratch_free_ptr <= GC_scratch_end_ptr) {
return(result);
@@ -86,9 +138,8 @@ register word bytes;
}
result = (ptr_t)GET_MEM(bytes_to_get);
if (result == 0) {
-# ifdef PRINTSTATS
- GC_printf0("Out of memory - trying to allocate less\n");
-# endif
+ if (GC_print_stats)
+ GC_printf("Out of memory - trying to allocate less\n");
scratch_free_ptr -= bytes;
bytes_to_get = bytes;
# ifdef USE_MMAP
@@ -107,7 +158,7 @@ register word bytes;
static hdr * hdr_free_list = 0;
/* Return an uninitialized header */
-static hdr * alloc_hdr()
+static hdr * alloc_hdr(void)
{
register hdr * result;
@@ -120,21 +171,18 @@ static hdr * alloc_hdr()
return(result);
}
-static void free_hdr(hhdr)
-hdr * hhdr;
+static void free_hdr(hdr * hhdr)
{
hhdr -> hb_next = (struct hblk *) hdr_free_list;
hdr_free_list = hhdr;
}
-hdr * GC_invalid_header;
-
#ifdef USE_HDR_CACHE
word GC_hdr_cache_hits = 0;
word GC_hdr_cache_misses = 0;
#endif
-void GC_init_headers()
+void GC_init_headers(void)
{
register unsigned i;
@@ -143,14 +191,11 @@ void GC_init_headers()
for (i = 0; i < TOP_SZ; i++) {
GC_top_index[i] = GC_all_nils;
}
- GC_invalid_header = alloc_hdr();
- GC_invalidate_map(GC_invalid_header);
}
/* Make sure that there is a bottom level index block for address addr */
/* Return FALSE on failure. */
-static GC_bool get_index(addr)
-word addr;
+static GC_bool get_index(word addr)
{
word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
bottom_index * r;
@@ -201,8 +246,7 @@ word addr;
/* Install a header for block h. */
/* The header is uninitialized. */
/* Returns the header or 0 on failure. */
-struct hblkhdr * GC_install_header(h)
-register struct hblk * h;
+struct hblkhdr * GC_install_header(struct hblk *h)
{
hdr * result;
@@ -216,9 +260,7 @@ register struct hblk * h;
}
/* Set up forwarding counts for block h of size sz */
-GC_bool GC_install_counts(h, sz)
-register struct hblk * h;
-register word sz; /* bytes */
+GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
{
register struct hblk * hbp;
register int i;
@@ -235,8 +277,7 @@ register word sz; /* bytes */
}
/* Remove the header for block h */
-void GC_remove_header(h)
-register struct hblk * h;
+void GC_remove_header(struct hblk *h)
{
hdr ** ha;
@@ -246,9 +287,7 @@ register struct hblk * h;
}
/* Remove forwarding counts for h */
-void GC_remove_counts(h, sz)
-register struct hblk * h;
-register word sz; /* bytes */
+void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
{
register struct hblk * hbp;
@@ -259,18 +298,17 @@ register word sz; /* bytes */
/* Apply fn to all allocated blocks */
/*VARARGS1*/
-void GC_apply_to_all_blocks(fn, client_data)
-void (*fn) GC_PROTO((struct hblk *h, word client_data));
-word client_data;
+void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
+ word client_data)
{
- register int j;
- register bottom_index * index_p;
+ int j;
+ bottom_index * index_p;
for (index_p = GC_all_bottom_indices; index_p != 0;
index_p = index_p -> asc_link) {
for (j = BOTTOM_SZ-1; j >= 0;) {
if (!IS_FORWARDING_ADDR_OR_NIL(index_p->index[j])) {
- if (index_p->index[j]->hb_map != GC_invalid_map) {
+ if (!HBLK_IS_FREE(index_p->index[j])) {
(*fn)(((struct hblk *)
(((index_p->key << LOG_BOTTOM_SZ) + (word)j)
<< LOG_HBLKSIZE)),
@@ -288,8 +326,7 @@ word client_data;
/* Get the next valid block whose address is at least h */
/* Return 0 if there is none. */
-struct hblk * GC_next_used_block(h)
-struct hblk * h;
+struct hblk * GC_next_used_block(struct hblk *h)
{
register bottom_index * bi;
register word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
@@ -307,7 +344,7 @@ struct hblk * h;
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
j++;
} else {
- if (hhdr->hb_map != GC_invalid_map) {
+ if (!HBLK_IS_FREE(hhdr)) {
return((struct hblk *)
(((bi -> key << LOG_BOTTOM_SZ) + j)
<< LOG_HBLKSIZE));
@@ -325,8 +362,7 @@ struct hblk * h;
/* Get the last (highest address) block whose address is */
/* at most h. Return 0 if there is none. */
/* Unlike the above, this may return a free block. */
-struct hblk * GC_prev_block(h)
-struct hblk * h;
+struct hblk * GC_prev_block(struct hblk *h)
{
register bottom_index * bi;
register signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
diff --git a/if_mach.c b/if_mach.c
index 3dcccf21..d6e0a70d 100644
--- a/if_mach.c
+++ b/if_mach.c
@@ -5,10 +5,7 @@
# include <string.h>
# include <unistd.h>
-int main(argc, argv, envp)
-int argc;
-char ** argv;
-char ** envp;
+int main(int argc, char **argv, char **envp)
{
if (argc < 4) goto Usage;
if (strcmp(MACH_TYPE, argv[1]) != 0) return(0);
diff --git a/if_not_there.c b/if_not_there.c
index 8691e925..7af6fba4 100644
--- a/if_not_there.c
+++ b/if_not_there.c
@@ -8,10 +8,7 @@
#include <dirent.h>
#endif /* __DJGPP__ */
-int main(argc, argv, envp)
-int argc;
-char ** argv;
-char ** envp;
+int main(int argc, char **argv, char **envp)
{
FILE * f;
#ifdef __DJGPP__
diff --git a/include/Makefile.am b/include/Makefile.am
index 306026b0..5bc68ddc 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -17,7 +17,7 @@
#
pkginclude_HEADERS = gc.h gc_typed.h gc_inl.h \
gc_inline.h gc_mark.h gc_cpp.h \
- weakpointer.h gc_alloc.h new_gc_alloc.h \
+ weakpointer.h new_gc_alloc.h \
gc_allocator.h gc_backptr.h \
gc_gcj.h gc_local_alloc.h leak_detector.h \
gc_amiga_redirects.h gc_pthread_redirects.h \
@@ -28,7 +28,7 @@ pkginclude_HEADERS = gc.h gc_typed.h gc_inl.h \
dist_noinst_HEADERS = private/gc_hdrs.h \
private/gc_priv.h private/gcconfig.h \
private/gc_pmark.h private/gc_locks.h \
- private/solaris_threads.h private/dbg_mlc.h \
+ private/dbg_mlc.h \
private/specific.h private/cord_pos.h \
private/pthread_support.h private/pthread_stop_world.h \
private/darwin_semaphore.h private/darwin_stop_world.h \
diff --git a/include/Makefile.in b/include/Makefile.in
index ff95e78c..1a1b58fe 100644
--- a/include/Makefile.in
+++ b/include/Makefile.in
@@ -105,7 +105,7 @@ OBJDUMP = @OBJDUMP@
PACKAGE = @PACKAGE@
RANLIB = @RANLIB@
STRIP = @STRIP@
-THREADDLLIBS = @THREADDLLIBS@
+THREADLIBS = @THREADLIBS@
UNWINDLIBS = @UNWINDLIBS@
VERSION = @VERSION@
addincludes = @addincludes@
@@ -121,7 +121,7 @@ target_all = @target_all@
#
pkginclude_HEADERS = gc.h gc_typed.h gc_inl.h \
gc_inline.h gc_mark.h gc_cpp.h \
- weakpointer.h gc_alloc.h new_gc_alloc.h \
+ weakpointer.h new_gc_alloc.h \
gc_allocator.h gc_backptr.h \
gc_gcj.h gc_local_alloc.h leak_detector.h \
gc_amiga_redirects.h gc_pthread_redirects.h \
@@ -133,7 +133,7 @@ pkginclude_HEADERS = gc.h gc_typed.h gc_inl.h \
dist_noinst_HEADERS = private/gc_hdrs.h \
private/gc_priv.h private/gcconfig.h \
private/gc_pmark.h private/gc_locks.h \
- private/solaris_threads.h private/dbg_mlc.h \
+ private/dbg_mlc.h \
private/specific.h private/cord_pos.h \
private/pthread_support.h private/pthread_stop_world.h \
private/darwin_semaphore.h private/darwin_stop_world.h \
diff --git a/include/gc.h b/include/gc.h
index 5f151701..064b0186 100644
--- a/include/gc.h
+++ b/include/gc.h
@@ -32,16 +32,6 @@
# include "gc_config_macros.h"
-# if defined(__STDC__) || defined(__cplusplus)
-# define GC_PROTO(args) args
- typedef void * GC_PTR;
-# define GC_CONST const
-# else
-# define GC_PROTO(args) ()
- typedef char * GC_PTR;
-# define GC_CONST
-# endif
-
# ifdef __cplusplus
extern "C" {
# endif
@@ -83,7 +73,7 @@ GC_API int GC_parallel; /* GC is parallelized for performance on */
/* Public R/W variables */
-GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
+GC_API void * (*GC_oom_fn) (size_t bytes_requested);
/* When there is insufficient memory to satisfy */
/* an allocation request, we return */
/* (*GC_oom_fn)(). By default this just */
@@ -110,11 +100,6 @@ GC_API int GC_all_interior_pointers;
/* pointer recognition. */
/* MUST BE 0 or 1. */
-GC_API int GC_quiet; /* Disable statistics output. Only matters if */
- /* collector has been compiled with statistics */
- /* enabled. This involves a performance cost, */
- /* and is thus not the default. */
-
GC_API int GC_finalize_on_demand;
/* If nonzero, finalizers will only be run in */
/* response to an explicit GC_invoke_finalizers */
@@ -189,15 +174,16 @@ GC_API int GC_no_dls;
GC_API GC_word GC_free_space_divisor;
/* We try to make sure that we allocate at */
/* least N/GC_free_space_divisor bytes between */
- /* collections, where N is the heap size plus */
+ /* collections, where N is twice the number */
+ /* of traced bytes, plus the number of untraced */
+ /* bytes (bytes in "atomic" objects), plus */
/* a rough estimate of the root set size. */
+ /* N approximates GC tracing work per GC. */
/* Initially, GC_free_space_divisor = 3. */
/* Increasing its value will use less space */
/* but more collection time. Decreasing it */
/* will appreciably decrease collection time */
/* at the expense of space. */
- /* GC_free_space_divisor = 1 will effectively */
- /* disable collections. */
GC_API GC_word GC_max_retries;
/* The maximum number of GCs attempted before */
@@ -205,7 +191,7 @@ GC_API GC_word GC_max_retries;
/* expansion fails. Initially 0. */
-GC_API char *GC_stackbottom; /* Cool end of user stack. */
+GC_API char *GC_stackbottom; /* Cool end of user stack. */
/* May be set in the client prior to */
/* calling any GC_ routines. This */
/* avoids some overhead, and */
@@ -247,7 +233,7 @@ GC_API unsigned long GC_time_limit;
* to call this somehow (e.g. from a constructor) before doing any allocation.
* For win32 threads, it needs to be called explicitly.
*/
-GC_API void GC_init GC_PROTO((void));
+GC_API void GC_init(void);
/*
* general purpose allocation routines, with roughly malloc calling conv.
@@ -265,14 +251,14 @@ GC_API void GC_init GC_PROTO((void));
* starting in 6.0. GC_malloc_stubborn is an alias for GC_malloc unless
* the collector is built with STUBBORN_ALLOC defined.
*/
-GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
-GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
-GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
-GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
+GC_API void * GC_malloc(size_t size_in_bytes);
+GC_API void * GC_malloc_atomic(size_t size_in_bytes);
+GC_API void * GC_malloc_uncollectable(size_t size_in_bytes);
+GC_API void * GC_malloc_stubborn(size_t size_in_bytes);
/* The following is only defined if the library has been suitably */
/* compiled: */
-GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));
+GC_API void * GC_malloc_atomic_uncollectable(size_t size_in_bytes);
/* Explicitly deallocate an object. Dangerous if used incorrectly. */
/* Requires a pointer to the base of an object. */
@@ -280,7 +266,7 @@ GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));
/* An object should not be enable for finalization when it is */
/* explicitly deallocated. */
/* GC_free(0) is a no-op, as required by ANSI C for free. */
-GC_API void GC_free GC_PROTO((GC_PTR object_addr));
+GC_API void GC_free(void * object_addr);
/*
* Stubborn objects may be changed only if the collector is explicitly informed.
@@ -297,8 +283,8 @@ GC_API void GC_free GC_PROTO((GC_PTR object_addr));
* do so. The same applies to dropping stubborn objects that are still
* changeable.
*/
-GC_API void GC_change_stubborn GC_PROTO((GC_PTR));
-GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
+GC_API void GC_change_stubborn(void *);
+GC_API void GC_end_stubborn_change(void *);
/* Return a pointer to the base (lowest address) of an object given */
/* a pointer to a location within the object. */
@@ -310,13 +296,13 @@ GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
/* object. */
/* Note that a deallocated object in the garbage collected heap */
/* may be considered valid, even if it has been deallocated with */
-/* GC_free. */
-GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));
+/* GC_free. */
+GC_API void * GC_base(void * displaced_pointer);
/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally */
/* requested. */
-GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));
+GC_API size_t GC_size(void * object_addr);
/* For compatibility with C library. This is occasionally faster than */
/* a malloc followed by a bcopy. But if you rely on that, either here */
@@ -326,35 +312,33 @@ GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));
/* If the argument is stubborn, the result will have changes enabled. */
/* It is an error to have changes enabled for the original object. */
/* Follows ANSI conventions for NULL old_object. */
-GC_API GC_PTR GC_realloc
- GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes));
+GC_API void * GC_realloc(void * old_object, size_t new_size_in_bytes);
/* Explicitly increase the heap size. */
/* Returns 0 on failure, 1 on success. */
-GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes));
+GC_API int GC_expand_hp(size_t number_of_bytes);
/* Limit the heap size to n bytes. Useful when you're debugging, */
/* especially on systems that don't handle running out of memory well. */
/* n == 0 ==> unbounded. This is the default. */
-GC_API void GC_set_max_heap_size GC_PROTO((GC_word n));
+GC_API void GC_set_max_heap_size(GC_word n);
/* Inform the collector that a certain section of statically allocated */
/* memory contains no pointers to garbage collected memory. Thus it */
/* need not be scanned. This is sometimes important if the application */
/* maps large read/write files into the address space, which could be */
/* mistaken for dynamic library data segments on some systems. */
-GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish));
+GC_API void GC_exclude_static_roots(void * low_address,
+ void * high_address_plus_1);
/* Clear the set of root segments. Wizards only. */
-GC_API void GC_clear_roots GC_PROTO((void));
+GC_API void GC_clear_roots(void);
/* Add a root segment. Wizards only. */
-GC_API void GC_add_roots GC_PROTO((char * low_address,
- char * high_address_plus_1));
+GC_API void GC_add_roots(void * low_address, void * high_address_plus_1);
/* Remove a root segment. Wizards only. */
-GC_API void GC_remove_roots GC_PROTO((char * low_address,
- char * high_address_plus_1));
+GC_API void GC_remove_roots(void * low_address, void * high_address_plus_1);
/* Add a displacement to the set of those considered valid by the */
/* collector. GC_register_displacement(n) means that if p was returned */
@@ -368,14 +352,14 @@ GC_API void GC_remove_roots GC_PROTO((char * low_address,
/* retention. */
/* This is a no-op if the collector has recognition of */
/* arbitrary interior pointers enabled, which is now the default. */
-GC_API void GC_register_displacement GC_PROTO((GC_word n));
+GC_API void GC_register_displacement(size_t n);
/* The following version should be used if any debugging allocation is */
/* being done. */
-GC_API void GC_debug_register_displacement GC_PROTO((GC_word n));
+GC_API void GC_debug_register_displacement(size_t n);
/* Explicitly trigger a full, world-stop collection. */
-GC_API void GC_gcollect GC_PROTO((void));
+GC_API void GC_gcollect(void);
/* Trigger a full world-stopped collection. Abort the collection if */
/* and when stop_func returns a nonzero value. Stop_func will be */
@@ -386,32 +370,32 @@ GC_API void GC_gcollect GC_PROTO((void));
/* aborted collections do no useful work; the next collection needs */
/* to start from the beginning. */
/* Return 0 if the collection was aborted, 1 if it succeeded. */
-typedef int (* GC_stop_func) GC_PROTO((void));
-GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
+typedef int (* GC_stop_func)(void);
+GC_API int GC_try_to_collect(GC_stop_func stop_func);
/* Return the number of bytes in the heap. Excludes collector private */
/* data structures. Includes empty blocks and fragmentation loss. */
/* Includes some pages that were allocated but never written. */
-GC_API size_t GC_get_heap_size GC_PROTO((void));
+GC_API size_t GC_get_heap_size(void);
/* Return a lower bound on the number of free bytes in the heap. */
-GC_API size_t GC_get_free_bytes GC_PROTO((void));
+GC_API size_t GC_get_free_bytes(void);
/* Return the number of bytes allocated since the last collection. */
-GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));
+GC_API size_t GC_get_bytes_since_gc(void);
/* Return the total number of bytes allocated in this process. */
/* Never decreases, except due to wrapping. */
-GC_API size_t GC_get_total_bytes GC_PROTO((void));
+GC_API size_t GC_get_total_bytes(void);
/* Disable garbage collection. Even GC_gcollect calls will be */
/* ineffective. */
-GC_API void GC_disable GC_PROTO((void));
+GC_API void GC_disable(void);
/* Reenable garbage collection. GC_disable() and GC_enable() calls */
/* nest. Garbage collection is enabled if the number of calls to */
/* both functions is equal. */
-GC_API void GC_enable GC_PROTO((void));
+GC_API void GC_enable(void);
/* Enable incremental/generational collection. */
/* Not advisable unless dirty bits are */
@@ -425,7 +409,7 @@ GC_API void GC_enable GC_PROTO((void));
/* Causes GC_local_gcj_malloc() to revert to */
/* locked allocation. Must be called */
/* before any GC_local_gcj_malloc() calls. */
-GC_API void GC_enable_incremental GC_PROTO((void));
+GC_API void GC_enable_incremental(void);
/* Does incremental mode write-protect pages? Returns zero or */
/* more of the following, or'ed together: */
@@ -435,7 +419,7 @@ GC_API void GC_enable_incremental GC_PROTO((void));
#define GC_PROTECTS_STACK 8 /* Probably impractical. */
#define GC_PROTECTS_NONE 0
-GC_API int GC_incremental_protection_needs GC_PROTO((void));
+GC_API int GC_incremental_protection_needs(void);
/* Perform some garbage collection work, if appropriate. */
/* Return 0 if there is no more work to be done. */
@@ -444,7 +428,7 @@ GC_API int GC_incremental_protection_needs GC_PROTO((void));
/* progress requires it, e.g. if incremental collection is */
/* disabled. It is reasonable to call this in a wait loop */
/* until it returns 0. */
-GC_API int GC_collect_a_little GC_PROTO((void));
+GC_API int GC_collect_a_little(void);
/* Allocate an object of size lb bytes. The client guarantees that */
/* as long as the object is live, it will be referenced by a pointer */
@@ -460,8 +444,8 @@ GC_API int GC_collect_a_little GC_PROTO((void));
/* for arrays likely to be larger than 100K or so. For other systems, */
/* or if the collector is not configured to recognize all interior */
/* pointers, the threshold is normally much higher. */
-GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
-GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
+GC_API void * GC_malloc_ignore_off_page(size_t lb);
+GC_API void * GC_malloc_atomic_ignore_off_page(size_t lb);
#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
# define GC_ADD_CALLER
@@ -511,32 +495,29 @@ GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
-# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s, int i
+# define GC_EXTRA_PARAMS GC_word ra, const char * s, int i
#else
# define GC_EXTRAS __FILE__, __LINE__
-# define GC_EXTRA_PARAMS GC_CONST char * s, int i
+# define GC_EXTRA_PARAMS const char * s, int i
#endif
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
-GC_API GC_PTR GC_debug_malloc
- GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
-GC_API GC_PTR GC_debug_malloc_atomic
- GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
-GC_API GC_PTR GC_debug_malloc_uncollectable
- GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
-GC_API GC_PTR GC_debug_malloc_stubborn
- GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
-GC_API GC_PTR GC_debug_malloc_ignore_off_page
- GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
-GC_API GC_PTR GC_debug_malloc_atomic_ignore_off_page
- GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
-GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
-GC_API GC_PTR GC_debug_realloc
- GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
- GC_EXTRA_PARAMS));
-GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
-GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
+GC_API void * GC_debug_malloc(size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_atomic(size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_uncollectable
+ (size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_stubborn
+ (size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_ignore_off_page
+ (size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_atomic_ignore_off_page
+ (size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void GC_debug_free (void * object_addr);
+GC_API void * GC_debug_realloc
+ (void * old_object, size_t new_size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void GC_debug_change_stubborn(void *);
+GC_API void GC_debug_end_stubborn_change(void *);
/* Routines that allocate objects with debug information (like the */
/* above), but just fill in dummy file and line number information. */
@@ -550,9 +531,9 @@ GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
/* platforms it may be more convenient not to recompile, e.g. for */
/* leak detection. This can be accomplished by instructing the */
/* linker to replace malloc/realloc with these. */
-GC_API GC_PTR GC_debug_malloc_replacement GC_PROTO((size_t size_in_bytes));
-GC_API GC_PTR GC_debug_realloc_replacement
- GC_PROTO((GC_PTR object_addr, size_t size_in_bytes));
+GC_API void * GC_debug_malloc_replacement (size_t size_in_bytes);
+GC_API void * GC_debug_realloc_replacement
+ (void * object_addr, size_t size_in_bytes);
# ifdef GC_DEBUG
# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
@@ -616,15 +597,14 @@ GC_API GC_PTR GC_debug_realloc_replacement
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
-typedef void (*GC_finalization_proc)
- GC_PROTO((GC_PTR obj, GC_PTR client_data));
+typedef void (*GC_finalization_proc) (void * obj, void * client_data);
-GC_API void GC_register_finalizer
- GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
- GC_finalization_proc *ofn, GC_PTR *ocd));
+GC_API void GC_register_finalizer(void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd);
GC_API void GC_debug_register_finalizer
- GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
- GC_finalization_proc *ofn, GC_PTR *ocd));
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
/* When obj is no longer accessible, invoke */
/* (*fn)(obj, cd). If a and b are inaccessible, and */
/* a points to b (after disappearing links have been */
@@ -668,22 +648,22 @@ GC_API void GC_debug_register_finalizer
/* Note that cd will still be viewed as accessible, even if it */
/* refers to the object itself. */
GC_API void GC_register_finalizer_ignore_self
- GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
- GC_finalization_proc *ofn, GC_PTR *ocd));
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
GC_API void GC_debug_register_finalizer_ignore_self
- GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
- GC_finalization_proc *ofn, GC_PTR *ocd));
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
/* Another version of the above. It ignores all cycles. */
/* It should probably only be used by Java implementations. */
/* Note that cd will still be viewed as accessible, even if it */
/* refers to the object itself. */
GC_API void GC_register_finalizer_no_order
- GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
- GC_finalization_proc *ofn, GC_PTR *ocd));
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
GC_API void GC_debug_register_finalizer_no_order
- GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
- GC_finalization_proc *ofn, GC_PTR *ocd));
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
/* The following routine may be used to break cycles between */
@@ -693,7 +673,7 @@ GC_API void GC_debug_register_finalizer_no_order
/* where p is a pointer that is not followed by finalization */
/* code, and should not be considered in determining */
/* finalization order. */
-GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
+GC_API int GC_register_disappearing_link(void * * link );
/* Link should point to a field of a heap allocated */
/* object obj. *link will be cleared when obj is */
/* found to be inaccessible. This happens BEFORE any */
@@ -713,8 +693,7 @@ GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* otherwise. */
/* Only exists for backward compatibility. See below: */
-GC_API int GC_general_register_disappearing_link
- GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
+GC_API int GC_general_register_disappearing_link (void * * link, void * obj);
/* A slight generalization of the above. *link is */
/* cleared when obj first becomes inaccessible. This */
/* can be used to implement weak pointers easily and */
@@ -732,15 +711,15 @@ GC_API int GC_general_register_disappearing_link
/* the object containing link. Explicitly deallocating */
/* obj may or may not cause link to eventually be */
/* cleared. */
-GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
+GC_API int GC_unregister_disappearing_link (void * * link);
/* Returns 0 if link was not actually registered. */
/* Undoes a registration by either of the above two */
/* routines. */
/* Returns !=0 if GC_invoke_finalizers has something to do. */
-GC_API int GC_should_invoke_finalizers GC_PROTO((void));
+GC_API int GC_should_invoke_finalizers(void);
-GC_API int GC_invoke_finalizers GC_PROTO((void));
+GC_API int GC_invoke_finalizers(void);
/* Run finalizers for all objects that are ready to */
/* be finalized. Return the number of finalizers */
/* that were run. Normally this is also called */
@@ -750,11 +729,11 @@ GC_API int GC_invoke_finalizers GC_PROTO((void));
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer. */
-typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
-GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
+typedef void (*GC_warn_proc) (char *msg, GC_word arg);
+GC_API GC_warn_proc GC_set_warn_proc(GC_warn_proc p);
/* Returns old warning procedure. */
-GC_API GC_word GC_set_free_space_divisor GC_PROTO((GC_word value));
+GC_API GC_word GC_set_free_space_divisor(GC_word value);
/* Set free_space_divisor. See above for definition. */
/* Returns old value. */
@@ -769,15 +748,14 @@ GC_API GC_word GC_set_free_space_divisor GC_PROTO((GC_word value));
# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
typedef GC_word GC_hidden_pointer;
# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
-# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
+# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
/* Converting a hidden pointer to a real pointer requires verifying */
/* that the object still exists. This involves acquiring the */
/* allocator lock to avoid a race with the collector. */
# endif /* I_HIDE_POINTERS */
-typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
-GC_API GC_PTR GC_call_with_alloc_lock
- GC_PROTO((GC_fn_type fn, GC_PTR client_data));
+typedef void * (*GC_fn_type) (void * client_data);
+GC_API void * GC_call_with_alloc_lock (GC_fn_type fn, void * client_data);
/* The following routines are primarily intended for use with a */
/* preprocessor which inserts calls to check C pointer arithmetic. */
@@ -788,14 +766,14 @@ GC_API GC_PTR GC_call_with_alloc_lock
/* Returns the first argument. */
/* Succeeds if neither p nor q points to the heap. */
/* May succeed if both p and q point to between heap objects. */
-GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));
+GC_API void * GC_same_obj (void * p, void * q);
/* Checked pointer pre- and post- increment operations. Note that */
/* the second argument is in units of bytes, not multiples of the */
/* object size. This should either be invoked from a macro, or the */
/* call should be automatically generated. */
-GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
-GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));
+GC_API void * GC_pre_incr (void * *p, size_t how_much);
+GC_API void * GC_post_incr (void * *p, size_t how_much);
/* Check that p is visible */
/* to the collector as a possibly pointer containing location. */
@@ -805,14 +783,14 @@ GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));
/* untyped allocations. The idea is that it should be possible, though */
/* slow, to add such a call to all indirect pointer stores.) */
/* Currently useless for multithreaded worlds. */
-GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));
+GC_API void * GC_is_visible (void * p);
/* Check that if p is a pointer to a heap page, then it points to */
/* a valid displacement within a heap object. */
/* Fail conspicuously if this property does not hold. */
/* Uninteresting with GC_all_interior_pointers. */
/* Always returns its argument. */
-GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
+GC_API void * GC_is_valid_displacement (void * p);
/* Safer, but slow, pointer addition. Probably useful mainly with */
/* a preprocessor. Useful only for heap pointers. */
@@ -848,25 +826,18 @@ GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
/* Safer assignment of a pointer to a nonstack location. */
#ifdef GC_DEBUG
-# ifdef __STDC__
# define GC_PTR_STORE(p, q) \
(*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
-# else
-# define GC_PTR_STORE(p, q) \
- (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
-# endif
#else /* !GC_DEBUG */
# define GC_PTR_STORE(p, q) *((p) = (q))
#endif
/* Functions called to report pointer checking errors */
-GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q));
+GC_API void (*GC_same_obj_print_proc) (void * p, void * q);
-GC_API void (*GC_is_valid_displacement_print_proc)
- GC_PROTO((GC_PTR p));
+GC_API void (*GC_is_valid_displacement_print_proc) (void * p);
-GC_API void (*GC_is_visible_print_proc)
- GC_PROTO((GC_PTR p));
+GC_API void (*GC_is_visible_print_proc) (void * p);
/* For pthread support, we generally need to intercept a number of */
@@ -883,11 +854,10 @@ GC_API void (*GC_is_visible_print_proc)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
-/* lb must be large enough to hold the pointer field. */
/* It is used internally by gc_local_alloc.h, which provides a simpler */
/* programming interface on Linux. */
-GC_PTR GC_malloc_many(size_t lb);
-#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
+void * GC_malloc_many(size_t lb);
+#define GC_NEXT(p) (*(void * *)(p)) /* Retrieve the next element */
/* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
diff --git a/include/gc_alloc.h b/include/gc_alloc.h
deleted file mode 100644
index c50a7589..00000000
--- a/include/gc_alloc.h
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-
-//
-// This is a C++ header file that is intended to replace the SGI STL
-// alloc.h. This assumes SGI STL version < 3.0.
-//
-// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
-// and -DALL_INTERIOR_POINTERS. We also recommend
-// -DREDIRECT_MALLOC=GC_uncollectable_malloc.
-//
-// Some of this could be faster in the explicit deallocation case. In particular,
-// we spend too much time clearing objects on the free lists. That could be avoided.
-//
-// This uses template classes with static members, and hence does not work
-// with g++ 2.7.2 and earlier.
-//
-// This code assumes that the collector itself has been compiled with a
-// compiler that defines __STDC__ .
-//
-
-#include "gc.h"
-
-#ifndef GC_ALLOC_H
-
-#define GC_ALLOC_H
-#define __ALLOC_H // Prevent inclusion of the default version. Ugly.
-#define __SGI_STL_ALLOC_H
-#define __SGI_STL_INTERNAL_ALLOC_H
-
-#ifndef __ALLOC
-# define __ALLOC alloc
-#endif
-
-#include <stddef.h>
-#include <string.h>
-
-// The following is just replicated from the conventional SGI alloc.h:
-
-template<class T, class alloc>
-class simple_alloc {
-
-public:
- static T *allocate(size_t n)
- { return 0 == n? 0 : (T*) alloc::allocate(n * sizeof (T)); }
- static T *allocate(void)
- { return (T*) alloc::allocate(sizeof (T)); }
- static void deallocate(T *p, size_t n)
- { if (0 != n) alloc::deallocate(p, n * sizeof (T)); }
- static void deallocate(T *p)
- { alloc::deallocate(p, sizeof (T)); }
-};
-
-#include "gc.h"
-
-// The following need to match collector data structures.
-// We can't include gc_priv.h, since that pulls in way too much stuff.
-// This should eventually be factored out into another include file.
-
-extern "C" {
- extern void ** const GC_objfreelist_ptr;
- extern void ** const GC_aobjfreelist_ptr;
- extern void ** const GC_uobjfreelist_ptr;
- extern void ** const GC_auobjfreelist_ptr;
-
- extern void GC_incr_words_allocd(size_t words);
- extern void GC_incr_mem_freed(size_t words);
-
- extern char * GC_generic_malloc_words_small(size_t word, int kind);
-}
-
-// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
-// AUNCOLLECTABLE in gc_priv.h.
-
-enum { GC_PTRFREE = 0, GC_NORMAL = 1, GC_UNCOLLECTABLE = 2,
- GC_AUNCOLLECTABLE = 3 };
-
-enum { GC_max_fast_bytes = 255 };
-
-enum { GC_bytes_per_word = sizeof(char *) };
-
-enum { GC_byte_alignment = 8 };
-
-enum { GC_word_alignment = GC_byte_alignment/GC_bytes_per_word };
-
-inline void * &GC_obj_link(void * p)
-{ return *(void **)p; }
-
-// Compute a number of words >= n+1 bytes.
-// The +1 allows for pointers one past the end.
-inline size_t GC_round_up(size_t n)
-{
- return ((n + GC_byte_alignment)/GC_byte_alignment)*GC_word_alignment;
-}
-
-// The same but don't allow for extra byte.
-inline size_t GC_round_up_uncollectable(size_t n)
-{
- return ((n + GC_byte_alignment - 1)/GC_byte_alignment)*GC_word_alignment;
-}
-
-template <int dummy>
-class GC_aux_template {
-public:
- // File local count of allocated words. Occasionally this is
- // added into the global count. A separate count is necessary since the
- // real one must be updated with a procedure call.
- static size_t GC_words_recently_allocd;
-
- // Same for uncollectable mmory. Not yet reflected in either
- // GC_words_recently_allocd or GC_non_gc_bytes.
- static size_t GC_uncollectable_words_recently_allocd;
-
- // Similar counter for explicitly deallocated memory.
- static size_t GC_mem_recently_freed;
-
- // Again for uncollectable memory.
- static size_t GC_uncollectable_mem_recently_freed;
-
- static void * GC_out_of_line_malloc(size_t nwords, int kind);
-};
-
-template <int dummy>
-size_t GC_aux_template<dummy>::GC_words_recently_allocd = 0;
-
-template <int dummy>
-size_t GC_aux_template<dummy>::GC_uncollectable_words_recently_allocd = 0;
-
-template <int dummy>
-size_t GC_aux_template<dummy>::GC_mem_recently_freed = 0;
-
-template <int dummy>
-size_t GC_aux_template<dummy>::GC_uncollectable_mem_recently_freed = 0;
-
-template <int dummy>
-void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
-{
- GC_words_recently_allocd += GC_uncollectable_words_recently_allocd;
- GC_non_gc_bytes +=
- GC_bytes_per_word * GC_uncollectable_words_recently_allocd;
- GC_uncollectable_words_recently_allocd = 0;
-
- GC_mem_recently_freed += GC_uncollectable_mem_recently_freed;
- GC_non_gc_bytes -=
- GC_bytes_per_word * GC_uncollectable_mem_recently_freed;
- GC_uncollectable_mem_recently_freed = 0;
-
- GC_incr_words_allocd(GC_words_recently_allocd);
- GC_words_recently_allocd = 0;
-
- GC_incr_mem_freed(GC_mem_recently_freed);
- GC_mem_recently_freed = 0;
-
- return GC_generic_malloc_words_small(nwords, kind);
-}
-
-typedef GC_aux_template<0> GC_aux;
-
-// A fast, single-threaded, garbage-collected allocator
-// We assume the first word will be immediately overwritten.
-// In this version, deallocation is not a noop, and explicit
-// deallocation is likely to help performance.
-template <int dummy>
-class single_client_gc_alloc_template {
- public:
- static void * allocate(size_t n)
- {
- size_t nwords = GC_round_up(n);
- void ** flh;
- void * op;
-
- if (n > GC_max_fast_bytes) return GC_malloc(n);
- flh = GC_objfreelist_ptr + nwords;
- if (0 == (op = *flh)) {
- return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
- }
- *flh = GC_obj_link(op);
- GC_aux::GC_words_recently_allocd += nwords;
- return op;
- }
- static void * ptr_free_allocate(size_t n)
- {
- size_t nwords = GC_round_up(n);
- void ** flh;
- void * op;
-
- if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
- flh = GC_aobjfreelist_ptr + nwords;
- if (0 == (op = *flh)) {
- return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
- }
- *flh = GC_obj_link(op);
- GC_aux::GC_words_recently_allocd += nwords;
- return op;
- }
- static void deallocate(void *p, size_t n)
- {
- size_t nwords = GC_round_up(n);
- void ** flh;
-
- if (n > GC_max_fast_bytes) {
- GC_free(p);
- } else {
- flh = GC_objfreelist_ptr + nwords;
- GC_obj_link(p) = *flh;
- memset((char *)p + GC_bytes_per_word, 0,
- GC_bytes_per_word * (nwords - 1));
- *flh = p;
- GC_aux::GC_mem_recently_freed += nwords;
- }
- }
- static void ptr_free_deallocate(void *p, size_t n)
- {
- size_t nwords = GC_round_up(n);
- void ** flh;
-
- if (n > GC_max_fast_bytes) {
- GC_free(p);
- } else {
- flh = GC_aobjfreelist_ptr + nwords;
- GC_obj_link(p) = *flh;
- *flh = p;
- GC_aux::GC_mem_recently_freed += nwords;
- }
- }
-};
-
-typedef single_client_gc_alloc_template<0> single_client_gc_alloc;
-
-// Once more, for uncollectable objects.
-template <int dummy>
-class single_client_alloc_template {
- public:
- static void * allocate(size_t n)
- {
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
- void * op;
-
- if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
- flh = GC_uobjfreelist_ptr + nwords;
- if (0 == (op = *flh)) {
- return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
- }
- *flh = GC_obj_link(op);
- GC_aux::GC_uncollectable_words_recently_allocd += nwords;
- return op;
- }
- static void * ptr_free_allocate(size_t n)
- {
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
- void * op;
-
- if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
- flh = GC_auobjfreelist_ptr + nwords;
- if (0 == (op = *flh)) {
- return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
- }
- *flh = GC_obj_link(op);
- GC_aux::GC_uncollectable_words_recently_allocd += nwords;
- return op;
- }
- static void deallocate(void *p, size_t n)
- {
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
-
- if (n > GC_max_fast_bytes) {
- GC_free(p);
- } else {
- flh = GC_uobjfreelist_ptr + nwords;
- GC_obj_link(p) = *flh;
- *flh = p;
- GC_aux::GC_uncollectable_mem_recently_freed += nwords;
- }
- }
- static void ptr_free_deallocate(void *p, size_t n)
- {
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
-
- if (n > GC_max_fast_bytes) {
- GC_free(p);
- } else {
- flh = GC_auobjfreelist_ptr + nwords;
- GC_obj_link(p) = *flh;
- *flh = p;
- GC_aux::GC_uncollectable_mem_recently_freed += nwords;
- }
- }
-};
-
-typedef single_client_alloc_template<0> single_client_alloc;
-
-template < int dummy >
-class gc_alloc_template {
- public:
- static void * allocate(size_t n) { return GC_malloc(n); }
- static void * ptr_free_allocate(size_t n)
- { return GC_malloc_atomic(n); }
- static void deallocate(void *, size_t) { }
- static void ptr_free_deallocate(void *, size_t) { }
-};
-
-typedef gc_alloc_template < 0 > gc_alloc;
-
-template < int dummy >
-class alloc_template {
- public:
- static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
- static void * ptr_free_allocate(size_t n)
- { return GC_malloc_atomic_uncollectable(n); }
- static void deallocate(void *p, size_t) { GC_free(p); }
- static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
-};
-
-typedef alloc_template < 0 > alloc;
-
-#ifdef _SGI_SOURCE
-
-// We want to specialize simple_alloc so that it does the right thing
-// for all pointerfree types. At the moment there is no portable way to
-// even approximate that. The following approximation should work for
-// SGI compilers, and perhaps some others.
-
-# define __GC_SPECIALIZE(T,alloc) \
-class simple_alloc<T, alloc> { \
-public: \
- static T *allocate(size_t n) \
- { return 0 == n? 0 : \
- (T*) alloc::ptr_free_allocate(n * sizeof (T)); } \
- static T *allocate(void) \
- { return (T*) alloc::ptr_free_allocate(sizeof (T)); } \
- static void deallocate(T *p, size_t n) \
- { if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
- static void deallocate(T *p) \
- { alloc::ptr_free_deallocate(p, sizeof (T)); } \
-};
-
-__GC_SPECIALIZE(char, gc_alloc)
-__GC_SPECIALIZE(int, gc_alloc)
-__GC_SPECIALIZE(unsigned, gc_alloc)
-__GC_SPECIALIZE(float, gc_alloc)
-__GC_SPECIALIZE(double, gc_alloc)
-
-__GC_SPECIALIZE(char, alloc)
-__GC_SPECIALIZE(int, alloc)
-__GC_SPECIALIZE(unsigned, alloc)
-__GC_SPECIALIZE(float, alloc)
-__GC_SPECIALIZE(double, alloc)
-
-__GC_SPECIALIZE(char, single_client_gc_alloc)
-__GC_SPECIALIZE(int, single_client_gc_alloc)
-__GC_SPECIALIZE(unsigned, single_client_gc_alloc)
-__GC_SPECIALIZE(float, single_client_gc_alloc)
-__GC_SPECIALIZE(double, single_client_gc_alloc)
-
-__GC_SPECIALIZE(char, single_client_alloc)
-__GC_SPECIALIZE(int, single_client_alloc)
-__GC_SPECIALIZE(unsigned, single_client_alloc)
-__GC_SPECIALIZE(float, single_client_alloc)
-__GC_SPECIALIZE(double, single_client_alloc)
-
-#ifdef __STL_USE_STD_ALLOCATORS
-
-???copy stuff from stl_alloc.h or remove it to a different file ???
-
-#endif /* __STL_USE_STD_ALLOCATORS */
-
-#endif /* _SGI_SOURCE */
-
-#endif /* GC_ALLOC_H */
diff --git a/include/gc_config_macros.h b/include/gc_config_macros.h
index d8d31141..bdae6a90 100644
--- a/include/gc_config_macros.h
+++ b/include/gc_config_macros.h
@@ -97,11 +97,14 @@
# endif
#endif /* GC_THREADS */
-#if defined(GC_THREADS) && !defined(GC_PTHREADS) && \
- (defined(_WIN32) || defined(_MSC_VER) || defined(__CYGWIN__) \
+#if defined(GC_THREADS) && !defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) \
+ && (defined(_WIN32) || defined(_MSC_VER) || defined(__CYGWIN__) \
|| defined(__MINGW32__) || defined(__BORLANDC__) \
|| defined(_WIN32_WCE))
# define GC_WIN32_THREADS
+# if defined(__CYGWIN__)
+# define GC_PTHREADS
+# endif
#endif
#if defined(GC_SOLARIS_PTHREADS) && !defined(GC_SOLARIS_THREADS)
diff --git a/include/gc_gcj.h b/include/gc_gcj.h
index 5e79e27b..df5c6c71 100644
--- a/include/gc_gcj.h
+++ b/include/gc_gcj.h
@@ -71,18 +71,6 @@ extern void * GC_debug_gcj_malloc(size_t lb,
void * ptr_to_struct_containing_descr,
GC_EXTRA_PARAMS);
-/* Similar to the above, but the size is in words, and we don't */
-/* adjust it. The size is assumed to be such that it can be */
-/* allocated as a small object. */
-/* Unless it is known that the collector is not configured */
-/* with USE_MARK_BYTES and unless it is known that the object */
-/* has weak alignment requirements, lw must be even. */
-extern void * GC_gcj_fast_malloc(size_t lw,
- void * ptr_to_struct_containing_descr);
-extern void * GC_debug_gcj_fast_malloc(size_t lw,
- void * ptr_to_struct_containing_descr,
- GC_EXTRA_PARAMS);
-
/* Similar to GC_gcj_malloc, but assumes that a pointer to near the */
/* beginning of the resulting object is always maintained. */
extern void * GC_gcj_malloc_ignore_off_page(size_t lb,
diff --git a/include/gc_inl.h b/include/gc_inl.h
index c535cfd7..73d63ec2 100644
--- a/include/gc_inl.h
+++ b/include/gc_inl.h
@@ -13,6 +13,8 @@
*/
/* Boehm, October 3, 1995 2:07 pm PDT */
+#error FIXME: This needs to be updated.
+
# ifndef GC_PRIVATE_H
# include "private/gc_priv.h"
# endif
@@ -58,7 +60,7 @@
obj_link(op) = 0; \
GC_words_allocd += (n); \
FASTUNLOCK(); \
- (result) = (GC_PTR) op; \
+ (result) = (void *) op; \
} \
}
@@ -80,7 +82,7 @@
obj_link(op) = 0; \
GC_words_allocd += (n); \
FASTUNLOCK(); \
- (result) = (GC_PTR) op; \
+ (result) = (void *) op; \
} \
}
@@ -103,5 +105,5 @@
} \
((word *)op)[0] = (word)(first); \
((word *)op)[1] = (word)(second); \
- (result) = (GC_PTR) op; \
+ (result) = (void *) op; \
}
diff --git a/include/gc_local_alloc.h b/include/gc_local_alloc.h
index 1874c7b6..7a7b7bb7 100644
--- a/include/gc_local_alloc.h
+++ b/include/gc_local_alloc.h
@@ -49,14 +49,12 @@
# include "gc_gcj.h"
#endif
-/* We assume ANSI C for this interface. */
+void * GC_local_malloc(size_t bytes);
-GC_PTR GC_local_malloc(size_t bytes);
-
-GC_PTR GC_local_malloc_atomic(size_t bytes);
+void * GC_local_malloc_atomic(size_t bytes);
#if defined(GC_GCJ_SUPPORT)
- GC_PTR GC_local_gcj_malloc(size_t bytes,
+ void * GC_local_gcj_malloc(size_t bytes,
void * ptr_to_struct_containing_descr);
#endif
diff --git a/include/gc_mark.h b/include/gc_mark.h
index 953bb74d..79f5a8bf 100644
--- a/include/gc_mark.h
+++ b/include/gc_mark.h
@@ -53,9 +53,9 @@
/* case correctly somehow. */
# define GC_PROC_BYTES 100
struct GC_ms_entry;
-typedef struct GC_ms_entry * (*GC_mark_proc) GC_PROTO((
+typedef struct GC_ms_entry * (*GC_mark_proc) (
GC_word * addr, struct GC_ms_entry * mark_stack_ptr,
- struct GC_ms_entry * mark_stack_limit, GC_word env));
+ struct GC_ms_entry * mark_stack_limit, GC_word env);
# define GC_LOG_MAX_MARK_PROCS 6
# define GC_MAX_MARK_PROCS (1 << GC_LOG_MAX_MARK_PROCS)
@@ -106,8 +106,8 @@ typedef struct GC_ms_entry * (*GC_mark_proc) GC_PROTO((
/* held. */
#define GC_INDIR_PER_OBJ_BIAS 0x10
-extern GC_PTR GC_least_plausible_heap_addr;
-extern GC_PTR GC_greatest_plausible_heap_addr;
+extern void * GC_least_plausible_heap_addr;
+extern void * GC_greatest_plausible_heap_addr;
/* Bounds on the heap. Guaranteed valid */
/* Likely to include future heap expansion. */
@@ -130,10 +130,10 @@ extern GC_PTR GC_greatest_plausible_heap_addr;
/* which would tie the client code to a fixed collector version.) */
/* Note that mark procedures should explicitly call FIXUP_POINTER() */
/* if required. */
-struct GC_ms_entry *GC_mark_and_push
- GC_PROTO((GC_PTR obj,
- struct GC_ms_entry * mark_stack_ptr,
- struct GC_ms_entry * mark_stack_limit, GC_PTR *src));
+struct GC_ms_entry *GC_mark_and_push(void * obj,
+ struct GC_ms_entry * mark_stack_ptr,
+ struct GC_ms_entry * mark_stack_limit,
+ void * *src);
#define GC_MARK_AND_PUSH(obj, msp, lim, src) \
(((GC_word)obj >= (GC_word)GC_least_plausible_heap_addr && \
@@ -146,29 +146,29 @@ extern size_t GC_debug_header_size;
/* the GC_debug routines. */
/* Defined as a variable so that client mark procedures don't */
/* need to be recompiled for collector version changes. */
-#define GC_USR_PTR_FROM_BASE(p) ((GC_PTR)((char *)(p) + GC_debug_header_size))
+#define GC_USR_PTR_FROM_BASE(p) ((void *)((char *)(p) + GC_debug_header_size))
/* And some routines to support creation of new "kinds", e.g. with */
/* custom mark procedures, by language runtimes. */
/* The _inner versions assume the caller holds the allocation lock. */
/* Return a new free list array. */
-void ** GC_new_free_list GC_PROTO((void));
-void ** GC_new_free_list_inner GC_PROTO((void));
+void ** GC_new_free_list(void);
+void ** GC_new_free_list_inner(void);
/* Return a new kind, as specified. */
-int GC_new_kind GC_PROTO((void **free_list, GC_word mark_descriptor_template,
- int add_size_to_descriptor, int clear_new_objects));
+int GC_new_kind(void **free_list, GC_word mark_descriptor_template,
+ int add_size_to_descriptor, int clear_new_objects);
/* The last two parameters must be zero or one. */
-int GC_new_kind_inner GC_PROTO((void **free_list,
- GC_word mark_descriptor_template,
- int add_size_to_descriptor,
- int clear_new_objects));
+int GC_new_kind_inner(void **free_list,
+ GC_word mark_descriptor_template,
+ int add_size_to_descriptor,
+ int clear_new_objects);
/* Return a new mark procedure identifier, suitable for use as */
/* the first argument in GC_MAKE_PROC. */
-int GC_new_proc GC_PROTO((GC_mark_proc));
-int GC_new_proc_inner GC_PROTO((GC_mark_proc));
+int GC_new_proc(GC_mark_proc);
+int GC_new_proc_inner(GC_mark_proc);
/* Allocate an object of a given kind. Note that in multithreaded */
/* contexts, this is usually unsafe for kinds that have the descriptor */
@@ -176,11 +176,9 @@ int GC_new_proc_inner GC_PROTO((GC_mark_proc));
/* the descriptor is not correct. Even in the single-threaded case, */
/* we need to be sure that cleared objects on a free list don't */
/* cause a GC crash if they are accidentally traced. */
-/* ptr_t */char * GC_generic_malloc GC_PROTO((GC_word lb, int k));
+void * GC_generic_malloc(size_t lb, int k);
-/* FIXME - Should return void *, but that requires other changes. */
-
-typedef void (*GC_describe_type_fn) GC_PROTO((void *p, char *out_buf));
+typedef void (*GC_describe_type_fn) (void *p, char *out_buf);
/* A procedure which */
/* produces a human-readable */
/* description of the "type" of object */
@@ -194,7 +192,7 @@ typedef void (*GC_describe_type_fn) GC_PROTO((void *p, char *out_buf));
/* global free list. */
# define GC_TYPE_DESCR_LEN 40
-void GC_register_describe_type_fn GC_PROTO((int kind, GC_describe_type_fn knd));
+void GC_register_describe_type_fn(int kind, GC_describe_type_fn knd);
/* Register a describe_type function */
/* to be used when printing objects */
/* of a particular kind. */
diff --git a/include/gc_tiny_fl.h b/include/gc_tiny_fl.h
new file mode 100644
index 00000000..537bc343
--- /dev/null
+++ b/include/gc_tiny_fl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+#ifndef TINY_FL_H
+#define TINY_FL_H
+/*
+ * Constants and data structures for "tiny" free lists.
+ * These are used for thread-local allocation or in-lined allocators.
+ * Each global free list also essentially starts with one of these.
+ * However, global free lists are known to the GC. "Tiny" free lists
+ * are basically private to the client. Their contents are viewed as
+ * "in use" and marked accordingly by the core of the GC.
+ *
+ * Note that inlined code might know about the layout of these and the constants
+ * involved. Thus any change here may invalidate clients, and such changes should
+ * be avoided. Hence we keep this as simple as possible.
+ */
+
+/*
+ * We always set GRANULE_BYTES to twice the length of a pointer.
+ * This means that all allocation requests are rounded up to the next
+ * multiple of 16 on 64-bit architectures or 8 on 32-bit architectures.
+ * This appears to be a reasonable compromise between fragmentation overhead
+ * and space usage for mark bits (usually mark bytes).
+ * On many 64-bit architectures some memory references require 16-byte
+ * alignment, making this necessary anyway.
+ * For a few 32-bit architecture (e.g. 86), we may also need 16-byte alignment
+ * for certain memory references. But currently that does not seem to be the
+ * default for all conventional malloc implementations, so we ignore that
+ * problem.
+ * It would always be safe, and often useful, to be able to allocate very
+ * small objects with smaller alignment. But that would cost us mark bit
+ * space, so we no longer do so.
+ */
+#ifndef GC_GRANULE_BYTES
+# if defined(__LP64__) || defined (_LP64) || defined(_WIN64) || defined(__s390x__) \
+ || defined(__x86_64__) || defined(__alpha__) || defined(__powerpc64__) \
+ || defined(__arch64__)
+# define GC_GRANULE_BYTES 16
+# else
+# define GC_GRANULE_BYTES 8
+# endif
+#endif /* !GC_GRANULE_BYTES */
+
+/* A "tiny" free list header contains TINY_FREELISTS pointers to */
+/* singly linked lists of objects of different sizes, the ith one */
+/* containing objects i granules in size. Note that there is a list */
+/* of size zero objects. */
+#ifndef GC_TINY_FREELISTS
+# if GC_GRANULE_BYTES == 16
+# define GC_TINY_FREELISTS 25
+# else
+# define GC_TINY_FREELISTS 33 /* Up to and including 256 bytes */
+# endif
+#endif /* !GC_TINY_FREELISTS */
+
+#endif /* TINY_FL_H */
diff --git a/include/gc_typed.h b/include/gc_typed.h
index 905734b8..1086acdd 100644
--- a/include/gc_typed.h
+++ b/include/gc_typed.h
@@ -47,7 +47,7 @@ typedef GC_word * GC_bitmap;
typedef GC_word GC_descr;
-GC_API GC_descr GC_make_descriptor GC_PROTO((GC_bitmap bm, size_t len));
+GC_API GC_descr GC_make_descriptor(GC_bitmap bm, size_t len);
/* Return a type descriptor for the object whose layout */
/* is described by the argument. */
/* The least significant bit of the first word is one */
@@ -74,19 +74,17 @@ GC_API GC_descr GC_make_descriptor GC_PROTO((GC_bitmap bm, size_t len));
/* ... */
/* T_descr = GC_make_descriptor(T_bitmap, GC_WORD_LEN(T)); */
-GC_API GC_PTR GC_malloc_explicitly_typed
- GC_PROTO((size_t size_in_bytes, GC_descr d));
+GC_API void * GC_malloc_explicitly_typed(size_t size_in_bytes, GC_descr d);
/* Allocate an object whose layout is described by d. */
/* The resulting object MAY NOT BE PASSED TO REALLOC. */
/* The returned object is cleared. */
-GC_API GC_PTR GC_malloc_explicitly_typed_ignore_off_page
- GC_PROTO((size_t size_in_bytes, GC_descr d));
+GC_API void * GC_malloc_explicitly_typed_ignore_off_page
+ (size_t size_in_bytes, GC_descr d);
-GC_API GC_PTR GC_calloc_explicitly_typed
- GC_PROTO((size_t nelements,
- size_t element_size_in_bytes,
- GC_descr d));
+GC_API void * GC_calloc_explicitly_typed(size_t nelements,
+ size_t element_size_in_bytes,
+ GC_descr d);
/* Allocate an array of nelements elements, each of the */
/* given size, and with the given descriptor. */
/* The elemnt size must be a multiple of the byte */
diff --git a/include/new_gc_alloc.h b/include/new_gc_alloc.h
index 7546638c..7668e49e 100644
--- a/include/new_gc_alloc.h
+++ b/include/new_gc_alloc.h
@@ -88,10 +88,11 @@ extern "C" {
extern void ** const GC_uobjfreelist_ptr;
extern void ** const GC_auobjfreelist_ptr;
- extern void GC_incr_words_allocd(size_t words);
- extern void GC_incr_mem_freed(size_t words);
+ extern void GC_incr_bytes_allocd(size_t bytes);
+ extern void GC_incr_mem_freed(size_t words); /* FIXME: use bytes */
extern char * GC_generic_malloc_words_small(size_t word, int kind);
+ /* FIXME: Doesn't exist anymore. */
}
// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
@@ -130,51 +131,50 @@ public:
// File local count of allocated words. Occasionally this is
// added into the global count. A separate count is necessary since the
// real one must be updated with a procedure call.
- static size_t GC_words_recently_allocd;
+ static size_t GC_bytes_recently_allocd;
// Same for uncollectable mmory. Not yet reflected in either
- // GC_words_recently_allocd or GC_non_gc_bytes.
- static size_t GC_uncollectable_words_recently_allocd;
+ // GC_bytes_recently_allocd or GC_non_gc_bytes.
+ static size_t GC_uncollectable_bytes_recently_allocd;
// Similar counter for explicitly deallocated memory.
- static size_t GC_mem_recently_freed;
+ static size_t GC_bytes_recently_freed;
// Again for uncollectable memory.
- static size_t GC_uncollectable_mem_recently_freed;
+ static size_t GC_uncollectable_bytes_recently_freed;
static void * GC_out_of_line_malloc(size_t nwords, int kind);
};
template <int dummy>
-size_t GC_aux_template<dummy>::GC_words_recently_allocd = 0;
+size_t GC_aux_template<dummy>::GC_bytes_recently_allocd = 0;
template <int dummy>
-size_t GC_aux_template<dummy>::GC_uncollectable_words_recently_allocd = 0;
+size_t GC_aux_template<dummy>::GC_uncollectable_bytes_recently_allocd = 0;
template <int dummy>
-size_t GC_aux_template<dummy>::GC_mem_recently_freed = 0;
+size_t GC_aux_template<dummy>::GC_bytes_recently_freed = 0;
template <int dummy>
-size_t GC_aux_template<dummy>::GC_uncollectable_mem_recently_freed = 0;
+size_t GC_aux_template<dummy>::GC_uncollectable_bytes_recently_freed = 0;
template <int dummy>
void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
{
- GC_words_recently_allocd += GC_uncollectable_words_recently_allocd;
+ GC_bytes_recently_allocd += GC_uncollectable_bytes_recently_allocd;
GC_non_gc_bytes +=
- GC_bytes_per_word * GC_uncollectable_words_recently_allocd;
- GC_uncollectable_words_recently_allocd = 0;
+ GC_uncollectable_bytes_recently_allocd;
+ GC_uncollectable_bytes_recently_allocd = 0;
- GC_mem_recently_freed += GC_uncollectable_mem_recently_freed;
- GC_non_gc_bytes -=
- GC_bytes_per_word * GC_uncollectable_mem_recently_freed;
- GC_uncollectable_mem_recently_freed = 0;
+ GC_bytes_recently_freed += GC_uncollectable_bytes_recently_freed;
+ GC_non_gc_bytes -= GC_uncollectable_bytes_recently_freed;
+ GC_uncollectable_bytes_recently_freed = 0;
- GC_incr_words_allocd(GC_words_recently_allocd);
- GC_words_recently_allocd = 0;
+ GC_incr_bytes_allocd(GC_bytes_recently_allocd);
+ GC_bytes_recently_allocd = 0;
- GC_incr_mem_freed(GC_mem_recently_freed);
- GC_mem_recently_freed = 0;
+ GC_incr_mem_freed(GC_bytes_per_word(GC_bytes_recently_freed));
+ GC_bytes_recently_freed = 0;
return GC_generic_malloc_words_small(nwords, kind);
}
@@ -200,7 +200,7 @@ class single_client_gc_alloc_template {
return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
}
*flh = GC_obj_link(op);
- GC_aux::GC_words_recently_allocd += nwords;
+ GC_aux::GC_bytes_recently_allocd += nwords * GC_bytes_per_word;
return op;
}
static void * ptr_free_allocate(size_t n)
@@ -215,7 +215,7 @@ class single_client_gc_alloc_template {
return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
}
*flh = GC_obj_link(op);
- GC_aux::GC_words_recently_allocd += nwords;
+ GC_aux::GC_bytes_recently_allocd += nwords * GC_bytes_per_word;
return op;
}
static void deallocate(void *p, size_t n)
@@ -231,7 +231,7 @@ class single_client_gc_alloc_template {
memset(reinterpret_cast<char *>(p) + GC_bytes_per_word, 0,
GC_bytes_per_word * (nwords - 1));
*flh = p;
- GC_aux::GC_mem_recently_freed += nwords;
+ GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
}
}
static void ptr_free_deallocate(void *p, size_t n)
@@ -245,7 +245,7 @@ class single_client_gc_alloc_template {
flh = GC_aobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
- GC_aux::GC_mem_recently_freed += nwords;
+ GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
}
}
};
@@ -268,7 +268,8 @@ class single_client_traceable_alloc_template {
return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
}
*flh = GC_obj_link(op);
- GC_aux::GC_uncollectable_words_recently_allocd += nwords;
+ GC_aux::GC_uncollectable_bytes_recently_allocd +=
+ nwords * GC_bytes_per_word;
return op;
}
static void * ptr_free_allocate(size_t n)
@@ -283,7 +284,8 @@ class single_client_traceable_alloc_template {
return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
}
*flh = GC_obj_link(op);
- GC_aux::GC_uncollectable_words_recently_allocd += nwords;
+ GC_aux::GC_uncollectable_bytes_recently_allocd +=
+ nwords * GC_bytes_per_word;
return op;
}
static void deallocate(void *p, size_t n)
@@ -297,7 +299,8 @@ class single_client_traceable_alloc_template {
flh = GC_uobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
- GC_aux::GC_uncollectable_mem_recently_freed += nwords;
+ GC_aux::GC_uncollectable_bytes_recently_freed +=
+ nwords * GC_bytes_per_word;
}
}
static void ptr_free_deallocate(void *p, size_t n)
@@ -311,7 +314,8 @@ class single_client_traceable_alloc_template {
flh = GC_auobjfreelist_ptr + nwords;
GC_obj_link(p) = *flh;
*flh = p;
- GC_aux::GC_uncollectable_mem_recently_freed += nwords;
+ GC_aux::GC_uncollectable_bytes_recently_freed +=
+ nwords * GC_bytes_per_word;
}
}
};
diff --git a/include/private/dbg_mlc.h b/include/private/dbg_mlc.h
index e0a994de..fcd027c4 100644
--- a/include/private/dbg_mlc.h
+++ b/include/private/dbg_mlc.h
@@ -38,7 +38,7 @@
/* get them anyway. */
typedef GC_word GC_hidden_pointer;
# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
-# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
+# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
#endif /* HIDE_POINTER */
# define START_FLAG ((word)0xfedcedcb)
@@ -95,12 +95,12 @@ typedef struct {
# ifdef MAKE_BACK_GRAPH
GC_hidden_pointer oh_bg_ptr;
# endif
-# if defined(ALIGN_DOUBLE) && \
- (defined(KEEP_BACK_PTRS) != defined(MAKE_BACK_GRAPH))
+# if defined(KEEP_BACK_PTRS) != defined(MAKE_BACK_GRAPH)
+ /* Keep double-pointer-sized alignment. */
word oh_dummy;
# endif
# endif
- GC_CONST char * oh_string; /* object descriptor string */
+ const char * oh_string; /* object descriptor string */
word oh_int; /* object descriptor integers */
# ifdef NEED_CALLINFO
struct callinfo oh_ci[NFRAMES];
@@ -132,17 +132,20 @@ typedef struct {
/* lock. */
/* PRINT_CALL_CHAIN prints the call chain stored in an object */
/* to stderr. It requires that we do not hold the lock. */
-#ifdef SAVE_CALL_CHAIN
+#if defined(SAVE_CALL_CHAIN)
+ struct callinfo;
+ void GC_save_callers(struct callinfo info[NFRAMES]);
+ void GC_print_callers(struct callinfo info[NFRAMES]);
# define ADD_CALL_CHAIN(base, ra) GC_save_callers(((oh *)(base)) -> oh_ci)
# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
-#else
-# ifdef GC_ADD_CALLER
+#elif defined(GC_ADD_CALLER)
+ struct callinfo;
+ void GC_print_callers(struct callinfo info[NFRAMES]);
# define ADD_CALL_CHAIN(base, ra) ((oh *)(base)) -> oh_ci[0].ci_pc = (ra)
# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
-# else
+#else
# define ADD_CALL_CHAIN(base, ra)
# define PRINT_CALL_CHAIN(base)
-# endif
#endif
# ifdef GC_ADD_CALLER
diff --git a/include/private/gc_hdrs.h b/include/private/gc_hdrs.h
index 70dfefe8..80a29d86 100644
--- a/include/private/gc_hdrs.h
+++ b/include/private/gc_hdrs.h
@@ -28,7 +28,7 @@ typedef struct hblkhdr hdr;
* This defines HDR, GET_HDR, and SET_HDR, the main macros used to
* retrieve and set object headers.
*
- * Since 5.0 alpha 5, we can also take advantage of a header lookup
+ * We take advantage of a header lookup
* cache. This is a locally declared direct mapped cache, used inside
* the marker. The HC_GET_HDR macro uses and maintains this
* cache. Assuming we get reasonable hit rates, this shaves a few
@@ -60,22 +60,6 @@ typedef struct hblkhdr hdr;
/* #define COUNT_HDR_CACHE_HITS */
-extern hdr * GC_invalid_header; /* header for an imaginary block */
- /* containing no objects. */
-
-
-/* Check whether p and corresponding hhdr point to long or invalid */
-/* object. If so, advance hhdr to */
-/* beginning of block, or set hhdr to GC_invalid_header. */
-#define ADVANCE(p, hhdr, source) \
- { \
- hdr * new_hdr = GC_invalid_header; \
- p = GC_find_start(p, hhdr, &new_hdr); \
- hhdr = new_hdr; \
- }
-
-#ifdef USE_HDR_CACHE
-
# ifdef COUNT_HDR_CACHE_HITS
extern word GC_hdr_cache_hits;
extern word GC_hdr_cache_misses;
@@ -96,7 +80,7 @@ extern hdr * GC_invalid_header; /* header for an imaginary block */
# define DECLARE_HDR_CACHE \
hdr_cache_entry hdr_cache[HDR_CACHE_SIZE]
-# define INIT_HDR_CACHE BZERO(hdr_cache, sizeof(hdr_cache));
+# define INIT_HDR_CACHE BZERO(hdr_cache, sizeof(hdr_cache))
# define HCE(h) hdr_cache + (((word)(h) >> LOG_HBLKSIZE) & (HDR_CACHE_SIZE-1))
@@ -105,44 +89,33 @@ extern hdr * GC_invalid_header; /* header for an imaginary block */
# define HCE_HDR(h) ((hce) -> hce_hdr)
+#ifdef PRINT_BLACK_LIST
+ hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source);
+# define HEADER_CACHE_MISS(p, hce, source) \
+ GC_header_cache_miss(p, hce, source)
+#else
+ hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce);
+# define HEADER_CACHE_MISS(p, hce, source) GC_header_cache_miss(p, hce)
+#endif
-/* Analogous to GET_HDR, except that in the case of large objects, it */
-/* Returns the header for the object beginning, and updates p. */
-/* Returns GC_invalid_header instead of 0. All of this saves a branch */
-/* in the fast path. */
-# define HC_GET_HDR(p, hhdr, source) \
+/* Set hhdr to the header for p. Analogous to GET_HDR below, */
+/* except that in the case of large objects, it */
+/* gets the header for the object beginning, if GC_all_interior_ptrs */
+/* is set. */
+/* Returns zero if p points to somewhere other than the first page */
+/* of an object, and it is not a valid pointer to the object. */
+# define HC_GET_HDR(p, hhdr, source, exit_label) \
{ \
hdr_cache_entry * hce = HCE(p); \
- if (HCE_VALID_FOR(hce, p)) { \
+ if (EXPECT(HCE_VALID_FOR(hce, p), 1)) { \
HC_HIT(); \
hhdr = hce -> hce_hdr; \
} else { \
- HC_MISS(); \
- GET_HDR(p, hhdr); \
- if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
- ADVANCE(p, hhdr, source); \
- } else { \
- hce -> block_addr = (word)(p) >> LOG_HBLKSIZE; \
- hce -> hce_hdr = hhdr; \
- } \
+ hhdr = HEADER_CACHE_MISS(p, hce, source); \
+ if (0 == hhdr) goto exit_label; \
} \
}
-#else /* !USE_HDR_CACHE */
-
-# define DECLARE_HDR_CACHE
-
-# define INIT_HDR_CACHE
-
-# define HC_GET_HDR(p, hhdr, source) \
- { \
- GET_HDR(p, hhdr); \
- if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
- ADVANCE(p, hhdr, source); \
- } \
- }
-#endif
-
typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
diff --git a/include/private/gc_locks.h b/include/private/gc_locks.h
index 5e2de6df..b69d2b87 100644
--- a/include/private/gc_locks.h
+++ b/include/private/gc_locks.h
@@ -18,6 +18,8 @@
#ifndef GC_LOCKS_H
#define GC_LOCKS_H
+#include <atomic_ops.h>
+
/*
* Mutual exclusion between allocator/collector routines.
* Needed if there is more than one allocator thread.
@@ -31,8 +33,7 @@
* FASTLOCK is otherwise immune to interruption, provided it is
* not restarted.
* DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
- * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
- * (There is currently no equivalent for FASTLOCK.)
+ * and/or FASTLOCK.
*
* In the PARALLEL_MARK case, we also need to define a number of
* other inline finctions here:
@@ -43,7 +44,7 @@
*
*/
# ifdef THREADS
- void GC_noop1 GC_PROTO((word));
+ void GC_noop1(word);
# ifdef PCR_OBSOLETE /* Faster, but broken with multiple lwp's */
# include "th/PCR_Th.h"
# include "th/PCR_ThCrSec.h"
@@ -76,474 +77,16 @@
# define LOCK() RT0u__inCritical++
# define UNLOCK() RT0u__inCritical--
# endif
-# ifdef GC_SOLARIS_THREADS
-# include <thread.h>
-# include <signal.h>
- extern mutex_t GC_allocate_ml;
-# define LOCK() mutex_lock(&GC_allocate_ml);
-# define UNLOCK() mutex_unlock(&GC_allocate_ml);
-# endif
-
-/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
-/* acquisition and release. We need this for correct operation of the */
-/* incremental GC. */
-# ifdef __GNUC__
-# if defined(I386)
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- int oldval;
- /* Note: the "xchg" instruction does not need a "lock" prefix */
- __asm__ __volatile__("xchgl %0, %1"
- : "=r"(oldval), "=m"(*(addr))
- : "0"(1), "m"(*(addr)) : "memory");
- return oldval;
- }
-# define GC_TEST_AND_SET_DEFINED
-# endif
-# if defined(IA64)
-# if defined(__INTEL_COMPILER)
-# include <ia64intrin.h>
-# endif
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- long oldval, n = 1;
-# ifndef __INTEL_COMPILER
- __asm__ __volatile__("xchg4 %0=%1,%2"
- : "=r"(oldval), "=m"(*addr)
- : "r"(n), "1"(*addr) : "memory");
-# else
- oldval = _InterlockedExchange(addr, n);
-# endif
- return oldval;
- }
-# define GC_TEST_AND_SET_DEFINED
- /* Should this handle post-increment addressing?? */
- inline static void GC_clear(volatile unsigned int *addr) {
-# ifndef __INTEL_COMPILER
- __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
-# else
- // there is no st4 but I can use xchg I hope
- _InterlockedExchange(addr, 0);
-# endif
- }
-# define GC_CLEAR_DEFINED
-# endif
-# ifdef SPARC
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- int oldval;
-
- __asm__ __volatile__("ldstub %1,%0"
- : "=r"(oldval), "=m"(*addr)
- : "m"(*addr) : "memory");
- return oldval;
- }
-# define GC_TEST_AND_SET_DEFINED
-# endif
-# ifdef M68K
- /* Contributed by Tony Mantler. I'm not sure how well it was */
- /* tested. */
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- char oldval; /* this must be no longer than 8 bits */
-
- /* The return value is semi-phony. */
- /* 'tas' sets bit 7 while the return */
- /* value pretends bit 0 was set */
- __asm__ __volatile__(
- "tas %1@; sne %0; negb %0"
- : "=d" (oldval)
- : "a" (addr) : "memory");
- return oldval;
- }
-# define GC_TEST_AND_SET_DEFINED
-# endif
-# if defined(POWERPC)
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- int oldval;
- int temp = 1; /* locked value */
-
- __asm__ __volatile__(
- "1:\tlwarx %0,0,%3\n" /* load and reserve */
- "\tcmpwi %0, 0\n" /* if load is */
- "\tbne 2f\n" /* non-zero, return already set */
- "\tstwcx. %2,0,%1\n" /* else store conditional */
- "\tbne- 1b\n" /* retry if lost reservation */
- "\tsync\n" /* import barrier */
- "2:\t\n" /* oldval is zero if we set */
- : "=&r"(oldval), "=p"(addr)
- : "r"(temp), "1"(addr)
- : "cr0","memory");
- return oldval;
- }
-# define GC_TEST_AND_SET_DEFINED
- inline static void GC_clear(volatile unsigned int *addr) {
- __asm__ __volatile__("lwsync" : : : "memory");
- *(addr) = 0;
- }
-# define GC_CLEAR_DEFINED
-# endif
-# if defined(ALPHA)
- inline static int GC_test_and_set(volatile unsigned int * addr)
- {
- unsigned long oldvalue;
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " and %0,%3,%2\n"
- " bne %2,2f\n"
- " xor %0,%3,%0\n"
- " stl_c %0,%1\n"
-# ifdef __ELF__
- " beq %0,3f\n"
-# else
- " beq %0,1b\n"
-# endif
- " mb\n"
- "2:\n"
-# ifdef __ELF__
- ".section .text2,\"ax\"\n"
- "3: br 1b\n"
- ".previous"
-# endif
- :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
- :"Ir" (1), "m" (*addr)
- :"memory");
- return oldvalue;
- }
-# define GC_TEST_AND_SET_DEFINED
- inline static void GC_clear(volatile unsigned int *addr) {
- __asm__ __volatile__("mb" : : : "memory");
- *(addr) = 0;
- }
-# define GC_CLEAR_DEFINED
-# endif /* ALPHA */
-# ifdef ARM32
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- int oldval;
- /* SWP on ARM is very similar to XCHG on x86. Doesn't lock the
- * bus because there are no SMP ARM machines. If/when there are,
- * this code will likely need to be updated. */
- /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
- __asm__ __volatile__("swp %0, %1, [%2]"
- : "=r"(oldval)
- : "r"(1), "r"(addr)
- : "memory");
- return oldval;
- }
-# define GC_TEST_AND_SET_DEFINED
-# endif /* ARM32 */
-# ifdef CRIS
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h. */
- /* Included with Hans-Peter Nilsson's permission. */
- register unsigned long int ret;
-
- /* Note the use of a dummy output of *addr to expose the write.
- * The memory barrier is to stop *other* writes being moved past
- * this code.
- */
- __asm__ __volatile__("clearf\n"
- "0:\n\t"
- "movu.b [%2],%0\n\t"
- "ax\n\t"
- "move.b %3,[%2]\n\t"
- "bwf 0b\n\t"
- "clearf"
- : "=&r" (ret), "=m" (*addr)
- : "r" (addr), "r" ((int) 1), "m" (*addr)
- : "memory");
- return ret;
- }
-# define GC_TEST_AND_SET_DEFINED
-# endif /* CRIS */
-# ifdef S390
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- int ret;
- __asm__ __volatile__ (
- " l %0,0(%2)\n"
- "0: cs %0,%1,0(%2)\n"
- " jl 0b"
- : "=&d" (ret)
- : "d" (1), "a" (addr)
- : "cc", "memory");
- return ret;
- }
-# endif
-# endif /* __GNUC__ */
-# if (defined(ALPHA) && !defined(__GNUC__))
-# ifndef OSF1
- --> We currently assume that if gcc is not used, we are
- --> running under Tru64.
-# endif
-# include <machine/builtins.h>
-# include <c_asm.h>
-# define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
-# define GC_TEST_AND_SET_DEFINED
-# define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
-# define GC_CLEAR_DEFINED
-# endif
-# if defined(MSWIN32)
-# define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
-# define GC_TEST_AND_SET_DEFINED
-# endif
-# ifdef MIPS
-# ifdef LINUX
-# include <sys/tas.h>
-# define GC_test_and_set(addr) _test_and_set((int *) addr,1)
-# define GC_TEST_AND_SET_DEFINED
-# elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
- || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
-# ifdef __GNUC__
-# define GC_test_and_set(addr) _test_and_set((void *)addr,1)
-# else
-# define GC_test_and_set(addr) test_and_set((void *)addr,1)
-# endif
-# else
-# define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
-# define GC_clear(addr) __lock_release(addr);
-# define GC_CLEAR_DEFINED
-# endif
-# define GC_TEST_AND_SET_DEFINED
-# endif /* MIPS */
-# if defined(_AIX)
-# include <sys/atomic_op.h>
-# if (defined(_POWER) || defined(_POWERPC))
-# if defined(__GNUC__)
- inline static void GC_memsync() {
- __asm__ __volatile__ ("sync" : : : "memory");
- }
-# else
-# ifndef inline
-# define inline __inline
-# endif
-# pragma mc_func GC_memsync { \
- "7c0004ac" /* sync (same opcode used for dcs)*/ \
- }
-# endif
-# else
-# error dont know how to memsync
-# endif
- inline static int GC_test_and_set(volatile unsigned int * addr) {
- int oldvalue = 0;
- if (compare_and_swap((void *)addr, &oldvalue, 1)) {
- GC_memsync();
- return 0;
- } else return 1;
- }
-# define GC_TEST_AND_SET_DEFINED
- inline static void GC_clear(volatile unsigned int *addr) {
- GC_memsync();
- *(addr) = 0;
- }
-# define GC_CLEAR_DEFINED
-
-# endif
-# if 0 /* defined(HP_PA) */
- /* The official recommendation seems to be to not use ldcw from */
- /* user mode. Since multithreaded incremental collection doesn't */
- /* work anyway on HP_PA, this shouldn't be a major loss. */
-
- /* "set" means 0 and "clear" means 1 here. */
-# define GC_test_and_set(addr) !GC_test_and_clear(addr);
-# define GC_TEST_AND_SET_DEFINED
-# define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
- /* The above needs a memory barrier! */
-# define GC_CLEAR_DEFINED
-# endif
-# if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
-# ifdef __GNUC__
- inline static void GC_clear(volatile unsigned int *addr) {
- /* Try to discourage gcc from moving anything past this. */
- __asm__ __volatile__(" " : : : "memory");
- *(addr) = 0;
- }
-# else
- /* The function call in the following should prevent the */
- /* compiler from moving assignments to below the UNLOCK. */
-# define GC_clear(addr) GC_noop1((word)(addr)); \
- *((volatile unsigned int *)(addr)) = 0;
-# endif
-# define GC_CLEAR_DEFINED
-# endif /* !GC_CLEAR_DEFINED */
-
-# if !defined(GC_TEST_AND_SET_DEFINED)
+# if !defined(AO_have_test_and_set_acquire)
# define USE_PTHREAD_LOCKS
# endif
-# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
- && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
+
+# if defined(GC_PTHREADS) \
+ && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
# define NO_THREAD (pthread_t)(-1)
# include <pthread.h>
-# if defined(PARALLEL_MARK)
- /* We need compare-and-swap to update mark bits, where it's */
- /* performance critical. If USE_MARK_BYTES is defined, it is */
- /* no longer needed for this purpose. However we use it in */
- /* either case to implement atomic fetch-and-add, though that's */
- /* less performance critical, and could perhaps be done with */
- /* a lock. */
-# if defined(GENERIC_COMPARE_AND_SWAP)
- /* Probably not useful, except for debugging. */
- /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we */
- /* minimize its use. */
- extern pthread_mutex_t GC_compare_and_swap_lock;
-
- /* Note that if GC_word updates are not atomic, a concurrent */
- /* reader should acquire GC_compare_and_swap_lock. On */
- /* currently supported platforms, such updates are atomic. */
- extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
- GC_word old, GC_word new_val);
-# endif /* GENERIC_COMPARE_AND_SWAP */
-# if defined(I386)
-# if !defined(GENERIC_COMPARE_AND_SWAP)
- /* Returns TRUE if the comparison succeeded. */
- inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
- GC_word old,
- GC_word new_val)
- {
- char result;
- __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
- : "+m"(*(addr)), "=r"(result)
- : "r" (new_val), "a"(old) : "memory");
- return (GC_bool) result;
- }
-# endif /* !GENERIC_COMPARE_AND_SWAP */
- inline static void GC_memory_barrier()
- {
- /* We believe the processor ensures at least processor */
- /* consistent ordering. Thus a compiler barrier */
- /* should suffice. */
- __asm__ __volatile__("" : : : "memory");
- }
-# endif /* I386 */
-
-# if defined(POWERPC)
-# if !defined(GENERIC_COMPARE_AND_SWAP)
- /* Returns TRUE if the comparison succeeded. */
- inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
- GC_word old, GC_word new_val)
- {
- int result, dummy;
- __asm__ __volatile__(
- "1:\tlwarx %0,0,%5\n"
- "\tcmpw %0,%4\n"
- "\tbne 2f\n"
- "\tstwcx. %3,0,%2\n"
- "\tbne- 1b\n"
- "\tsync\n"
- "\tli %1, 1\n"
- "\tb 3f\n"
- "2:\tli %1, 0\n"
- "3:\t\n"
- : "=&r" (dummy), "=r" (result), "=p" (addr)
- : "r" (new_val), "r" (old), "2"(addr)
- : "cr0","memory");
- return (GC_bool) result;
- }
-# endif /* !GENERIC_COMPARE_AND_SWAP */
- inline static void GC_memory_barrier()
- {
- __asm__ __volatile__("sync" : : : "memory");
- }
-# endif /* POWERPC */
-
-# if defined(IA64)
-# if !defined(GENERIC_COMPARE_AND_SWAP)
- inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
- GC_word old, GC_word new_val)
- {
- unsigned long oldval;
- __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
- : "=r"(oldval), "=m"(*addr)
- : "r"(new_val), "1"(*addr), "r"(old) : "memory");
- return (oldval == old);
- }
-# endif /* !GENERIC_COMPARE_AND_SWAP */
-# if 0
- /* Shouldn't be needed; we use volatile stores instead. */
- inline static void GC_memory_barrier()
- {
- __asm__ __volatile__("mf" : : : "memory");
- }
-# endif /* 0 */
-# endif /* IA64 */
-# if defined(ALPHA)
-# if !defined(GENERIC_COMPARE_AND_SWAP)
-# if defined(__GNUC__)
- inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
- GC_word old, GC_word new_val)
- {
- unsigned long was_equal;
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " cmpeq %0,%4,%2\n"
- " mov %3,%0\n"
- " beq %2,2f\n"
- " stq_c %0,%1\n"
- " beq %0,1b\n"
- "2:\n"
- " mb\n"
- :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
- : "r" (new_val), "Ir" (old)
- :"memory");
- return was_equal;
- }
-# else /* !__GNUC__ */
- inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
- GC_word old, GC_word new_val)
- {
- return __CMP_STORE_QUAD(addr, old, new_val, addr);
- }
-# endif /* !__GNUC__ */
-# endif /* !GENERIC_COMPARE_AND_SWAP */
-# ifdef __GNUC__
- inline static void GC_memory_barrier()
- {
- __asm__ __volatile__("mb" : : : "memory");
- }
-# else
-# define GC_memory_barrier() asm("mb")
-# endif /* !__GNUC__ */
-# endif /* ALPHA */
-# if defined(S390)
-# if !defined(GENERIC_COMPARE_AND_SWAP)
- inline static GC_bool GC_compare_and_exchange(volatile C_word *addr,
- GC_word old, GC_word new_val)
- {
- int retval;
- __asm__ __volatile__ (
-# ifndef __s390x__
- " cs %1,%2,0(%3)\n"
-# else
- " csg %1,%2,0(%3)\n"
-# endif
- " ipm %0\n"
- " srl %0,28\n"
- : "=&d" (retval), "+d" (old)
- : "d" (new_val), "a" (addr)
- : "cc", "memory");
- return retval == 0;
- }
-# endif
-# endif
-# if !defined(GENERIC_COMPARE_AND_SWAP)
- /* Returns the original value of *addr. */
- inline static GC_word GC_atomic_add(volatile GC_word *addr,
- GC_word how_much)
- {
- GC_word old;
- do {
- old = *addr;
- } while (!GC_compare_and_exchange(addr, old, old+how_much));
- return old;
- }
-# else /* GENERIC_COMPARE_AND_SWAP */
- /* So long as a GC_word can be atomically updated, it should */
- /* be OK to read *addr without a lock. */
- extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
-# endif /* GENERIC_COMPARE_AND_SWAP */
-
-# endif /* PARALLEL_MARK */
# if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
/* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
@@ -556,74 +99,64 @@
/* Allocation lock holder. Only set if acquired by client through */
/* GC_call_with_alloc_lock. */
# ifdef GC_ASSERTIONS
-# define LOCK() \
- { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
+# define UNCOND_LOCK() \
+ { if (AO_test_and_set_acquire(&GC_allocate_lock)) GC_lock(); \
SET_LOCK_HOLDER(); }
-# define UNLOCK() \
+# define UNCOND_UNLOCK() \
{ GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
- GC_clear(&GC_allocate_lock); }
+ AO_CLEAR(&GC_allocate_lock); }
# else
-# define LOCK() \
- { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
-# define UNLOCK() \
- GC_clear(&GC_allocate_lock)
+# define UNCOND_LOCK() \
+ { if (AO_test_and_set_acquire(&GC_allocate_lock)) GC_lock(); }
+# define UNCOND_UNLOCK() \
+ AO_CLEAR(&GC_allocate_lock)
# endif /* !GC_ASSERTIONS */
-# if 0
- /* Another alternative for OSF1 might be: */
-# include <sys/mman.h>
- extern msemaphore GC_allocate_semaphore;
-# define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
- != 0) GC_lock(); else GC_allocate_lock = 1; }
- /* The following is INCORRECT, since the memory model is too weak. */
- /* Is this true? Presumably msem_unlock has the right semantics? */
- /* - HB */
-# define UNLOCK() { GC_allocate_lock = 0; \
- msem_unlock(&GC_allocate_semaphore, 0); }
-# endif /* 0 */
# else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
# ifndef USE_PTHREAD_LOCKS
# define USE_PTHREAD_LOCKS
# endif
-# endif /* THREAD_LOCAL_ALLOC */
-# ifdef USE_PTHREAD_LOCKS
+# endif /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
+# ifdef USE_PTHREAD_LOCKS
# include <pthread.h>
extern pthread_mutex_t GC_allocate_ml;
# ifdef GC_ASSERTIONS
-# define LOCK() \
+# define UNCOND_LOCK() \
{ GC_lock(); \
SET_LOCK_HOLDER(); }
-# define UNLOCK() \
+# define UNCOND_UNLOCK() \
{ GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
pthread_mutex_unlock(&GC_allocate_ml); }
# else /* !GC_ASSERTIONS */
# if defined(NO_PTHREAD_TRYLOCK)
-# define LOCK() GC_lock();
+# define UNCOND_LOCK() GC_lock();
# else /* !defined(NO_PTHREAD_TRYLOCK) */
-# define LOCK() \
+# define UNCOND_LOCK() \
{ if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
# endif
-# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# define UNCOND_UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# endif /* !GC_ASSERTIONS */
-# endif /* USE_PTHREAD_LOCKS */
-# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
-# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
-# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
- extern VOLATILE GC_bool GC_collecting;
-# define ENTER_GC() GC_collecting = 1;
-# define EXIT_GC() GC_collecting = 0;
- extern void GC_lock(void);
- extern pthread_t GC_lock_holder;
-# ifdef GC_ASSERTIONS
+# endif /* USE_PTHREAD_LOCKS */
+# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
+# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
+# define I_HOLD_LOCK() (!GC_need_to_lock \
+ || pthread_equal(GC_lock_holder, pthread_self()))
+ extern volatile GC_bool GC_collecting;
+# define ENTER_GC() GC_collecting = 1;
+# define EXIT_GC() GC_collecting = 0;
+ extern void GC_lock(void);
+ extern pthread_t GC_lock_holder;
+# ifdef GC_ASSERTIONS
extern pthread_t GC_mark_lock_holder;
-# endif
+# endif
# endif /* GC_PTHREADS with linux_threads.c implementation */
+
# if defined(GC_IRIX_THREADS)
# include <pthread.h>
/* This probably should never be included, but I can't test */
/* on Irix anymore. */
# include <mutex.h>
- extern volatile unsigned int GC_allocate_lock;
+ extern volatile AO_TS_t GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
/* POSIX scheduling rules are subject to convoys in high contention */
/* applications. This is basically a spin lock. */
@@ -635,26 +168,28 @@
# define NO_THREAD (pthread_t)(-1)
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
-# define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
-# define UNLOCK() GC_clear(&GC_allocate_lock);
- extern VOLATILE GC_bool GC_collecting;
+# define UNCOND_LOCK() { if (AO_test_and_set_acquire(&GC_allocate_lock)) \
+ GC_lock(); }
+# define UNCOND_UNLOCK() AO_CLEAR(&GC_allocate_lock);
+ extern volatile GC_bool GC_collecting;
# define ENTER_GC() \
{ \
GC_collecting = 1; \
}
# define EXIT_GC() GC_collecting = 0;
# endif /* GC_IRIX_THREADS */
+
# if defined(GC_WIN32_THREADS)
# if defined(GC_PTHREADS)
# include <pthread.h>
extern pthread_mutex_t GC_allocate_ml;
-# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
-# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# define UNCOND_LOCK() pthread_mutex_lock(&GC_allocate_ml)
+# define UNCOND_UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# else
# include <windows.h>
GC_API CRITICAL_SECTION GC_allocate_ml;
-# define LOCK() EnterCriticalSection(&GC_allocate_ml);
-# define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
+# define UNCOND_LOCK() EnterCriticalSection(&GC_allocate_ml);
+# define UNCOND_UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
# endif
# endif
# ifndef SET_LOCK_HOLDER
@@ -668,6 +203,14 @@
# define LOCK()
# define UNLOCK()
# endif /* !THREADS */
+
+#if defined(UNCOND_LOCK) && !defined(LOCK)
+ extern GC_bool GC_need_to_lock;
+ /* At least two threads are running; need to lock. */
+# define LOCK() if (GC_need_to_lock) { UNCOND_LOCK(); }
+# define UNLOCK() if (GC_need_to_lock) { UNCOND_UNLOCK(); }
+#endif
+
# ifndef SET_LOCK_HOLDER
# define SET_LOCK_HOLDER()
# define UNSET_LOCK_HOLDER()
@@ -675,6 +218,7 @@
/* Used on platforms were locks can be reacquired, */
/* so it doesn't matter if we lie. */
# endif
+
# ifndef ENTER_GC
# define ENTER_GC()
# define EXIT_GC()
@@ -683,6 +227,7 @@
# ifndef DCL_LOCK_STATE
# define DCL_LOCK_STATE
# endif
+
# ifndef FASTLOCK
# define FASTLOCK() LOCK()
# define FASTLOCK_SUCCEEDED() TRUE
diff --git a/include/private/gc_pmark.h b/include/private/gc_pmark.h
index 51981914..03d3af16 100644
--- a/include/private/gc_pmark.h
+++ b/include/private/gc_pmark.h
@@ -60,18 +60,18 @@ extern word GC_n_mark_procs;
#define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8)
typedef struct GC_ms_entry {
- GC_word * mse_start; /* First word of object */
+ ptr_t mse_start; /* First word of object */
GC_word mse_descr; /* Descriptor; low order two bits are tags, */
/* identifying the upper 30 bits as one of the */
/* following: */
} mse;
-extern word GC_mark_stack_size;
+extern size_t GC_mark_stack_size;
extern mse * GC_mark_stack_limit;
#ifdef PARALLEL_MARK
- extern mse * VOLATILE GC_mark_stack_top;
+ extern mse * volatile GC_mark_stack_top;
#else
extern mse * GC_mark_stack_top;
#endif
@@ -117,11 +117,6 @@ extern mse * GC_mark_stack;
/* once it returns to 0, it */
/* stays zero for the cycle. */
/* GC_mark_stack_top is also protected by mark lock. */
- extern mse * VOLATILE GC_first_nonempty;
- /* Lowest entry on mark stack */
- /* that may be nonempty. */
- /* Updated only by initiating */
- /* thread. */
/*
* GC_notify_all_marker() is used when GC_help_wanted is first set,
* when the last helper becomes inactive,
@@ -134,21 +129,9 @@ extern mse * GC_mark_stack;
/* Return a pointer to within 1st page of object. */
/* Set *new_hdr_p to corr. hdr. */
-#ifdef __STDC__
- ptr_t GC_find_start(ptr_t current, hdr *hhdr, hdr **new_hdr_p);
-#else
- ptr_t GC_find_start();
-#endif
-
-mse * GC_signal_mark_stack_overflow GC_PROTO((mse *msp));
+ptr_t GC_find_start(ptr_t current, hdr *hhdr, hdr **new_hdr_p);
-# ifdef GATHERSTATS
-# define ADD_TO_ATOMIC(sz) GC_atomic_in_use += (sz)
-# define ADD_TO_COMPOSITE(sz) GC_composite_in_use += (sz)
-# else
-# define ADD_TO_ATOMIC(sz)
-# define ADD_TO_COMPOSITE(sz)
-# endif
+mse * GC_signal_mark_stack_overflow(mse *msp);
/* Push the object obj with corresponding heap block header hhdr onto */
/* the mark stack. */
@@ -156,10 +139,7 @@ mse * GC_signal_mark_stack_overflow GC_PROTO((mse *msp));
{ \
register word _descr = (hhdr) -> hb_descr; \
\
- if (_descr == 0) { \
- ADD_TO_ATOMIC((hhdr) -> hb_sz); \
- } else { \
- ADD_TO_COMPOSITE((hhdr) -> hb_sz); \
+ if (_descr != 0) { \
mark_stack_top++; \
if (mark_stack_top >= mark_stack_limit) { \
mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top); \
@@ -177,94 +157,207 @@ mse * GC_signal_mark_stack_overflow GC_PROTO((mse *msp));
source, exit_label) \
{ \
hdr * my_hhdr; \
- ptr_t my_current = current; \
- \
- GET_HDR(my_current, my_hhdr); \
- if (IS_FORWARDING_ADDR_OR_NIL(my_hhdr)) { \
- hdr * new_hdr = GC_invalid_header; \
- my_current = GC_find_start(my_current, my_hhdr, &new_hdr); \
- my_hhdr = new_hdr; \
- } \
- PUSH_CONTENTS_HDR(my_current, mark_stack_top, mark_stack_limit, \
- source, exit_label, my_hhdr); \
-exit_label: ; \
-}
-
-/* As above, but use header cache for header lookup. */
-# define HC_PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
- source, exit_label) \
-{ \
- hdr * my_hhdr; \
- ptr_t my_current = current; \
\
- HC_GET_HDR(my_current, my_hhdr, source); \
- PUSH_CONTENTS_HDR(my_current, mark_stack_top, mark_stack_limit, \
- source, exit_label, my_hhdr); \
+ HC_GET_HDR(current, my_hhdr, source, exit_label); \
+ PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
+ source, exit_label, my_hhdr, TRUE); \
exit_label: ; \
}
/* Set mark bit, exit if it was already set. */
-# ifdef USE_MARK_BYTES
- /* Unlike the mark bit case, there is a race here, and we may set */
- /* the bit twice in the concurrent case. This can result in the */
- /* object being pushed twice. But that's only a performance issue. */
-# define SET_MARK_BIT_EXIT_IF_SET(hhdr,displ,exit_label) \
+# ifdef USE_MARK_BITS
+/* FIXME: untested */
+# if defined(THREADS)
+ /* Introduces a benign race as in the byte case. */
+# define OR_WORD_EXIT_IF_SET(addr, mask, label) \
+ if (!(*(addr) & (mask))) { \
+ AO_or((AO_t *)(addr), (mask)); \
+ } else { \
+ goto label; \
+ }
+# else /* !THREADS */
+# define OR_WORD_EXIT_IF_SET(addr, mask, label) \
+ if (!(*(addr) & (mask))) { \
+ *(addr) |= (mask); \
+ } else { \
+ goto label; \
+ }
+# endif
+# define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
{ \
- register VOLATILE char * mark_byte_addr = \
- hhdr -> hb_marks + ((displ) >> 1); \
- register char mark_byte = *mark_byte_addr; \
+ word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(bit_no); \
+ \
+ OR_WORD_EXIT_IF_SET(mark_word_addr, (word)1 << modWORDSZ(bit_no), \
+ exit_label); \
+ }
+# endif
+
+
+#if defined(I386) && defined(__GNUC__)
+# define LONG_MULT(hprod, lprod, x, y) { \
+ asm("mull %2" : "=a"(lprod), "=d"(hprod) : "g"(y), "0"(x)); \
+ }
+#else /* No in-line X86 assembly code */
+# define LONG_MULT(hprod, lprod, x, y) { \
+ unsigned long long prod = (unsigned long long)x \
+ * (unsigned long long)y; \
+ hprod = prod >> 32; \
+ lprod = (unsigned32)prod; \
+ }
+#endif
+
+ /* There is a race here, and we may set */
+ /* the bit twice in the concurrent case. This can result in the */
+ /* object being pushed twice. But that's only a performance issue. */
+# define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
+ { \
+ char * mark_byte_addr = (char *)hhdr -> hb_marks + (bit_no); \
+ char mark_byte = *mark_byte_addr; \
\
if (mark_byte) goto exit_label; \
*mark_byte_addr = 1; \
}
-# else
-# define SET_MARK_BIT_EXIT_IF_SET(hhdr,displ,exit_label) \
- { \
- register word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(displ); \
- \
- OR_WORD_EXIT_IF_SET(mark_word_addr, (word)1 << modWORDSZ(displ), \
- exit_label); \
- }
-# endif /* USE_MARK_BYTES */
+#ifdef PARALLEL_MARK
+# define INCR_MARKS(hhdr) \
+ AO_store(&(hhdr -> hb_n_marks), AO_load(&(hhdr -> hb_n_marks))+1);
+#else
+# define INCR_MARKS(hhdr) ++(hhdr -> hb_n_marks)
+#endif
+
+#ifdef ENABLE_TRACE
+# define TRACE(source, cmd) \
+ if (GC_trace_addr != 0 && (ptr_t)(source) == GC_trace_addr) cmd
+# define TRACE_TARGET(target, cmd) \
+ if (GC_trace_addr != 0 && (target) == *(ptr_t *)GC_trace_addr) cmd
+#else
+# define TRACE(source, cmd)
+# define TRACE_TARGET(source, cmd)
+#endif
/* If the mark bit corresponding to current is not set, set it, and */
-/* push the contents of the object on the mark stack. For a small */
-/* object we assume that current is the (possibly interior) pointer */
-/* to the object. For large objects we assume that current points */
-/* to somewhere inside the first page of the object. If */
-/* GC_all_interior_pointers is set, it may have been previously */
-/* adjusted to make that true. */
+/* push the contents of the object on the mark stack. Current points */
+/* to the beginning of the object.  We rely on the fact that the */
+/* preceding header calculation will succeed for a pointer past the */
+/* first page of an object, only if it is in fact a valid pointer */
+/* to the object.  Thus we can omit the otherwise necessary tests */
+/* here.  Note in particular that the "displ" value is the displacement */
+/* from the beginning of the heap block, which may itself be in the */
+/* interior of a large object. */
+#ifdef MARK_BIT_PER_GRANULE
# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
- source, exit_label, hhdr) \
+ source, exit_label, hhdr, do_offset_check) \
{ \
- int displ; /* Displacement in block; first bytes, then words */ \
- int map_entry; \
- \
- displ = HBLKDISPL(current); \
- map_entry = MAP_ENTRY((hhdr -> hb_map), displ); \
- displ = BYTES_TO_WORDS(displ); \
- if (map_entry > CPP_MAX_OFFSET) { \
- if (map_entry == OFFSET_TOO_BIG) { \
- map_entry = displ % (hhdr -> hb_sz); \
- displ -= map_entry; \
- if (displ + (hhdr -> hb_sz) > BYTES_TO_WORDS(HBLKSIZE)) { \
- GC_ADD_TO_BLACK_LIST_NORMAL((word)current, source); \
+ size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
+ /* displ is always within range. If current doesn't point to */ \
+ /* first block, then we are in the all_interior_pointers case, and */ \
+ /* it is safe to use any displacement value. */ \
+ size_t gran_displ = BYTES_TO_GRANULES(displ); \
+ size_t gran_offset = hhdr -> hb_map[gran_displ]; \
+ size_t byte_offset = displ & (GRANULE_BYTES - 1); \
+ ptr_t base = current; \
+ /* The following always fails for large block references. */ \
+ if (EXPECT((gran_offset | byte_offset) != 0, FALSE)) { \
+ if (hhdr -> hb_large_block) { \
+ /* gran_offset is bogus. */ \
+ size_t obj_displ; \
+ base = (ptr_t)(hhdr -> hb_block); \
+ obj_displ = (ptr_t)(current) - base; \
+ if (obj_displ != displ) { \
+ GC_ASSERT(obj_displ < hhdr -> hb_sz); \
+ /* Must be in all_interior_pointer case, not first block */ \
+ /* already did validity check on cache miss. */ \
+ ; \
+ } else { \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ } \
+ gran_displ = 0; \
+ GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
+ hhdr -> hb_block == HBLKPTR(current)); \
+ GC_ASSERT((ptr_t)(hhdr -> hb_block) <= (ptr_t) current); \
+ } else { \
+ size_t obj_displ = GRANULES_TO_BYTES(gran_offset) \
+ + byte_offset; \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
goto exit_label; \
} \
+ gran_displ -= gran_offset; \
+ base -= obj_displ; \
+ } \
+ } \
+ GC_ASSERT(hhdr == GC_find_header(base)); \
+ GC_ASSERT(gran_displ % BYTES_TO_GRANULES(hhdr -> hb_sz) == 0); \
+ TRACE(source, GC_log_printf("GC:%d: passed validity tests\n",GC_gc_no)); \
+ SET_MARK_BIT_EXIT_IF_SET(hhdr, gran_displ, exit_label); \
+ TRACE(source, GC_log_printf("GC:%d: previously unmarked\n",GC_gc_no)); \
+ TRACE_TARGET(base, \
+ GC_log_printf("GC:%d: marking %p from %p instead\n", GC_gc_no, \
+ base, source)); \
+ INCR_MARKS(hhdr); \
+ GC_STORE_BACK_PTR((ptr_t)source, base); \
+ PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
+}
+#endif /* MARK_BIT_PER_GRANULE */
+
+#ifdef MARK_BIT_PER_OBJ
+# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
+ source, exit_label, hhdr, do_offset_check) \
+{ \
+ size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
+ unsigned32 low_prod, high_prod, offset_fraction; \
+ unsigned32 inv_sz = hhdr -> hb_inv_sz; \
+ ptr_t base = current; \
+ LONG_MULT(high_prod, low_prod, displ, inv_sz); \
+ /* product is > and within sz_in_bytes of displ * sz_in_bytes * 2**32 */ \
+ if (EXPECT(low_prod >> 16 != 0, FALSE)) { \
+ FIXME: fails if offset is a multiple of HBLKSIZE which becomes 0 \
+ if (inv_sz == LARGE_INV_SZ) { \
+ size_t obj_displ; \
+ base = (ptr_t)(hhdr -> hb_block); \
+ obj_displ = (ptr_t)(current) - base; \
+ if (obj_displ != displ) { \
+ GC_ASSERT(obj_displ < hhdr -> hb_sz); \
+ /* Must be in all_interior_pointer case, not first block */ \
+ /* already did validity check on cache miss. */ \
+ ; \
+ } else { \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ } \
+ GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
+ hhdr -> hb_block == HBLKPTR(current)); \
+ GC_ASSERT((ptr_t)(hhdr -> hb_block) < (ptr_t) current); \
} else { \
- GC_ADD_TO_BLACK_LIST_NORMAL((word)current, source); goto exit_label; \
+ /* Accurate enough if HBLKSIZE <= 2**15. */ \
+ GC_ASSERT(HBLKSIZE <= (1 << 15)); \
+ size_t obj_displ = (((low_prod >> 16) + 1) * (hhdr -> hb_sz)) >> 16; \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ base -= obj_displ; \
} \
- } else { \
- displ -= map_entry; \
} \
- GC_ASSERT(displ >= 0 && displ < MARK_BITS_PER_HBLK); \
- SET_MARK_BIT_EXIT_IF_SET(hhdr, displ, exit_label); \
- GC_STORE_BACK_PTR((ptr_t)source, (ptr_t)HBLKPTR(current) \
- + WORDS_TO_BYTES(displ)); \
- PUSH_OBJ(((word *)(HBLKPTR(current)) + displ), hhdr, \
- mark_stack_top, mark_stack_limit) \
+ /* May get here for pointer to start of block not at */ \
+ /* beginning of object. If so, it's valid, and we're fine. */ \
+ GC_ASSERT(high_prod >= 0 && high_prod <= HBLK_OBJS(hhdr -> hb_sz)); \
+ TRACE(source, GC_log_printf("GC:%d: passed validity tests\n",GC_gc_no)); \
+ SET_MARK_BIT_EXIT_IF_SET(hhdr, high_prod, exit_label); \
+ TRACE(source, GC_log_printf("GC:%d: previously unmarked\n",GC_gc_no)); \
+ TRACE_TARGET(base, \
+ GC_log_printf("GC:%d: marking %p from %p instead\n", GC_gc_no, \
+ base, source)); \
+ INCR_MARKS(hhdr); \
+ GC_STORE_BACK_PTR((ptr_t)source, base); \
+ PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
}
+#endif /* MARK_BIT_PER_OBJ */
#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
# define PUSH_ONE_CHECKED_STACK(p, source) \
@@ -285,13 +378,13 @@ exit_label: ; \
# if NEED_FIXUP_POINTER
/* Try both the raw version and the fixed up one. */
# define GC_PUSH_ONE_STACK(p, source) \
- if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
- && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ if ((p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
PUSH_ONE_CHECKED_STACK(p, source); \
} \
FIXUP_POINTER(p); \
- if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
- && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ if ((p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
PUSH_ONE_CHECKED_STACK(p, source); \
}
# else /* !NEED_FIXUP_POINTER */
@@ -305,22 +398,22 @@ exit_label: ; \
/*
* As above, but interior pointer recognition as for
- * normal for heap pointers.
+ * normal heap pointers.
*/
# define GC_PUSH_ONE_HEAP(p,source) \
FIXUP_POINTER(p); \
- if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
- && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ if ((p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
GC_mark_stack_top = GC_mark_and_push( \
- (GC_PTR)(p), GC_mark_stack_top, \
- GC_mark_stack_limit, (GC_PTR *)(source)); \
+ (void *)(p), GC_mark_stack_top, \
+ GC_mark_stack_limit, (void * *)(source)); \
}
/* Mark starting at mark stack entry top (incl.) down to */
/* mark stack entry bottom (incl.). Stop after performing */
/* about one page worth of work. Return the new mark stack */
/* top entry. */
-mse * GC_mark_from GC_PROTO((mse * top, mse * bottom, mse *limit));
+mse * GC_mark_from(mse * top, mse * bottom, mse *limit);
#define MARK_FROM_MARK_STACK() \
GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index e5bd52b6..06d8a15a 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -2,7 +2,7 @@
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
@@ -19,17 +19,6 @@
# ifndef GC_PRIVATE_H
# define GC_PRIVATE_H
-#if defined(mips) && defined(SYSTYPE_BSD) && defined(sony_news)
- /* sony RISC NEWS, NEWSOS 4 */
-# define BSD_TIME
-/* typedef long ptrdiff_t; -- necessary on some really old systems */
-#endif
-
-#if defined(mips) && defined(SYSTYPE_BSD43)
- /* MIPS RISCOS 4 */
-# define BSD_TIME
-#endif
-
#ifdef DGUX
# include <sys/types.h>
# include <sys/time.h>
@@ -52,6 +41,7 @@
typedef GC_word word;
typedef GC_signed_word signed_word;
+typedef unsigned int unsigned32;
typedef int GC_bool;
# define TRUE 1
@@ -64,30 +54,27 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# ifndef GCCONFIG_H
# include "gcconfig.h"
+# ifndef USE_MARK_BYTES
+# define USE_MARK_BYTES
+# endif
# endif
# ifndef HEADERS_H
# include "gc_hdrs.h"
# endif
-#if defined(__STDC__)
-# include <stdlib.h>
-# if !(defined( sony_news ) )
-# include <stddef.h>
-# endif
-# define VOLATILE volatile
-#else
-# ifdef MSWIN32
-# include <stdlib.h>
-# endif
-# define VOLATILE
-#endif
+# include <stdlib.h>
+# if !(defined( sony_news ) )
+# include <stddef.h>
+# endif
-#if 0 /* defined(__GNUC__) doesn't work yet */
+#if __GNUC__ >= 3
# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
+# define INLINE inline
/* Equivalent to (expr), but predict that usually (expr)==outcome. */
#else
# define EXPECT(expr, outcome) (expr)
+# define INLINE
#endif /* __GNUC__ */
# ifndef GC_LOCKS_H
@@ -97,13 +84,13 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# ifdef STACK_GROWS_DOWN
# define COOLER_THAN >
# define HOTTER_THAN <
-# define MAKE_COOLER(x,y) if ((word)(x)+(y) > (word)(x)) {(x) += (y);} \
- else {(x) = (word)ONES;}
+# define MAKE_COOLER(x,y) if ((x)+(y) > (x)) {(x) += (y);} \
+ else {(x) = (ptr_t)ONES;}
# define MAKE_HOTTER(x,y) (x) -= (y)
# else
# define COOLER_THAN <
# define HOTTER_THAN >
-# define MAKE_COOLER(x,y) if ((word)(x)-(y) < (word)(x)) {(x) -= (y);} else {(x) = 0;}
+# define MAKE_COOLER(x,y) if ((x)-(y) < (x)) {(x) -= (y);} else {(x) = 0;}
# define MAKE_HOTTER(x,y) (x) += (y)
# endif
@@ -159,72 +146,15 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* This is now really controlled at startup, */
/* through GC_all_interior_pointers. */
-#define PRINTSTATS /* Print garbage collection statistics */
- /* For less verbose output, undefine in reclaim.c */
-
-#define PRINTTIMES /* Print the amount of time consumed by each garbage */
- /* collection. */
-
-#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
- /* whether the objects are atomic or composite, and */
- /* whether or not the block was found to be empty */
- /* during the reclaim phase. Typically generates */
- /* about one screenful per garbage collection. */
-#undef PRINTBLOCKS
-
-#ifdef SILENT
-# ifdef PRINTSTATS
-# undef PRINTSTATS
-# endif
-# ifdef PRINTTIMES
-# undef PRINTTIMES
-# endif
-# ifdef PRINTNBLOCKS
-# undef PRINTNBLOCKS
-# endif
-#endif
-
-#if defined(PRINTSTATS) && !defined(GATHERSTATS)
-# define GATHERSTATS
-#endif
-
-#if defined(PRINTSTATS) || !defined(SMALL_CONFIG)
-# define CONDPRINT /* Print some things if GC_print_stats is set */
-#endif
#define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
-#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
- /* free lists are actually maintained. This applies */
- /* only to the top level routines in misc.c, not to */
- /* user generated code that calls GC_allocobj and */
- /* GC_allocaobj directly. */
- /* Slows down average programs slightly. May however */
- /* substantially reduce fragmentation if allocation */
- /* request sizes are widely scattered. */
- /* May save significant amounts of space for obj_map */
- /* entries. */
-
-#if defined(USE_MARK_BYTES) && !defined(ALIGN_DOUBLE)
-# define ALIGN_DOUBLE
- /* We use one byte for every 2 words, which doesn't allow for */
- /* odd numbered words to have mark bits. */
-#endif
-
-#if defined(GC_GCJ_SUPPORT) && ALIGNMENT < 8 && !defined(ALIGN_DOUBLE)
- /* GCJ's Hashtable synchronization code requires 64-bit alignment. */
-# define ALIGN_DOUBLE
-#endif
-
-/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
-# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
-# define MERGE_SIZES
-# endif
-
#if !defined(DONT_ADD_BYTE_AT_END)
# define EXTRA_BYTES GC_all_interior_pointers
+# define MAX_EXTRA_BYTES 1
#else
# define EXTRA_BYTES 0
+# define MAX_EXTRA_BYTES 0
#endif
@@ -263,9 +193,9 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
struct callinfo;
-void GC_save_callers GC_PROTO((struct callinfo info[NFRAMES]));
+void GC_save_callers(struct callinfo info[NFRAMES]);
-void GC_print_callers GC_PROTO((struct callinfo info[NFRAMES]));
+void GC_print_callers(struct callinfo info[NFRAMES]);
#endif
@@ -275,7 +205,7 @@ void GC_print_callers GC_PROTO((struct callinfo info[NFRAMES]));
# if NARGS > 0
word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
# endif
-# if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
+# if (NFRAMES * (NARGS + 1)) % 2 == 1
/* Likely alignment problem. */
word ci_dummy;
# endif
@@ -362,39 +292,8 @@ void GC_print_callers GC_PROTO((struct callinfo info[NFRAMES]));
# define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
# define BZERO(x,n) memset(x, 0, (size_t)(n))
# else
-# define BCOPY(x,y,n) bcopy((char *)(x),(char *)(y),(int)(n))
-# define BZERO(x,n) bzero((char *)(x),(int)(n))
-# endif
-
-/* Delay any interrupts or signals that may abort this thread. Data */
-/* structures are in a consistent state outside this pair of calls. */
-/* ANSI C allows both to be empty (though the standard isn't very */
-/* clear on that point). Standard malloc implementations are usually */
-/* neither interruptable nor thread-safe, and thus correspond to */
-/* empty definitions. */
-/* It probably doesn't make any sense to declare these to be nonempty */
-/* if the code is being optimized, since signal safety relies on some */
-/* ordering constraints that are typically not obeyed by optimizing */
-/* compilers. */
-# ifdef PCR
-# define DISABLE_SIGNALS() \
- PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
-# define ENABLE_SIGNALS() \
- PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
-# else
-# if defined(THREADS) || defined(AMIGA) \
- || defined(MSWIN32) || defined(MSWINCE) || defined(MACOS) \
- || defined(DJGPP) || defined(NO_SIGNALS)
- /* Also useful for debugging. */
- /* Should probably use thr_sigsetmask for GC_SOLARIS_THREADS. */
-# define DISABLE_SIGNALS()
-# define ENABLE_SIGNALS()
-# else
-# define DISABLE_SIGNALS() GC_disable_signals()
- void GC_disable_signals();
-# define ENABLE_SIGNALS() GC_enable_signals()
- void GC_enable_signals();
-# endif
+# define BCOPY(x,y,n) bcopy((void *)(x),(void *)(y),(size_t)(n))
+# define BZERO(x,n) bzero((void *)(x),(size_t)(n))
# endif
/*
@@ -430,7 +329,7 @@ void GC_print_callers GC_PROTO((struct callinfo info[NFRAMES]));
# ifdef SMALL_CONFIG
# define ABORT(msg) abort();
# else
- GC_API void GC_abort GC_PROTO((GC_CONST char * msg));
+ GC_API void GC_abort(const char * msg);
# define ABORT(msg) GC_abort(msg);
# endif
# endif
@@ -491,12 +390,45 @@ extern GC_warn_proc GC_current_warn_proc;
# endif
#endif
+/* The first TINY_FREELISTS free lists correspond to the first */
+/* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep */
+/* separate free lists for each multiple of GRANULE_BYTES */
+/* up to (TINY_FREELISTS-1) * GRANULE_BYTES. After that they */
+/* may be spread out further. */
+#include "../gc_tiny_fl.h"
+#define GRANULE_BYTES GC_GRANULE_BYTES
+#define TINY_FREELISTS GC_TINY_FREELISTS
+
#define WORDSZ ((word)CPP_WORDSZ)
#define SIGNB ((word)1 << (WORDSZ-1))
#define BYTES_PER_WORD ((word)(sizeof (word)))
#define ONES ((word)(signed_word)(-1))
#define divWORDSZ(n) ((n) >> LOGWL) /* divide n by size of word */
+#if GRANULE_BYTES == 8
+# define BYTES_TO_GRANULES(n) ((n)>>3)
+# define GRANULES_TO_BYTES(n) ((n)<<3)
+# if CPP_WORDSZ == 64
+# define GRANULES_TO_WORDS(n) (n)
+# elif CPP_WORDSZ == 32
+# define GRANULES_TO_WORDS(n) ((n)<<1)
+# else
+# define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
+# endif
+#elif GRANULE_BYTES == 16
+# define BYTES_TO_GRANULES(n) ((n)>>4)
+# define GRANULES_TO_BYTES(n) ((n)<<4)
+# if CPP_WORDSZ == 64
+# define GRANULES_TO_WORDS(n) ((n)<<1)
+# elif CPP_WORDSZ == 32
+# define GRANULES_TO_WORDS(n) ((n)<<2)
+# else
+# define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
+# endif
+#else
+# error Bad GRANULE_BYTES value
+#endif
+
/*********************/
/* */
/* Size Parameters */
@@ -541,17 +473,20 @@ extern GC_warn_proc GC_current_warn_proc;
# undef HBLKSIZE
#endif
# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
-# define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
-# define HBLKSIZE ((word)CPP_HBLKSIZE)
+# define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
+# define HBLKSIZE ((size_t)CPP_HBLKSIZE)
-/* max size objects supported by freelist (larger objects may be */
-/* allocated, but less efficiently) */
+/* max size objects supported by freelist (larger objects are */
+/* allocated directly with allchblk(), by rounding to the next */
+/* multiple of HBLKSIZE. */
#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
-#define MAXOBJBYTES ((word)CPP_MAXOBJBYTES)
-#define CPP_MAXOBJSZ BYTES_TO_WORDS(CPP_MAXOBJBYTES)
-#define MAXOBJSZ ((word)CPP_MAXOBJSZ)
+#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
+#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
+#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
+#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
+#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)
# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
@@ -572,26 +507,20 @@ extern GC_warn_proc GC_current_warn_proc;
/* Round up byte allocation requests to integral number of words, etc. */
# define ROUNDED_UP_WORDS(n) \
BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
-# ifdef ALIGN_DOUBLE
-# define ALIGNED_WORDS(n) \
- (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2) - 1 + EXTRA_BYTES) & ~1)
+# define ROUNDED_UP_GRANULES(n) \
+ BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
+# if MAX_EXTRA_BYTES == 0
+# define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), 1)
# else
-# define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
+# define SMALL_OBJ(bytes) \
+ (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), 1) || \
+ (bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
+ /* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES. */
+ /* But we try to avoid looking up EXTRA_BYTES. */
# endif
-# define SMALL_OBJ(bytes) ((bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
# ifndef MIN_WORDS
- /* MIN_WORDS is the size of the smallest allocated object. */
- /* 1 and 2 are the only valid values. */
- /* 2 must be used if: */
- /* - GC_gcj_malloc can be used for objects of requested */
- /* size smaller than 2 words, or */
- /* - USE_MARK_BYTES is defined. */
-# if defined(USE_MARK_BYTES) || defined(GC_GCJ_SUPPORT)
-# define MIN_WORDS 2 /* Smallest allocated object. */
-# else
-# define MIN_WORDS 1
-# endif
+# define MIN_WORDS 2 /* FIXME: obsolete */
# endif
@@ -649,43 +578,43 @@ typedef word page_hash_table[PHT_SIZE];
/* heap block header */
#define HBLKMASK (HBLKSIZE-1)
-#define BITS_PER_HBLK (CPP_HBLKSIZE * 8)
-
-#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
+#define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES)
/* upper bound */
- /* We allocate 1 bit/word, unless USE_MARK_BYTES */
- /* is defined. Only the first word */
- /* in each object is actually marked. */
+ /* We allocate 1 bit per allocation granule. */
+ /* If MARK_BIT_PER_GRANULE is defined, we use */
+ /* every nth bit, where n is the number of */
+ /* allocation granules per object. If */
+ /* MARK_BIT_PER_OBJ is defined, we only use the */
+ /* initial group of mark bits, and it is safe */
+ /* to allocate smaller header for large objects. */
# ifdef USE_MARK_BYTES
-# define MARK_BITS_SZ (MARK_BITS_PER_HBLK/2)
+# define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
/* Unlike the other case, this is in units of bytes. */
- /* We actually allocate only every second mark bit, since we */
- /* force all objects to be doubleword aligned. */
- /* However, each mark bit is allocated as a byte. */
+ /* Since we force doubleword alignment, we need at most one */
+ /* mark bit per 2 words. But we do allocate and set one */
+ /* extra mark bit to avoid an explicit check for the */
+ /* partial object at the end of each block. */
# else
-# define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ)
+# define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
# endif
+#ifdef PARALLEL_MARK
+# include <atomic_ops.h>
+ typedef AO_t counter_t;
+#else
+ typedef size_t counter_t;
+#endif
+
/* We maintain layout maps for heap blocks containing objects of a given */
/* size. Each entry in this map describes a byte offset and has the */
/* following type. */
-typedef unsigned char map_entry_type;
-
struct hblkhdr {
- word hb_sz; /* If in use, size in words, of objects in the block. */
- /* if free, the size in bytes of the whole block */
struct hblk * hb_next; /* Link field for hblk free list */
/* and for lists of chunks waiting to be */
/* reclaimed. */
struct hblk * hb_prev; /* Backwards link for free list. */
- word hb_descr; /* object descriptor for marking. See */
- /* mark.h. */
- map_entry_type * hb_map;
- /* A pointer to a pointer validity map of the block. */
- /* See GC_obj_map. */
- /* Valid for all blocks with headers. */
- /* Free blocks point to GC_invalid_map. */
+ struct hblk * hb_block; /* The corresponding block. */
unsigned char hb_obj_kind;
/* Kind of objects in the block. Each kind */
/* identifies a mark procedure and a set of */
@@ -700,6 +629,7 @@ struct hblkhdr {
/* GC_remap must be invoked on it */
/* before it can be reallocated. */
/* Only set with USE_MUNMAP. */
+# define FREE_BLK 4 /* Block is free, i.e. not in use. */
unsigned short hb_last_reclaimed;
/* Value of GC_gc_no when block was */
/* last allocated or swept. May wrap. */
@@ -708,43 +638,67 @@ struct hblkhdr {
/* when the header was allocated, or */
/* when the size of the block last */
/* changed. */
+ size_t hb_sz; /* If in use, size in bytes, of objects in the block. */
+ /* if free, the size in bytes of the whole block */
+ word hb_descr; /* object descriptor for marking. See */
+ /* mark.h. */
+# ifdef MARK_BIT_PER_OBJ
+ unsigned32 hb_inv_sz; /* A good upper bound for 2**32/hb_sz. */
+ /* For large objects, we use */
+ /* LARGE_INV_SZ. */
+# define LARGE_INV_SZ (1 << 16)
+# else
+ unsigned char hb_large_block;
+ short * hb_map; /* Essentially a table of remainders */
+ /* mod BYTES_TO_GRANULES(hb_sz), except */
+ /* for large blocks. See GC_obj_map. */
+# endif
+ counter_t hb_n_marks; /* Number of set mark bits, excluding */
+ /* the one always set at the end. */
+ /* Currently it is concurrently */
+ /* updated and hence only a lower bound.*/
+ /* But a zero value does gurantee that */
+ /* the block contains no marked */
+ /* objects. */
# ifdef USE_MARK_BYTES
union {
char _hb_marks[MARK_BITS_SZ];
/* The i'th byte is 1 if the object */
- /* starting at word 2i is marked, 0 o.w. */
+ /* starting at granule i or object i is */
+ /* marked, 0 o.w. */
+ /* The mark bit for the "one past the */
+ /* end" object is always set to avoid a */
+ /* special case test in the marker. */
word dummy; /* Force word alignment of mark bytes. */
} _mark_byte_union;
# define hb_marks _mark_byte_union._hb_marks
+# define ANY_INDEX 23 /* Random mark bit index for assertions */
# else
word hb_marks[MARK_BITS_SZ];
- /* Bit i in the array refers to the */
- /* object starting at the ith word (header */
- /* INCLUDED) in the heap block. */
- /* The lsb of word 0 is numbered 0. */
- /* Unused bits are invalid, and are */
- /* occasionally set, e.g for uncollectable */
- /* objects. */
# endif /* !USE_MARK_BYTES */
};
/* heap block body */
-# define BODY_SZ (HBLKSIZE/sizeof(word))
+# define HBLK_WORDS (HBLKSIZE/sizeof(word))
+# define HBLK_GRANULES (HBLKSIZE/GRANULE_BYTES)
+
+/* The number of objects in a block dedicated to a certain size. */
+/* may erroneously yield zero (instead of one) for large objects. */
+# define HBLK_OBJS(sz_in_bytes) (HBLKSIZE/(sz_in_bytes))
struct hblk {
- word hb_body[BODY_SZ];
+ char hb_body[HBLKSIZE];
};
-# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
+# define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)
-# define OBJ_SZ_TO_BLOCKS(sz) \
- divHBLKSZ(WORDS_TO_BYTES(sz) + HBLKSIZE-1)
+# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ(sz + HBLKSIZE-1)
/* Size of block (in units of HBLKSIZE) needed to hold objects of */
- /* given sz (in words). */
+ /* given sz (in bytes). */
/* Object free list link */
-# define obj_link(p) (*(ptr_t *)(p))
+# define obj_link(p) (*(void **)(p))
# define LOG_MAX_MARK_PROCS 6
# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
@@ -820,7 +774,7 @@ struct roots {
/* compiled. */
struct _GC_arrays {
- word _heapsize;
+ word _heapsize; /* Heap size in bytes. */
word _max_heapsize;
word _requested_heapsize; /* Heap size due to explicit expansion */
ptr_t _last_heap_addr;
@@ -837,29 +791,29 @@ struct _GC_arrays {
/* Maximum number of bytes that were ever allocated in */
/* large object blocks. This is used to help decide when it */
/* is safe to split up a large block. */
- word _words_allocd_before_gc;
+ word _bytes_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
# ifndef SEPARATE_GLOBALS
- word _words_allocd;
+ word _bytes_allocd;
/* Number of words allocated during this collection cycle */
# endif
- word _words_wasted;
+ word _bytes_wasted;
/* Number of words wasted due to internal fragmentation */
/* in large objects, or due to dropping blacklisted */
/* blocks, since last gc. Approximate. */
- word _words_finalized;
- /* Approximate number of words in objects (and headers) */
+ word _bytes_finalized;
+ /* Approximate number of bytes in objects (and headers) */
/* That became ready for finalization in the last */
/* collection. */
word _non_gc_bytes_at_gc;
/* Number of explicitly managed bytes of storage */
/* at last collection. */
- word _mem_freed;
- /* Number of explicitly deallocated words of memory */
+ word _bytes_freed;
+ /* Number of explicitly deallocated bytes of memory */
/* since last collection. */
- word _finalizer_mem_freed;
- /* Words of memory explicitly deallocated while */
+ word _finalizer_bytes_freed;
+ /* Bytes of memory explicitly deallocated while */
/* finalizers were running. Used to approximate mem. */
/* explicitly deallocated by finalizers. */
ptr_t _scratch_end_ptr;
@@ -872,77 +826,61 @@ struct _GC_arrays {
/* by DS_PROC mark descriptors. See gc_mark.h. */
# ifndef SEPARATE_GLOBALS
- ptr_t _objfreelist[MAXOBJSZ+1];
+ void *_objfreelist[MAXOBJGRANULES+1];
/* free list for objects */
- ptr_t _aobjfreelist[MAXOBJSZ+1];
+ void *_aobjfreelist[MAXOBJGRANULES+1];
/* free list for atomic objs */
# endif
- ptr_t _uobjfreelist[MAXOBJSZ+1];
+ void *_uobjfreelist[MAXOBJGRANULES+1];
/* uncollectable but traced objs */
/* objects on this and auobjfreelist */
/* are always marked, except during */
/* garbage collections. */
# ifdef ATOMIC_UNCOLLECTABLE
- ptr_t _auobjfreelist[MAXOBJSZ+1];
+ void *_auobjfreelist[MAXOBJGRANULES+1];
# endif
/* uncollectable but traced objs */
-# ifdef GATHERSTATS
word _composite_in_use;
/* Number of words in accessible composite */
/* objects. */
word _atomic_in_use;
/* Number of words in accessible atomic */
/* objects. */
-# endif
# ifdef USE_MUNMAP
word _unmapped_bytes;
# endif
-# ifdef MERGE_SIZES
- unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
+
+ unsigned _size_map[MAXOBJBYTES+1];
/* Number of words to allocate for a given allocation request in */
/* bytes. */
-# endif
# ifdef STUBBORN_ALLOC
- ptr_t _sobjfreelist[MAXOBJSZ+1];
+ ptr_t _sobjfreelist[MAXOBJGRANULES+1];
# endif
/* free list for immutable objects */
- map_entry_type * _obj_map[MAXOBJSZ+1];
+# ifdef MARK_BIT_PER_GRANULE
+ short * _obj_map[MAXOBJGRANULES+1];
/* If not NIL, then a pointer to a map of valid */
- /* object addresses. _obj_map[sz][i] is j if the */
- /* address block_start+i is a valid pointer */
- /* to an object at block_start + */
- /* WORDS_TO_BYTES(BYTES_TO_WORDS(i) - j) */
- /* I.e. j is a word displacement from the */
- /* object beginning. */
- /* The entry is OBJ_INVALID if the corresponding */
- /* address is not a valid pointer. It is */
- /* OFFSET_TOO_BIG if the value j would be too */
- /* large to fit in the entry. (Note that the */
- /* size of these entries matters, both for */
- /* space consumption and for cache utilization.) */
-# define OFFSET_TOO_BIG 0xfe
-# define OBJ_INVALID 0xff
-# define MAP_ENTRY(map, bytes) (map)[bytes]
-# define MAP_ENTRIES HBLKSIZE
-# define MAP_SIZE MAP_ENTRIES
-# define CPP_MAX_OFFSET (OFFSET_TOO_BIG - 1)
-# define MAX_OFFSET ((word)CPP_MAX_OFFSET)
- /* The following are used only if GC_all_interior_ptrs != 0 */
-# define VALID_OFFSET_SZ \
- (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
- CPP_MAX_OFFSET+1 \
- : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
- char _valid_offsets[VALID_OFFSET_SZ];
+ /* object addresses. */
+ /* _obj_map[sz_in_granules][i] is */
+ /* i % sz_in_granules. */
+ /* This is now used purely to replace a */
+ /* division in the marker by a table lookup. */
+ /* _obj_map[0] is used for large objects and */
+ /* contains all nonzero entries. This gets us */
+ /* out of the marker fast path without an extra */
+ /* test. */
+# define MAP_LEN BYTES_TO_GRANULES(HBLKSIZE)
+# endif
+# define VALID_OFFSET_SZ HBLKSIZE
+ char _valid_offsets[VALID_OFFSET_SZ];
/* GC_valid_offsets[i] == TRUE ==> i */
/* is registered as a displacement. */
- char _modws_valid_offsets[sizeof(word)];
+ char _modws_valid_offsets[sizeof(word)];
/* GC_valid_offsets[i] ==> */
/* GC_modws_valid_offsets[i%sizeof(word)] */
-# define OFFSET_VALID(displ) \
- (GC_all_interior_pointers || GC_valid_offsets[displ])
# ifdef STUBBORN_ALLOC
page_hash_table _changed_pages;
/* Stubborn object pages that were changes since last call to */
@@ -956,7 +894,7 @@ struct _GC_arrays {
/* GC_read_dirty. */
# endif
# ifdef MPROTECT_VDB
- VOLATILE page_hash_table _dirty_pages;
+ volatile page_hash_table _dirty_pages;
/* Pages dirtied since last GC_read_dirty. */
# endif
# ifdef PROC_VDB
@@ -994,6 +932,9 @@ struct _GC_arrays {
/* Block header index; see gc_headers.h */
bottom_index * _all_nils;
bottom_index * _top_index [TOP_SZ];
+#ifdef ENABLE_TRACE
+ ptr_t _trace_addr;
+#endif
#ifdef SAVE_CALL_CHAIN
struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection.*/
/* Useful for debugging mysterious */
@@ -1009,7 +950,7 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# ifndef SEPARATE_GLOBALS
# define GC_objfreelist GC_arrays._objfreelist
# define GC_aobjfreelist GC_arrays._aobjfreelist
-# define GC_words_allocd GC_arrays._words_allocd
+# define GC_bytes_allocd GC_arrays._bytes_allocd
# endif
# define GC_uobjfreelist GC_arrays._uobjfreelist
# ifdef ATOMIC_UNCOLLECTABLE
@@ -1022,26 +963,31 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_changed_pages GC_arrays._changed_pages
# define GC_prev_changed_pages GC_arrays._prev_changed_pages
# endif
-# define GC_obj_map GC_arrays._obj_map
+# ifdef MARK_BIT_PER_GRANULE
+# define GC_obj_map GC_arrays._obj_map
+# endif
# define GC_last_heap_addr GC_arrays._last_heap_addr
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
-# define GC_words_wasted GC_arrays._words_wasted
+# define GC_bytes_wasted GC_arrays._bytes_wasted
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
# define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
-# define GC_words_finalized GC_arrays._words_finalized
+# define GC_bytes_finalized GC_arrays._bytes_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
-# define GC_mem_freed GC_arrays._mem_freed
-# define GC_finalizer_mem_freed GC_arrays._finalizer_mem_freed
+# define GC_bytes_freed GC_arrays._bytes_freed
+# define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
# define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
# define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
# define GC_mark_procs GC_arrays._mark_procs
# define GC_heapsize GC_arrays._heapsize
# define GC_max_heapsize GC_arrays._max_heapsize
# define GC_requested_heapsize GC_arrays._requested_heapsize
-# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
+# define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
+#ifdef ENABLE_TRACE
+#define GC_trace_addr GC_arrays._trace_addr
+#endif
# ifdef USE_MUNMAP
# define GC_unmapped_bytes GC_arrays._unmapped_bytes
# endif
@@ -1065,13 +1011,9 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# ifdef PROC_VDB
# define GC_written_pages GC_arrays._written_pages
# endif
-# ifdef GATHERSTATS
-# define GC_composite_in_use GC_arrays._composite_in_use
-# define GC_atomic_in_use GC_arrays._atomic_in_use
-# endif
-# ifdef MERGE_SIZES
-# define GC_size_map GC_arrays._size_map
-# endif
+# define GC_composite_in_use GC_arrays._composite_in_use
+# define GC_atomic_in_use GC_arrays._atomic_in_use
+# define GC_size_map GC_arrays._size_map
# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
@@ -1082,12 +1024,13 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define MAXOBJKINDS 16
extern struct obj_kind {
- ptr_t *ok_freelist; /* Array of free listheaders for this kind of object */
+ void **ok_freelist; /* Array of free listheaders for this kind of object */
/* Point either to GC_arrays or to storage allocated */
/* with GC_scratch_alloc. */
struct hblk **ok_reclaim_list;
/* List headers for lists of blocks waiting to be */
/* swept. */
+ /* Indexed by object size in granules. */
word ok_descriptor; /* Descriptor template for objects in this */
/* block. */
GC_bool ok_relocate_descr;
@@ -1106,14 +1049,14 @@ extern struct obj_kind {
/* introduce maintenance problems. */
#ifdef SEPARATE_GLOBALS
- word GC_words_allocd;
+ word GC_bytes_allocd;
/* Number of words allocated during this collection cycle */
- ptr_t GC_objfreelist[MAXOBJSZ+1];
+ ptr_t GC_objfreelist[MAXOBJGRANULES+1];
/* free list for NORMAL objects */
# define beginGC_objfreelist ((ptr_t)(&GC_objfreelist))
# define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))
- ptr_t GC_aobjfreelist[MAXOBJSZ+1];
+ ptr_t GC_aobjfreelist[MAXOBJGRANULES+1];
/* free list for atomic (PTRFREE) objs */
# define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist))
# define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
@@ -1157,10 +1100,6 @@ extern word GC_black_list_spacing;
/* "stack-blacklisted", i.e. that are */
/* problematic in the interior of an object. */
-extern map_entry_type * GC_invalid_map;
- /* Pointer to the nowhere valid hblk map */
- /* Blocks pointing to this map are free. */
-
extern struct hblk * GC_hblkfreelist[];
/* List of completely empty heap blocks */
/* Linked through hb_next field of */
@@ -1243,17 +1182,16 @@ extern long GC_large_alloc_warn_suppressed;
/* Mark bit operations */
/*
- * Retrieve, set, clear the mark bit corresponding
- * to the nth word in a given heap block.
+ * Retrieve, set, clear the nth mark bit in a given heap block.
*
- * (Recall that bit n corresponds to object beginning at word n
+ * (Recall that bit n corresponds to nth object or allocation granule
* relative to the beginning of the block, including unused words)
*/
#ifdef USE_MARK_BYTES
-# define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n) >> 1])
-# define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n)>>1]) = 1
-# define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n)>>1]) = 0
+# define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n])
+# define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) = 1
+# define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) = 0
#else /* !USE_MARK_BYTES */
# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
>> (modWORDSZ(n))) & (word)1)
@@ -1264,70 +1202,85 @@ extern long GC_large_alloc_warn_suppressed;
&= ~((word)1 << modWORDSZ(n))
#endif /* !USE_MARK_BYTES */
+#ifdef MARK_BIT_PER_OBJ
+# define MARK_BIT_NO(offset, sz) ((offset)/(sz))
+ /* Get the mark bit index corresponding to the given byte */
+ /* offset and size (in bytes). */
+# define MARK_BIT_OFFSET(sz) 1
+ /* Spacing between useful mark bits. */
+# define IF_PER_OBJ(x) x
+# define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES? 1 : HBLK_OBJS(sz))
+ /* Position of final, always set, mark bit. */
+#else /* MARK_BIT_PER_GRANULE */
+# define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((offset))
+# define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
+# define IF_PER_OBJ(x)
+# define FINAL_MARK_BIT(sz) \
+ ((sz) > MAXOBJBYTES? MARK_BITS_PER_HBLK \
+ : BYTES_TO_GRANULES(sz * HBLK_OBJS(sz)))
+#endif
+
/* Important internal collector routines */
-ptr_t GC_approx_sp GC_PROTO((void));
+ptr_t GC_approx_sp(void);
-GC_bool GC_should_collect GC_PROTO((void));
+GC_bool GC_should_collect(void);
-void GC_apply_to_all_blocks GC_PROTO(( \
- void (*fn) GC_PROTO((struct hblk *h, word client_data)), \
- word client_data));
+void GC_apply_to_all_blocks(void (*fn) (struct hblk *h, word client_data),
+ word client_data);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
-struct hblk * GC_next_used_block GC_PROTO((struct hblk * h));
+struct hblk * GC_next_used_block(struct hblk * h);
/* Return first in-use block >= h */
-struct hblk * GC_prev_block GC_PROTO((struct hblk * h));
+struct hblk * GC_prev_block(struct hblk * h);
/* Return last block <= h. Returned block */
/* is managed by GC, but may or may not be in */
/* use. */
-void GC_mark_init GC_PROTO((void));
-void GC_clear_marks GC_PROTO((void)); /* Clear mark bits for all heap objects. */
-void GC_invalidate_mark_state GC_PROTO((void));
+void GC_mark_init(void);
+void GC_clear_marks(void); /* Clear mark bits for all heap objects. */
+void GC_invalidate_mark_state(void);
/* Tell the marker that marked */
/* objects may point to unmarked */
/* ones, and roots may point to */
/* unmarked objects. */
/* Reset mark stack. */
-GC_bool GC_mark_stack_empty GC_PROTO((void));
-GC_bool GC_mark_some GC_PROTO((ptr_t cold_gc_frame));
+GC_bool GC_mark_stack_empty(void);
+GC_bool GC_mark_some(ptr_t cold_gc_frame);
/* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
-void GC_initiate_gc GC_PROTO((void));
+void GC_initiate_gc(void);
/* initiate collection. */
/* If the mark state is invalid, this */
/* becomes full colleection. Otherwise */
/* it's partial. */
-void GC_push_all GC_PROTO((ptr_t bottom, ptr_t top));
+void GC_push_all(ptr_t bottom, ptr_t top);
/* Push everything in a range */
/* onto mark stack. */
-void GC_push_selected GC_PROTO(( \
- ptr_t bottom, \
- ptr_t top, \
- int (*dirty_fn) GC_PROTO((struct hblk *h)), \
- void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top)) ));
+void GC_push_selected(ptr_t bottom, ptr_t top,
+ int (*dirty_fn) (struct hblk *h),
+ void (*push_fn) (ptr_t bottom, ptr_t top) );
/* Push all pages h in [b,t) s.t. */
/* select_fn(h) != 0 onto mark stack. */
#ifndef SMALL_CONFIG
- void GC_push_conditional GC_PROTO((ptr_t b, ptr_t t, GC_bool all));
+ void GC_push_conditional (ptr_t b, ptr_t t, GC_bool all);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
/* Do either of the above, depending */
/* on the third arg. */
-void GC_push_all_stack GC_PROTO((ptr_t b, ptr_t t));
+void GC_push_all_stack (ptr_t b, ptr_t t);
/* As above, but consider */
/* interior pointers as valid */
-void GC_push_all_eager GC_PROTO((ptr_t b, ptr_t t));
+void GC_push_all_eager (ptr_t b, ptr_t t);
/* Same as GC_push_all_stack, but */
/* ensures that stack is scanned */
/* immediately, not just scheduled */
/* for scanning. */
#ifndef THREADS
- void GC_push_all_stack_partially_eager GC_PROTO(( \
- ptr_t bottom, ptr_t top, ptr_t cold_gc_frame ));
+ void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
+ ptr_t cold_gc_frame);
/* Similar to GC_push_all_eager, but only the */
/* part hotter than cold_gc_frame is scanned */
/* immediately. Needed to ensure that callee- */
@@ -1339,46 +1292,46 @@ void GC_push_all_eager GC_PROTO((ptr_t b, ptr_t t));
/* stacks are scheduled for scanning in *GC_push_other_roots, which */
/* is thread-package-specific. */
#endif
-void GC_push_current_stack GC_PROTO((ptr_t cold_gc_frame));
+void GC_push_current_stack(ptr_t cold_gc_frame);
/* Push enough of the current stack eagerly to */
/* ensure that callee-save registers saved in */
/* GC frames are scanned. */
/* In the non-threads case, schedule entire */
/* stack for scanning. */
-void GC_push_roots GC_PROTO((GC_bool all, ptr_t cold_gc_frame));
+void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
/* Push all or dirty roots. */
-extern void (*GC_push_other_roots) GC_PROTO((void));
+extern void (*GC_push_other_roots)(void);
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
/* (e.g. threads environments) this is */
/* predfined to be non-zero. A client supplied */
/* replacement should also call the original */
/* function. */
-extern void GC_push_gc_structures GC_PROTO((void));
+extern void GC_push_gc_structures(void);
/* Push GC internal roots. These are normally */
/* included in the static data segment, and */
/* Thus implicitly pushed. But we must do this */
/* explicitly if normal root processing is */
/* disabled. Calls the following: */
- extern void GC_push_finalizer_structures GC_PROTO((void));
- extern void GC_push_stubborn_structures GC_PROTO((void));
+ extern void GC_push_finalizer_structures(void);
+ extern void GC_push_stubborn_structures (void);
# ifdef THREADS
- extern void GC_push_thread_structures GC_PROTO((void));
+ extern void GC_push_thread_structures (void);
# endif
-extern void (*GC_start_call_back) GC_PROTO((void));
+extern void (*GC_start_call_back) (void);
/* Called at start of full collections. */
/* Not called if 0. Called with allocation */
/* lock held. */
/* 0 by default. */
# if defined(USE_GENERIC_PUSH_REGS)
- void GC_generic_push_regs GC_PROTO((ptr_t cold_gc_frame));
+ void GC_generic_push_regs(ptr_t cold_gc_frame);
# else
- void GC_push_regs GC_PROTO((void));
+ void GC_push_regs(void);
# endif
# if defined(SPARC) || defined(IA64)
/* Cause all stacked registers to be saved in memory. Return a */
/* pointer to the top of the corresponding memory stack. */
- word GC_save_regs_in_stack GC_PROTO((void));
+ word GC_save_regs_in_stack(void);
# endif
/* Push register contents onto mark stack. */
/* If NURSERY is defined, the default push */
@@ -1388,9 +1341,9 @@ extern void (*GC_start_call_back) GC_PROTO((void));
extern void (*GC_push_proc)(ptr_t);
# endif
# if defined(MSWIN32) || defined(MSWINCE)
- void __cdecl GC_push_one GC_PROTO((word p));
+ void __cdecl GC_push_one(word p);
# else
- void GC_push_one GC_PROTO((word p));
+ void GC_push_one(word p);
/* If p points to an object, mark it */
/* and push contents on the mark stack */
/* Pointer recognition test always */
@@ -1399,133 +1352,144 @@ extern void (*GC_start_call_back) GC_PROTO((void));
/* stack. */
# endif
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
- void GC_mark_and_push_stack GC_PROTO((word p, ptr_t source));
+ void GC_mark_and_push_stack(ptr_t p, ptr_t source);
/* Ditto, omits plausibility test */
# else
- void GC_mark_and_push_stack GC_PROTO((word p));
+ void GC_mark_and_push_stack(ptr_t p);
# endif
-void GC_push_marked GC_PROTO((struct hblk * h, hdr * hhdr));
+void GC_push_marked(struct hblk * h, hdr * hhdr);
/* Push contents of all marked objects in h onto */
/* mark stack. */
#ifdef SMALL_CONFIG
# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
#else
- struct hblk * GC_push_next_marked_dirty GC_PROTO((struct hblk * h));
+ struct hblk * GC_push_next_marked_dirty(struct hblk * h);
/* Invoke GC_push_marked on next dirty block above h. */
/* Return a pointer just past the end of this block. */
#endif /* !SMALL_CONFIG */
-struct hblk * GC_push_next_marked GC_PROTO((struct hblk * h));
+struct hblk * GC_push_next_marked(struct hblk * h);
/* Ditto, but also mark from clean pages. */
-struct hblk * GC_push_next_marked_uncollectable GC_PROTO((struct hblk * h));
+struct hblk * GC_push_next_marked_uncollectable(struct hblk * h);
/* Ditto, but mark only from uncollectable pages. */
-GC_bool GC_stopped_mark GC_PROTO((GC_stop_func stop_func));
+GC_bool GC_stopped_mark(GC_stop_func stop_func);
/* Stop world and mark from all roots */
/* and rescuers. */
-void GC_clear_hdr_marks GC_PROTO((hdr * hhdr));
+void GC_clear_hdr_marks(hdr * hhdr);
/* Clear the mark bits in a header */
-void GC_set_hdr_marks GC_PROTO((hdr * hhdr));
+void GC_set_hdr_marks(hdr * hhdr);
/* Set the mark bits in a header */
-void GC_set_fl_marks GC_PROTO((ptr_t p));
+void GC_set_fl_marks(ptr_t p);
/* Set all mark bits associated with */
/* a free list. */
-void GC_add_roots_inner GC_PROTO((char * b, char * e, GC_bool tmp));
-void GC_remove_roots_inner GC_PROTO((char * b, char * e));
-GC_bool GC_is_static_root GC_PROTO((ptr_t p));
+#ifdef GC_ASSERTIONS
+ void GC_check_fl_marks(ptr_t p);
+ /* Check that all mark bits */
+ /* associated with a free list are */
+ /* set. Abort if not. */
+#endif
+void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp);
+void GC_remove_roots_inner(ptr_t b, ptr_t e);
+GC_bool GC_is_static_root(ptr_t p);
/* Is the address p in one of the registered static */
/* root sections? */
# if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
-GC_bool GC_is_tmp_root GC_PROTO((ptr_t p));
+GC_bool GC_is_tmp_root(ptr_t p);
/* Is the address p in one of the temporary static */
/* root sections? */
# endif
-void GC_register_dynamic_libraries GC_PROTO((void));
+void GC_register_dynamic_libraries(void);
/* Add dynamic library data sections to the root set. */
+void GC_cond_register_dynamic_libraries(void);
+ /* Remove and reregister dynamic libraries if we're */
+ /* configured to do that at each GC. */
-GC_bool GC_register_main_static_data GC_PROTO((void));
+GC_bool GC_register_main_static_data(void);
/* We need to register the main data segment. Returns */
/* TRUE unless this is done implicitly as part of */
/* dynamic library registration. */
/* Machine dependent startup routines */
-ptr_t GC_get_stack_base GC_PROTO((void)); /* Cold end of stack */
+ptr_t GC_get_stack_base(void); /* Cold end of stack */
#ifdef IA64
- ptr_t GC_get_register_stack_base GC_PROTO((void));
+ ptr_t GC_get_register_stack_base(void);
/* Cold end of register stack. */
#endif
-void GC_register_data_segments GC_PROTO((void));
+void GC_register_data_segments(void);
/* Black listing: */
-void GC_bl_init GC_PROTO((void));
+void GC_bl_init(void);
# ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_normal GC_PROTO((word p, ptr_t source));
+ void GC_add_to_black_list_normal(word p, ptr_t source);
/* Register bits as a possible future false */
/* reference from the heap or static data */
# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
if (GC_all_interior_pointers) { \
- GC_add_to_black_list_stack(bits, (ptr_t)(source)); \
+ GC_add_to_black_list_stack((word)(bits), (source)); \
} else { \
- GC_add_to_black_list_normal(bits, (ptr_t)(source)); \
+ GC_add_to_black_list_normal((word)(bits), (source)); \
}
# else
- void GC_add_to_black_list_normal GC_PROTO((word p));
+ void GC_add_to_black_list_normal(word p);
# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
if (GC_all_interior_pointers) { \
- GC_add_to_black_list_stack(bits); \
+ GC_add_to_black_list_stack((word)(bits)); \
} else { \
- GC_add_to_black_list_normal(bits); \
+ GC_add_to_black_list_normal((word)(bits)); \
}
# endif
# ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_stack GC_PROTO((word p, ptr_t source));
+ void GC_add_to_black_list_stack(word p, ptr_t source);
+# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
+ GC_add_to_black_list_stack((word)(bits), (source))
# else
- void GC_add_to_black_list_stack GC_PROTO((word p));
+ void GC_add_to_black_list_stack(word p);
+# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
+ GC_add_to_black_list_stack((word)(bits))
# endif
-struct hblk * GC_is_black_listed GC_PROTO((struct hblk * h, word len));
+struct hblk * GC_is_black_listed(struct hblk * h, word len);
/* If there are likely to be false references */
/* to a block starting at h of the indicated */
/* length, then return the next plausible */
/* starting location for h that might avoid */
/* these false references. */
-void GC_promote_black_lists GC_PROTO((void));
+void GC_promote_black_lists(void);
/* Declare an end to a black listing phase. */
-void GC_unpromote_black_lists GC_PROTO((void));
+void GC_unpromote_black_lists(void);
/* Approximately undo the effect of the above. */
/* This actually loses some information, but */
/* only in a reasonably safe way. */
-word GC_number_stack_black_listed GC_PROTO(( \
- struct hblk *start, struct hblk *endp1));
+word GC_number_stack_black_listed(struct hblk *start, struct hblk *endp1);
/* Return the number of (stack) blacklisted */
/* blocks in the range for statistical */
/* purposes. */
-ptr_t GC_scratch_alloc GC_PROTO((word bytes));
+ptr_t GC_scratch_alloc(size_t bytes);
/* GC internal memory allocation for */
/* small objects. Deallocation is not */
/* possible. */
/* Heap block layout maps: */
-void GC_invalidate_map GC_PROTO((hdr * hhdr));
- /* Remove the object map associated */
- /* with the block. This identifies */
- /* the block as invalid to the mark */
- /* routines. */
-GC_bool GC_add_map_entry GC_PROTO((word sz));
+GC_bool GC_add_map_entry(size_t sz);
/* Add a heap block map for objects of */
/* size sz to obj_map. */
/* Return FALSE on failure. */
-void GC_register_displacement_inner GC_PROTO((word offset));
+void GC_register_displacement_inner(size_t offset);
/* Version of GC_register_displacement */
/* that assumes lock is already held */
/* and signals are already disabled. */
+
+void GC_initialize_offsets(void);
+ /* Initialize GC_valid_offsets, */
+ /* depending on current */
+ /* GC_all_interior_pointers settings. */
/* hblk allocation: */
-void GC_new_hblk GC_PROTO((word size_in_words, int kind));
+void GC_new_hblk(size_t size_in_granules, int kind);
/* Allocate a new heap block, and build */
/* a free list in it. */
-ptr_t GC_build_fl GC_PROTO((struct hblk *h, word sz,
- GC_bool clear, ptr_t list));
+ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear, ptr_t list);
/* Build a free list for objects of */
/* size sz in block h. Append list to */
/* end of the free lists. Possibly */
@@ -1533,56 +1497,55 @@ ptr_t GC_build_fl GC_PROTO((struct hblk *h, word sz,
/* called by GC_new_hblk, but also */
/* called explicitly without GC lock. */
-struct hblk * GC_allochblk GC_PROTO(( \
- word size_in_words, int kind, unsigned flags));
+struct hblk * GC_allochblk (size_t size_in_bytes, int kind, unsigned flags);
/* Allocate a heap block, inform */
/* the marker that block is valid */
/* for objects of indicated size. */
-ptr_t GC_alloc_large GC_PROTO((word lw, int k, unsigned flags));
- /* Allocate a large block of size lw words. */
+ptr_t GC_alloc_large (size_t lb, int k, unsigned flags);
+ /* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
 /* Calls GC_allochblk to do the actual */
/* allocation, but also triggers GC and/or */
/* heap expansion as appropriate. */
- /* Does not update GC_words_allocd, but does */
+ /* Does not update GC_bytes_allocd, but does */
/* other accounting. */
-ptr_t GC_alloc_large_and_clear GC_PROTO((word lw, int k, unsigned flags));
+ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags);
/* As above, but clear block if appropriate */
/* for kind k. */
-void GC_freehblk GC_PROTO((struct hblk * p));
+void GC_freehblk(struct hblk * p);
/* Deallocate a heap block and mark it */
/* as invalid. */
/* Misc GC: */
-void GC_init_inner GC_PROTO((void));
-GC_bool GC_expand_hp_inner GC_PROTO((word n));
-void GC_start_reclaim GC_PROTO((int abort_if_found));
+void GC_init_inner(void);
+GC_bool GC_expand_hp_inner(word n);
+void GC_start_reclaim(int abort_if_found);
/* Restore unmarked objects to free */
/* lists, or (if abort_if_found is */
/* TRUE) report them. */
/* Sweeping of small object pages is */
/* largely deferred. */
-void GC_continue_reclaim GC_PROTO((word sz, int kind));
+void GC_continue_reclaim(size_t sz, int kind);
/* Sweep pages of the given size and */
/* kind, as long as possible, and */
/* as long as the corr. free list is */
- /* empty. */
-void GC_reclaim_or_delete_all GC_PROTO((void));
+ /* empty. Sz is in granules. */
+void GC_reclaim_or_delete_all(void);
/* Arrange for all reclaim lists to be */
/* empty. Judiciously choose between */
/* sweeping and discarding each page. */
-GC_bool GC_reclaim_all GC_PROTO((GC_stop_func stop_func, GC_bool ignore_old));
+GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
/* Reclaim all blocks. Abort (in a */
/* consistent state) if f returns TRUE. */
-GC_bool GC_block_empty GC_PROTO((hdr * hhdr));
+GC_bool GC_block_empty(hdr * hhdr);
/* Block completely unmarked? */
-GC_bool GC_never_stop_func GC_PROTO((void));
+GC_bool GC_never_stop_func(void);
/* Returns FALSE. */
-GC_bool GC_try_to_collect_inner GC_PROTO((GC_stop_func f));
+GC_bool GC_try_to_collect_inner(GC_stop_func f);
/* Collect; caller must have acquired */
/* lock and disabled signals. */
@@ -1591,11 +1554,10 @@ GC_bool GC_try_to_collect_inner GC_PROTO((GC_stop_func f));
/* successfully. */
# define GC_gcollect_inner() \
(void) GC_try_to_collect_inner(GC_never_stop_func)
-void GC_finish_collection GC_PROTO((void));
+void GC_finish_collection(void);
/* Finish collection. Mark bits are */
/* consistent and lock is still held. */
-GC_bool GC_collect_or_expand GC_PROTO(( \
- word needed_blocks, GC_bool ignore_off_page));
+GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page);
/* Collect or expand heap in an attempt */
/* make the indicated number of free */
/* blocks available. Should be called */
@@ -1605,17 +1567,17 @@ GC_bool GC_collect_or_expand GC_PROTO(( \
extern GC_bool GC_is_initialized; /* GC_init() has been run. */
#if defined(MSWIN32) || defined(MSWINCE)
- void GC_deinit GC_PROTO((void));
+ void GC_deinit(void);
/* Free any resources allocated by */
/* GC_init */
#endif
-void GC_collect_a_little_inner GC_PROTO((int n));
+void GC_collect_a_little_inner(int n);
/* Do n units worth of garbage */
/* collection work, if appropriate. */
/* A unit is an amount appropriate for */
/* HBLKSIZE bytes of allocation. */
-/* ptr_t GC_generic_malloc GC_PROTO((word lb, int k)); */
+/* void * GC_generic_malloc(size_t lb, int k); */
/* Allocate an object of the given */
/* kind. By default, there are only */
/* a few kinds: composite(pointerfree), */
@@ -1626,99 +1588,106 @@ void GC_collect_a_little_inner GC_PROTO((int n));
/* communicate object layout info */
/* to the collector. */
/* The actual decl is in gc_mark.h. */
-ptr_t GC_generic_malloc_ignore_off_page GC_PROTO((size_t b, int k));
+void * GC_generic_malloc_ignore_off_page(size_t b, int k);
/* As above, but pointers past the */
/* first page of the resulting object */
/* are ignored. */
-ptr_t GC_generic_malloc_inner GC_PROTO((word lb, int k));
+void * GC_generic_malloc_inner(size_t lb, int k);
/* Ditto, but I already hold lock, etc. */
-ptr_t GC_generic_malloc_words_small_inner GC_PROTO((word lw, int k));
- /* Analogous to the above, but assumes */
- /* a small object size, and bypasses */
- /* MERGE_SIZES mechanism. */
-ptr_t GC_generic_malloc_words_small GC_PROTO((size_t lw, int k));
- /* As above, but size in units of words */
- /* Bypasses MERGE_SIZES. Assumes */
- /* words <= MAXOBJSZ. */
-ptr_t GC_generic_malloc_inner_ignore_off_page GC_PROTO((size_t lb, int k));
+void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k);
/* Allocate an object, where */
/* the client guarantees that there */
/* will always be a pointer to the */
/* beginning of the object while the */
/* object is live. */
-ptr_t GC_allocobj GC_PROTO((word sz, int kind));
+void GC_generic_malloc_many(size_t lb, int k, void **result);
+ /* Store a pointer to a list of newly */
+ /* allocated objects of kind k and size */
+ /* lb in *result. */
+ /* Caller must make sure that *result is */
+ /* traced even if objects are ptrfree. */
+ptr_t GC_allocobj(size_t sz, int kind);
/* Make the indicated */
/* free list nonempty, and return its */
- /* head. */
+ /* head. Sz is in granules. */
-void GC_free_inner(GC_PTR p);
+void GC_free_inner(void * p);
+void GC_debug_free_inner(void * p);
-void GC_init_headers GC_PROTO((void));
-struct hblkhdr * GC_install_header GC_PROTO((struct hblk *h));
+void GC_init_headers(void);
+struct hblkhdr * GC_install_header(struct hblk *h);
/* Install a header for block h. */
/* Return 0 on failure, or the header */
/* otherwise. */
-GC_bool GC_install_counts GC_PROTO((struct hblk * h, word sz));
+GC_bool GC_install_counts(struct hblk * h, size_t sz);
/* Set up forwarding counts for block */
/* h of size sz. */
/* Return FALSE on failure. */
-void GC_remove_header GC_PROTO((struct hblk * h));
+void GC_remove_header(struct hblk * h);
/* Remove the header for block h. */
-void GC_remove_counts GC_PROTO((struct hblk * h, word sz));
+void GC_remove_counts(struct hblk * h, size_t sz);
/* Remove forwarding counts for h. */
-hdr * GC_find_header GC_PROTO((ptr_t h)); /* Debugging only. */
+hdr * GC_find_header(ptr_t h); /* Debugging only. */
-void GC_finalize GC_PROTO((void));
+void GC_finalize(void);
/* Perform all indicated finalization actions */
/* on unmarked objects. */
/* Unreachable finalizable objects are enqueued */
/* for processing by GC_invoke_finalizers. */
/* Invoked with lock. */
-void GC_notify_or_invoke_finalizers GC_PROTO((void));
+void GC_notify_or_invoke_finalizers(void);
/* If GC_finalize_on_demand is not set, invoke */
/* eligible finalizers. Otherwise: */
/* Call *GC_finalizer_notifier if there are */
/* finalizers to be run, and we haven't called */
/* this procedure yet this GC cycle. */
-GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
-GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+GC_API void * GC_make_closure(GC_finalization_proc fn, void * data);
+GC_API void GC_debug_invoke_finalizer(void * obj, void * data);
/* Auxiliary fns to make finalization work */
/* correctly with displaced pointers introduced */
/* by the debugging allocators. */
-void GC_add_to_heap GC_PROTO((struct hblk *p, word bytes));
+void GC_add_to_heap(struct hblk *p, size_t bytes);
/* Add a HBLKSIZE aligned chunk to the heap. */
-void GC_print_obj GC_PROTO((ptr_t p));
+void GC_print_obj(ptr_t p);
/* P points to somewhere inside an object with */
/* debugging info. Print a human readable */
/* description of the object to stderr. */
-extern void (*GC_check_heap) GC_PROTO((void));
+extern void (*GC_check_heap)(void);
/* Check that all objects in the heap with */
/* debugging info are intact. */
/* Add any that are not to GC_smashed list. */
-extern void (*GC_print_all_smashed) GC_PROTO((void));
+extern void (*GC_print_all_smashed) (void);
/* Print GC_smashed if it's not empty. */
/* Clear GC_smashed list. */
-extern void GC_print_all_errors GC_PROTO((void));
+extern void GC_print_all_errors (void);
/* Print smashed and leaked objects, if any. */
/* Clear the lists of such objects. */
-extern void (*GC_print_heap_obj) GC_PROTO((ptr_t p));
+extern void (*GC_print_heap_obj) (ptr_t p);
/* If possible print s followed by a more */
/* detailed description of the object */
/* referred to by p. */
#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
- void GC_print_address_map GC_PROTO((void));
+ void GC_print_address_map (void);
/* Print an address map of the process. */
#endif
extern GC_bool GC_have_errors; /* We saw a smashed or leaked object. */
/* Call error printing routine */
/* occasionally. */
-extern GC_bool GC_print_stats; /* Produce at least some logging output */
- /* Set from environment variable. */
+
+#ifndef SMALL_CONFIG
+ extern int GC_print_stats; /* Nonzero generates basic GC log. */
+ /* VERBOSE generates add'l messages. */
+#else
+# define GC_print_stats 0
+ /* Will this keep the message character strings from the executable? */
+ /* It should ... */
+#endif
+#define VERBOSE 2
#ifndef NO_DEBUGGING
extern GC_bool GC_dump_regularly; /* Generate regular debugging dumps. */
@@ -1741,8 +1710,8 @@ extern GC_bool GC_print_back_height;
/* Macros used for collector internal allocation. */
/* These assume the collector lock is held. */
#ifdef DBG_HDRS_ALL
- extern GC_PTR GC_debug_generic_malloc_inner(size_t lb, int k);
- extern GC_PTR GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
+ extern void * GC_debug_generic_malloc_inner(size_t lb, int k);
+ extern void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
int k);
# define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner
# define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
@@ -1767,50 +1736,50 @@ extern GC_bool GC_print_back_height;
#ifdef USE_MUNMAP
void GC_unmap_old(void);
void GC_merge_unmapped(void);
- void GC_unmap(ptr_t start, word bytes);
- void GC_remap(ptr_t start, word bytes);
- void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
+ void GC_unmap(ptr_t start, size_t bytes);
+ void GC_remap(ptr_t start, size_t bytes);
+ void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2);
#endif
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
-void GC_read_dirty GC_PROTO((void));
+void GC_read_dirty(void);
/* Retrieve dirty bits. */
-GC_bool GC_page_was_dirty GC_PROTO((struct hblk *h));
+GC_bool GC_page_was_dirty(struct hblk *h);
/* Read retrieved dirty bits. */
-GC_bool GC_page_was_ever_dirty GC_PROTO((struct hblk *h));
+GC_bool GC_page_was_ever_dirty(struct hblk *h);
/* Could the page contain valid heap pointers? */
-void GC_is_fresh GC_PROTO((struct hblk *h, word n));
+void GC_is_fresh(struct hblk *h, word n);
/* Assert the region currently contains no */
/* valid pointers. */
-void GC_remove_protection GC_PROTO((struct hblk *h, word nblocks,
- GC_bool pointerfree));
+void GC_remove_protection(struct hblk *h, word nblocks,
+ GC_bool pointerfree);
 /* h is about to be written or allocated. Ensure */
/* that it's not write protected by the virtual */
/* dirty bit implementation. */
-void GC_dirty_init GC_PROTO((void));
+void GC_dirty_init(void);
/* Slow/general mark bit manipulation: */
-GC_API GC_bool GC_is_marked GC_PROTO((ptr_t p));
-void GC_clear_mark_bit GC_PROTO((ptr_t p));
-void GC_set_mark_bit GC_PROTO((ptr_t p));
+GC_API GC_bool GC_is_marked(ptr_t p);
+void GC_clear_mark_bit(ptr_t p);
+void GC_set_mark_bit(ptr_t p);
/* Stubborn objects: */
-void GC_read_changed GC_PROTO((void)); /* Analogous to GC_read_dirty */
-GC_bool GC_page_was_changed GC_PROTO((struct hblk * h));
+void GC_read_changed(void); /* Analogous to GC_read_dirty */
+GC_bool GC_page_was_changed(struct hblk * h);
/* Analogous to GC_page_was_dirty */
-void GC_clean_changing_list GC_PROTO((void));
+void GC_clean_changing_list(void);
/* Collect obsolete changing list entries */
-void GC_stubborn_init GC_PROTO((void));
+void GC_stubborn_init(void);
/* Debugging print routines: */
-void GC_print_block_list GC_PROTO((void));
-void GC_print_hblkfreelist GC_PROTO((void));
-void GC_print_heap_sects GC_PROTO((void));
-void GC_print_static_roots GC_PROTO((void));
-void GC_print_finalization_stats GC_PROTO((void));
-void GC_dump GC_PROTO((void));
+void GC_print_block_list(void);
+void GC_print_hblkfreelist(void);
+void GC_print_heap_sects(void);
+void GC_print_static_roots(void);
+void GC_print_finalization_stats(void);
+void GC_dump(void);
#ifdef KEEP_BACK_PTRS
void GC_store_back_pointer(ptr_t source, ptr_t dest);
@@ -1833,50 +1802,22 @@ void GC_dump GC_PROTO((void));
# endif
# endif
-void GC_noop1 GC_PROTO((word));
+void GC_noop1(word);
/* Logging and diagnostic output: */
-GC_API void GC_printf GC_PROTO((GC_CONST char * format, long, long, long, long, long, long));
+GC_API void GC_printf (const char * format, ...);
/* A version of printf that doesn't allocate, */
- /* is restricted to long arguments, and */
- /* (unfortunately) doesn't use varargs for */
- /* portability. Restricted to 6 args and */
/* 1K total output length. */
/* (We use sprintf. Hopefully that doesn't */
/* allocate for long arguments.) */
-# define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l)
-# define GC_printf1(f,a) GC_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
-# define GC_printf2(f,a,b) GC_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
-# define GC_printf3(f,a,b,c) GC_printf(f, (long)a, (long)b, (long)c, 0l, 0l, 0l)
-# define GC_printf4(f,a,b,c,d) GC_printf(f, (long)a, (long)b, (long)c, \
- (long)d, 0l, 0l)
-# define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)a, (long)b, (long)c, \
- (long)d, (long)e, 0l)
-# define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)a, (long)b, (long)c, \
- (long)d, (long)e, (long)g)
-
-GC_API void GC_err_printf GC_PROTO((GC_CONST char * format, long, long, long, long, long, long));
-# define GC_err_printf0(f) GC_err_puts(f)
-# define GC_err_printf1(f,a) GC_err_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
-# define GC_err_printf2(f,a,b) GC_err_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
-# define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)a, (long)b, (long)c, \
- 0l, 0l, 0l)
-# define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)a, (long)b, \
- (long)c, (long)d, 0l, 0l)
-# define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)a, (long)b, \
- (long)c, (long)d, \
- (long)e, 0l)
-# define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)a, (long)b, \
- (long)c, (long)d, \
- (long)e, (long)g)
- /* Ditto, writes to stderr. */
-
-void GC_err_puts GC_PROTO((GC_CONST char *s));
+GC_API void GC_err_printf(const char * format, ...);
+GC_API void GC_log_printf(const char * format, ...);
+void GC_err_puts(const char *s);
/* Write s to stderr, don't buffer, don't add */
/* newlines, don't ... */
#if defined(LINUX) && !defined(SMALL_CONFIG)
- void GC_err_write GC_PROTO((GC_CONST char *buf, size_t len));
+ void GC_err_write(const char *buf, size_t len);
/* Write buf to stderr, don't buffer, don't add */
/* newlines, don't ... */
#endif
@@ -1884,7 +1825,7 @@ void GC_err_puts GC_PROTO((GC_CONST char *s));
# ifdef GC_ASSERTIONS
# define GC_ASSERT(expr) if(!(expr)) {\
- GC_err_printf2("Assertion failure: %s:%ld\n", \
+ GC_err_printf("Assertion failure: %s:%ld\n", \
__FILE__, (unsigned long)__LINE__); \
ABORT("assertion failure"); }
# else
diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
index 280b48b9..0760e9f6 100644
--- a/include/private/gcconfig.h
+++ b/include/private/gcconfig.h
@@ -68,14 +68,10 @@
# endif
# endif
# if defined(sun) && defined(mc68000)
-# define M68K
-# define SUNOS4
-# define mach_type_known
+# error SUNOS4 no longer supported
# endif
# if defined(hp9000s300)
-# define M68K
-# define HP
-# define mach_type_known
+# error M68K based HP machines no longer supported.
# endif
# if defined(OPENBSD) && defined(m68k)
# define M68K
@@ -123,12 +119,7 @@
# if defined(ultrix) || defined(__ultrix)
# define ULTRIX
# else
-# if defined(_SYSTYPE_SVR4) || defined(SYSTYPE_SVR4) \
- || defined(__SYSTYPE_SVR4__)
-# define IRIX5 /* or IRIX 6.X */
-# else
-# define RISCOS /* or IRIX 4.X */
-# endif
+# define IRIX5 /* or IRIX 6.X */
# endif
# endif /* !LINUX */
# if defined(__NetBSD__) && defined(__MIPSEL__)
@@ -159,22 +150,17 @@
# define mach_type_known
# endif
# if defined(ibm032)
-# define RT
-# define mach_type_known
+# error IBM PC/RT no longer supported.
# endif
# if defined(sun) && (defined(sparc) || defined(__sparc))
# define SPARC
/* Test for SunOS 5.x */
# include <errno.h>
-# ifdef ECHRNG
-# define SUNOS5
-# else
-# define SUNOS4
-# endif
+# define SUNOS5
# define mach_type_known
# endif
# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
- && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__FreeBSD__)
+ && !defined(__OpenBSD__) && !defined(__NetBSD__)
# define SPARC
# define DRSNX
# define mach_type_known
@@ -198,9 +184,7 @@
# define mach_type_known
# endif
# if defined(_AUX_SOURCE)
-# define M68K
-# define SYSV
-# define mach_type_known
+# error A/UX no longer supported
# endif
# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1) || defined(_PA_RISC2_0) \
|| defined(hppa) || defined(__hppa__)
@@ -236,12 +220,6 @@
# define ARM32
# define mach_type_known
# endif
-# if defined(LINUX) && defined(__cris__)
-# ifndef CRIS
-# define CRIS
-# endif
-# define mach_type_known
-# endif
# if defined(LINUX) && (defined(powerpc) || defined(__powerpc__) || defined(powerpc64) || defined(__powerpc64__))
# define POWERPC
# define mach_type_known
@@ -262,10 +240,6 @@
# define SH
# define mach_type_known
# endif
-# if defined(LINUX) && defined(__m32r__)
-# define M32R
-# define mach_type_known
-# endif
# if defined(__alpha) || defined(__alpha__)
# define ALPHA
# if !defined(LINUX) && !defined(NETBSD) && !defined(OPENBSD) && !defined(FREEBSD)
@@ -328,10 +302,6 @@
# define X86_64
# define mach_type_known
# endif
-# if defined(FREEBSD) && defined(__sparc__)
-# define SPARC
-# define mach_type_known
-#endif
# if defined(bsdi) && (defined(i386) || defined(__i386__))
# define I386
# define BSDI
@@ -448,15 +418,14 @@
/* Or manually define the machine type here. A machine type is */
/* characterized by the architecture. Some */
/* machine types are further subdivided by OS. */
-/* the macros ULTRIX, RISCOS, and BSD to distinguish. */
-/* Note that SGI IRIX is treated identically to RISCOS. */
+/* Macros such as LINUX, FREEBSD, etc. distinguish them. */
/* SYSV on an M68K actually means A/UX. */
/* The distinction in these cases is usually the stack starting address */
# ifndef mach_type_known
--> unknown machine type
# endif
/* Mapping is: M68K ==> Motorola 680X0 */
- /* (SUNOS4,HP,NEXT, and SYSV (A/UX), */
+ /* (NEXT, and SYSV (A/UX), */
/* MACOS and AMIGA variants) */
/* I386 ==> Intel 386 */
/* (SEQUENT, OS2, SCO, LINUX, NETBSD, */
@@ -464,16 +433,14 @@
/* BSDI,SUNOS5, NEXT, other variants) */
/* NS32K ==> Encore Multimax */
/* MIPS ==> R2000 or R3000 */
- /* (RISCOS, ULTRIX variants) */
+ /* (ULTRIX variants) */
/* VAX ==> DEC VAX */
/* (BSD, ULTRIX variants) */
/* RS6000 ==> IBM RS/6000 AIX3.X */
- /* RT ==> IBM PC/RT */
/* HP_PA ==> HP9000/700 & /800 */
/* HP/UX, LINUX */
/* SPARC ==> SPARC v7/v8/v9 */
- /* (SUNOS4, SUNOS5, LINUX, */
- /* DRSNX variants) */
+ /* (SUNOS5, LINUX, DRSNX variants) */
/* ALPHA ==> DEC Alpha */
/* (OSF1 and LINUX variants) */
/* M88K ==> Motorola 88XX0 */
@@ -492,8 +459,6 @@
/* POWERPC ==> IBM/Apple PowerPC */
/* (MACOS(<=9),DARWIN(incl.MACOSX),*/
/* LINUX, NETBSD, NOSYS variants) */
- /* CRIS ==> Axis Etrax */
- /* M32R ==> Renesas M32R */
/*
@@ -529,9 +494,6 @@
* cause failures on alpha*-*-* with ``-msmall-data or -fpic'' or mips-*-*
* without any special options.
*
- * ALIGN_DOUBLE of GC_malloc should return blocks aligned to twice
- * the pointer size.
- *
* STACKBOTTOM is the cool end of the stack, which is usually the
* highest address in the stack.
* Under PCR or OS/2, we have other ways of finding thread stacks.
@@ -683,43 +645,6 @@
# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
# endif
# endif
-# ifdef SUNOS4
-# define OS_TYPE "SUNOS4"
- extern char etext[];
-# define DATASTART ((ptr_t)((((word) (etext)) + 0x1ffff) & ~0x1ffff))
-# define HEURISTIC1 /* differs */
-# define DYNAMIC_LOADING
-# endif
-# ifdef HP
-# define OS_TYPE "HP"
- extern char etext[];
-# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
-# define STACKBOTTOM ((ptr_t) 0xffeffffc)
- /* empirically determined. seems to work. */
-# include <unistd.h>
-# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
-# endif
-# ifdef SYSV
-# define OS_TYPE "SYSV"
- extern etext[];
-# define DATASTART ((ptr_t)((((word) (etext)) + 0x3fffff) \
- & ~0x3fffff) \
- +((word)etext & 0x1fff))
- /* This only works for shared-text binaries with magic number 0413.
- The other sorts of SysV binaries put the data at the end of the text,
- in which case the default of etext would work. Unfortunately,
- handling both would require having the magic-number available.
- -- Parag
- */
-# define STACKBOTTOM ((ptr_t)0xFFFFFFFE)
- /* The stack starts at the top of memory, but */
- /* 0x0 cannot be used as setjump_test complains */
- /* that the stack direction is incorrect. Two */
- /* bytes down from 0x0 should be safe enough. */
- /* --Parag */
-# include <sys/mmu.h>
-# define GETPAGESIZE() PAGESIZE /* Is this still right? */
-# endif
# ifdef AMIGA
# define OS_TYPE "AMIGA"
/* STACKBOTTOM and DATASTART handled specially */
@@ -786,10 +711,8 @@
# define USE_MMAP_ANON
# define USE_ASM_PUSH_REGS
/* This is potentially buggy. It needs more testing. See the comments in
- os_dep.c. It relies on threads to track writes. */
-# ifdef GC_DARWIN_THREADS
-# define MPROTECT_VDB
-# endif
+ os_dep.c */
+# define MPROTECT_VDB
# include <unistd.h>
# define GETPAGESIZE() getpagesize()
# if defined(USE_PPC_PREFETCH) && defined(__GNUC__)
@@ -840,13 +763,6 @@
# endif
# endif
-# ifdef RT
-# define MACH_TYPE "RT"
-# define ALIGNMENT 4
-# define DATASTART ((ptr_t) 0x10000000)
-# define STACKBOTTOM ((ptr_t) 0x1fffd800)
-# endif
-
# ifdef SPARC
# define MACH_TYPE "SPARC"
# if defined(__arch64__) || defined(__sparcv9)
@@ -857,13 +773,12 @@
# define ALIGNMENT 4 /* Required by hardware */
# define CPP_WORDSZ 32
# endif
-# define ALIGN_DOUBLE
# ifdef SUNOS5
# define OS_TYPE "SUNOS5"
extern int _etext[];
extern int _end[];
- extern ptr_t GC_SysVGetDataStart();
-# define DATASTART GC_SysVGetDataStart(0x10000, _etext)
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
# define DATAEND (_end)
# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
# define USE_MMAP
@@ -896,30 +811,11 @@
/* Solaris 5.4 installation. Weird. */
# define DYNAMIC_LOADING
# endif
-# ifdef SUNOS4
-# define OS_TYPE "SUNOS4"
- /* [If you have a weak stomach, don't read this.] */
- /* We would like to use: */
-/* # define DATASTART ((ptr_t)((((word) (etext)) + 0x1fff) & ~0x1fff)) */
- /* This fails occasionally, due to an ancient, but very */
- /* persistent ld bug. etext is set 32 bytes too high. */
- /* We instead read the text segment size from the a.out */
- /* header, which happens to be mapped into our address space */
- /* at the start of the text segment. The detective work here */
- /* was done by Robert Ehrlich, Manuel Serrano, and Bernard */
- /* Serpette of INRIA. */
- /* This assumes ZMAGIC, i.e. demand-loadable executables. */
-# define TEXTSTART 0x2000
-# define DATASTART ((ptr_t)(*(int *)(TEXTSTART+0x4)+TEXTSTART))
-# define MPROTECT_VDB
-# define HEURISTIC1
-# define DYNAMIC_LOADING
-# endif
# ifdef DRSNX
# define OS_TYPE "DRSNX"
- extern ptr_t GC_SysVGetDataStart();
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
extern int etext[];
-# define DATASTART GC_SysVGetDataStart(0x10000, etext)
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)etext)
# define MPROTECT_VDB
# define STACKBOTTOM ((ptr_t) 0xdfff0000)
# define DYNAMIC_LOADING
@@ -935,13 +831,13 @@
extern int _etext[];
# define DATAEND (_end)
# define SVR4
- extern ptr_t GC_SysVGetDataStart();
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
# ifdef __arch64__
-# define DATASTART GC_SysVGetDataStart(0x100000, _etext)
+# define DATASTART GC_SysVGetDataStart(0x100000, (ptr_t)_etext)
/* libc_stack_end is not set reliably for sparc64 */
# define STACKBOTTOM ((ptr_t) 0x80000000000ULL)
# else
-# define DATASTART GC_SysVGetDataStart(0x10000, _etext)
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
# define LINUX_STACKBOTTOM
# endif
# endif
@@ -962,23 +858,6 @@
# define DATASTART ((ptr_t)(etext))
# endif
# endif
-# ifdef FREEBSD
-# define OS_TYPE "FREEBSD"
-# define SIG_SUSPEND SIGUSR1
-# define SIG_THR_RESTART SIGUSR2
-# define FREEBSD_STACKBOTTOM
-# ifdef __ELF__
-# define DYNAMIC_LOADING
-# endif
- extern char etext[];
- extern char edata[];
- extern char end[];
-# define NEED_FIND_LIMIT
-# define DATASTART ((ptr_t)(&etext))
-# define DATAEND (GC_find_limit (DATASTART, TRUE))
-# define DATASTART2 ((ptr_t)(&edata))
-# define DATAEND2 ((ptr_t)(&end))
-# endif
# endif
# ifdef I386
@@ -994,10 +873,6 @@
/* Borland. */
/* Ivan Demakov: For Watcom the option is -zp4. */
# endif
-# ifndef SMALL_CONFIG
-# define ALIGN_DOUBLE /* Not strictly necessary, but may give speed */
- /* improvement on Pentiums. */
-# endif
# ifdef HAVE_BUILTIN_UNWIND_INIT
# define USE_GENERIC_PUSH_REGS
# endif
@@ -1017,8 +892,8 @@
# ifdef SUNOS5
# define OS_TYPE "SUNOS5"
extern int _etext[], _end[];
- extern ptr_t GC_SysVGetDataStart();
-# define DATASTART GC_SysVGetDataStart(0x1000, _etext)
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)_etext)
# define DATAEND (_end)
/* # define STACKBOTTOM ((ptr_t)(_start)) worked through 2.7, */
/* but reportedly breaks under 2.8. It appears that the stack */
@@ -1065,8 +940,8 @@
# ifdef DGUX
# define OS_TYPE "DGUX"
extern int _etext, _end;
- extern ptr_t GC_SysVGetDataStart();
-# define DATASTART GC_SysVGetDataStart(0x1000, &_etext)
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)(&_etext))
# define DATAEND (&_end)
# define STACK_GROWS_DOWN
# define HEURISTIC2
@@ -1234,8 +1109,8 @@
# define DYNAMIC_LOADING
# endif
extern char etext[];
- extern char * GC_FreeBSDGetDataStart();
-# define DATASTART GC_FreeBSDGetDataStart(0x1000, &etext)
+ extern char * GC_FreeBSDGetDataStart(size_t, ptr_t);
+# define DATASTART GC_FreeBSDGetDataStart(0x1000, (ptr_t)etext)
# endif
# ifdef NETBSD
# define OS_TYPE "NETBSD"
@@ -1351,12 +1226,6 @@
# define OS_TYPE "ULTRIX"
# define ALIGNMENT 4
# endif
-# ifdef RISCOS
-# define HEURISTIC2
-# define DATASTART (ptr_t)0x10000000
-# define OS_TYPE "RISCOS"
-# define ALIGNMENT 4 /* Required by hardware */
-# endif
# ifdef IRIX5
# define HEURISTIC2
extern int _fdata[];
@@ -1377,12 +1246,8 @@
# ifdef _MIPS_SZPTR
# define CPP_WORDSZ _MIPS_SZPTR
# define ALIGNMENT (_MIPS_SZPTR/8)
-# if CPP_WORDSZ != 64
-# define ALIGN_DOUBLE
-# endif
# else
# define ALIGNMENT 4
-# define ALIGN_DOUBLE
# endif
# define DYNAMIC_LOADING
# endif
@@ -1449,7 +1314,6 @@
# else
# define CPP_WORDSZ 32
# define ALIGNMENT 4
-# define ALIGN_DOUBLE
# endif
# if !defined(GC_HPUX_THREADS) && !defined(GC_LINUX_THREADS)
# ifndef LINUX /* For now. */
@@ -1609,7 +1473,6 @@
# ifdef HPUX
# ifdef _ILP32
# define CPP_WORDSZ 32
-# define ALIGN_DOUBLE
/* Requires 8 byte alignment for malloc */
# define ALIGNMENT 4
# else
@@ -1617,7 +1480,6 @@
---> unknown ABI
# endif
# define CPP_WORDSZ 64
-# define ALIGN_DOUBLE
/* Requires 16 byte alignment for malloc */
# define ALIGNMENT 8
# endif
@@ -1645,8 +1507,6 @@
# endif
# ifdef LINUX
# define CPP_WORDSZ 64
-# define ALIGN_DOUBLE
- /* Requires 16 byte alignment for malloc */
# define ALIGNMENT 8
# define OS_TYPE "LINUX"
/* The following works on NUE and older kernels: */
@@ -1704,13 +1564,13 @@
# define CPP_WORDSZ 32 /* Is this possible? */
# endif
# define ALIGNMENT 8
+# define STRTOULL _strtoui64
# endif
# endif
# ifdef M88K
# define MACH_TYPE "M88K"
# define ALIGNMENT 4
-# define ALIGN_DOUBLE
extern int etext[];
# ifdef CX_UX
# define OS_TYPE "CX_UX"
@@ -1718,8 +1578,8 @@
# endif
# ifdef DGUX
# define OS_TYPE "DGUX"
- extern ptr_t GC_SysVGetDataStart();
-# define DATASTART GC_SysVGetDataStart(0x10000, etext)
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)etext)
# endif
# define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */
# endif
@@ -1735,8 +1595,8 @@
extern int etext[];
extern int _etext[];
extern int _end[];
- extern ptr_t GC_SysVGetDataStart();
-# define DATASTART GC_SysVGetDataStart(0x10000, _etext)
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
# define DATAEND (_end)
# define HEURISTIC2
# endif
@@ -1834,19 +1694,6 @@
# endif
#endif
-# ifdef CRIS
-# define MACH_TYPE "CRIS"
-# define CPP_WORDSZ 32
-# define ALIGNMENT 1
-# define OS_TYPE "LINUX"
-# define DYNAMIC_LOADING
-# define LINUX_STACKBOTTOM
-# define USE_GENERIC_PUSH_REGS
-# define SEARCH_FOR_DATA_START
- extern int _end[];
-# define DATAEND (_end)
-# endif
-
# ifdef SH
# define MACH_TYPE "SH"
# define ALIGNMENT 4
@@ -1879,23 +1726,6 @@
# define DATAEND /* not needed */
# endif
-# ifdef M32R
-# define CPP_WORDSZ 32
-# define MACH_TYPE "M32R"
-# define ALIGNMENT 4
-# ifdef LINUX
-# define OS_TYPE "LINUX"
-# define LINUX_STACKBOTTOM
-# undef STACK_GRAN
-# define STACK_GRAN 0x10000000
-# define USE_GENERIC_PUSH_REGS
-# define DYNAMIC_LOADING
-# define SEARCH_FOR_DATA_START
- extern int _end[];
-# define DATAEND (_end)
-# endif
-# endif
-
# ifdef X86_64
# define MACH_TYPE "X86_64"
# define ALIGNMENT 8
@@ -1982,7 +1812,8 @@
# endif
# ifndef GETPAGESIZE
-# if defined(SUNOS5) || defined(IRIX5)
+# if defined(SUNOS5) || defined(IRIX5) || defined(LINUX) \
+ || defined(NETBSD) || defined(FREEBSD) || defined(HPUX)
# include <unistd.h>
# endif
# define GETPAGESIZE() getpagesize()
@@ -2011,7 +1842,7 @@
# if defined(SVR4) || defined(LINUX) || defined(IRIX5) || defined(HPUX) \
|| defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD) \
- || defined(DGUX) || defined(BSD) || defined(SUNOS4) \
+ || defined(DGUX) || defined(BSD) \
|| defined(_AIX) || defined(DARWIN) || defined(OSF1)
# define UNIX_LIKE /* Basic Unix-like system calls work. */
# endif
@@ -2119,7 +1950,7 @@
# endif
# if defined(HP_PA) || defined(M88K) || defined(POWERPC) && !defined(DARWIN) \
- || defined(LINT) || defined(MSWINCE) || defined(ARM32) || defined(CRIS) \
+ || defined(LINT) || defined(MSWINCE) || defined(ARM32) \
|| (defined(I386) && defined(__LCC__))
/* Use setjmp based hack to mark from callee-save registers. */
/* The define should move to the individual platform */
@@ -2168,7 +1999,7 @@
# ifdef SAVE_CALL_CHAIN
# ifndef SAVE_CALL_COUNT
# define NFRAMES 6 /* Number of frames to save. Even for */
- /* alignment reasons. */
+ /* alignment reasons. */
# else
# define NFRAMES ((SAVE_CALL_COUNT + 1) & ~1)
# endif
@@ -2203,6 +2034,14 @@
# define FIXUP_POINTER(p)
# endif
+# if !defined(MARK_BIT_PER_GRANULE) && !defined(MARK_BIT_PER_OBJ)
+# define MARK_BIT_PER_GRANULE /* Usually faster */
+# endif
+
+# if defined(MARK_BIT_PER_GRANULE) && defined(MARK_BIT_PER_OBJ)
+# error Define only one of MARK_BIT_PER_GRANULE and MARK_BIT_PER_OBJ.
+# endif
+
#ifdef GC_PRIVATE_H
/* This relies on some type definitions from gc_priv.h, from */
/* where it's normally included. */
diff --git a/include/private/pthread_support.h b/include/private/pthread_support.h
index d52e4da9..b1733824 100644
--- a/include/private/pthread_support.h
+++ b/include/private/pthread_support.h
@@ -50,22 +50,21 @@ typedef struct GC_Thread_Rep {
/* reclamation of any data it might */
/* reference. */
# ifdef THREAD_LOCAL_ALLOC
-# if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
-# define GRANULARITY 16
-# define NFREELISTS 49
-# else
-# define GRANULARITY 8
-# define NFREELISTS 65
-# endif
- /* The ith free list corresponds to size i*GRANULARITY */
-# define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
-# define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
+ /* The ith free list corresponds to size i*GRANULE_BYTES */
+ /* Convert the number of requested bytes to a suitable free */
+ /* list index, adding EXTRA_BYTES if appripriate. */
+# define INDEX_FROM_REQUESTED_BYTES(n) \
+ ((ADD_SLOP(n) + GRANULE_BYTES - 1)/GRANULE_BYTES)
+ /* Convert a free list index to the actual size of objects */
+ /* on that list, including extra space we added. Not an */
+ /* inverse of the above. */
+# define RAW_BYTES_FROM_INDEX(i) ((i) * GRANULE_BYTES)
# define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
- (NFREELISTS-1)*GRANULARITY)
- ptr_t ptrfree_freelists[NFREELISTS];
- ptr_t normal_freelists[NFREELISTS];
+ (TINY_FREELISTS-1)*GRANULE_BYTES)
+ void * ptrfree_freelists[TINY_FREELISTS];
+ void * normal_freelists[TINY_FREELISTS];
# ifdef GC_GCJ_SUPPORT
- ptr_t gcj_freelists[NFREELISTS];
+ void * gcj_freelists[TINY_FREELISTS];
# endif
/* Free lists contain either a pointer or a small count */
/* reflecting the number of granules allocated at that */
@@ -78,7 +77,7 @@ typedef struct GC_Thread_Rep {
/* >= HBLKSIZE => pointer to nonempty free list. */
/* > DIRECT_GRANULES, < HBLKSIZE ==> transition to */
/* local alloc, equivalent to 0. */
-# define DIRECT_GRANULES (HBLKSIZE/GRANULARITY)
+# define DIRECT_GRANULES (HBLKSIZE/GRANULE_BYTES)
/* Don't use local free lists for up to this much */
/* allocation. */
# endif
diff --git a/include/private/solaris_threads.h b/include/private/solaris_threads.h
deleted file mode 100644
index b1f62620..00000000
--- a/include/private/solaris_threads.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifdef GC_SOLARIS_THREADS
-
-/* The set of all known threads. We intercept thread creation and */
-/* joins. We never actually create detached threads. We allocate all */
-/* new thread stacks ourselves. These allow us to maintain this */
-/* data structure. */
-/* Protected by GC_thr_lock. */
-/* Some of this should be declared volatile, but that's incosnsistent */
-/* with some library routine declarations. In particular, the */
-/* definition of cond_t doesn't mention volatile! */
- typedef struct GC_Thread_Rep {
- struct GC_Thread_Rep * next;
- thread_t id;
- word flags;
-# define FINISHED 1 /* Thread has exited. */
-# define DETACHED 2 /* Thread is intended to be detached. */
-# define CLIENT_OWNS_STACK 4
- /* Stack was supplied by client. */
-# define SUSPNDED 8 /* Currently suspended. */
- /* SUSPENDED is used insystem header. */
- ptr_t stack;
- size_t stack_size;
- cond_t join_cv;
- void * status;
- } * GC_thread;
- extern GC_thread GC_new_thread(thread_t id);
-
- extern GC_bool GC_thr_initialized;
- extern volatile GC_thread GC_threads[];
- extern size_t GC_min_stack_sz;
- extern size_t GC_page_sz;
- extern void GC_thr_init(void);
- extern ptr_t GC_stack_alloc(size_t * stack_size);
- extern void GC_stack_free(ptr_t stack, size_t size);
-
-# endif /* GC_SOLARIS_THREADS */
-
diff --git a/include/private/specific.h b/include/private/specific.h
index d04e19f5..fc2e8f9e 100644
--- a/include/private/specific.h
+++ b/include/private/specific.h
@@ -13,6 +13,7 @@
*/
#include <errno.h>
+#include "atomic_ops.h"
/* Called during key creation or setspecific. */
/* For the GC we already hold lock. */
@@ -34,7 +35,7 @@
/* value. This invariant must be preserved at ALL times, since */
/* asynchronous reads are allowed. */
typedef struct thread_specific_entry {
- unsigned long qtid; /* quick thread id, only for cache */
+ volatile AO_t qtid; /* quick thread id, only for cache */
void * value;
struct thread_specific_entry *next;
pthread_t thread;
diff --git a/mach_dep.c b/mach_dep.c
index c0d01b35..b11ad6a6 100644
--- a/mach_dep.c
+++ b/mach_dep.c
@@ -94,7 +94,7 @@ void GC_push_regs()
asm("pushl r6"); asm("calls $1,_GC_push_one");
# define HAVE_PUSH_REGS
# endif
-# if defined(M68K) && (defined(SUNOS4) || defined(NEXT))
+# if defined(M68K) && defined(NEXT)
/* M68K SUNOS - could be replaced by generic code */
/* a0, a1 and d1 are caller save */
/* and therefore are on stack or dead. */
@@ -465,7 +465,7 @@ ptr_t cold_gc_frame;
/* the stack. Return sp. */
# ifdef SPARC
asm(" .seg \"text\"");
-# if defined(SVR4) || defined(NETBSD) || defined(FREEBSD)
+# if defined(SVR4) || defined(NETBSD)
asm(" .globl GC_save_regs_in_stack");
asm("GC_save_regs_in_stack:");
asm(" .type GC_save_regs_in_stack,#function");
@@ -547,14 +547,9 @@ ptr_t cold_gc_frame;
#ifndef SPARC
--> fix it
#endif
-# ifdef SUNOS4
- asm(".globl _GC_clear_stack_inner");
- asm("_GC_clear_stack_inner:");
-# else
- asm(".globl GC_clear_stack_inner");
- asm("GC_clear_stack_inner:");
- asm(".type GC_save_regs_in_stack,#function");
-# endif
+ asm(".globl GC_clear_stack_inner");
+ asm("GC_clear_stack_inner:");
+ asm(".type GC_save_regs_in_stack,#function");
#if defined(__arch64__) || defined(__sparcv9)
asm("mov %sp,%o2"); /* Save sp */
asm("add %sp,2047-8,%o3"); /* p = sp+bias-8 */
diff --git a/malloc.c b/malloc.c
index cb3f3766..879aa945 100644
--- a/malloc.c
+++ b/malloc.c
@@ -1,7 +1,7 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -12,53 +12,52 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, February 7, 1996 4:32 pm PST */
#include <stdio.h>
#include "private/gc_priv.h"
-extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
-void GC_extend_size_map(); /* in misc.c. */
+extern void * GC_clear_stack(void *); /* in misc.c, behaves like identity */
+void GC_extend_size_map(size_t); /* in misc.c. */
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
-GC_bool GC_alloc_reclaim_list(kind)
-register struct obj_kind * kind;
+GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
struct hblk ** result = (struct hblk **)
- GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
+ GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
if (result == 0) return(FALSE);
- BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
+ BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
kind -> ok_reclaim_list = result;
return(TRUE);
}
-/* Allocate a large block of size lw words. */
+/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* We hold the allocation lock. */
-ptr_t GC_alloc_large(lw, k, flags)
-word lw;
-int k;
-unsigned flags;
+/* EXTRA_BYTES were already added to lb. */
+ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
struct hblk * h;
- word n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+ word n_blocks;
ptr_t result;
+ /* Round up to a multiple of a granule. */
+ lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
+ n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (!GC_is_initialized) GC_init_inner();
/* Do our share of marking work */
if(GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
- h = GC_allochblk(lw, k, flags);
+ h = GC_allochblk(lb, k, flags);
# ifdef USE_MUNMAP
if (0 == h) {
GC_merge_unmapped();
- h = GC_allochblk(lw, k, flags);
+ h = GC_allochblk(lb, k, flags);
}
# endif
while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
- h = GC_allochblk(lw, k, flags);
+ h = GC_allochblk(lb, k, flags);
}
if (h == 0) {
result = 0;
@@ -69,8 +68,8 @@ unsigned flags;
if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
GC_max_large_allocd_bytes = GC_large_allocd_bytes;
}
- result = (ptr_t) (h -> hb_body);
- GC_words_wasted += BYTES_TO_WORDS(total_bytes) - lw;
+ result = h -> hb_body;
+ GC_bytes_wasted += total_bytes - lb;
}
return result;
}
@@ -78,13 +77,11 @@ unsigned flags;
/* Allocate a large block of size lb bytes. Clear if appropriate. */
/* We hold the allocation lock. */
-ptr_t GC_alloc_large_and_clear(lw, k, flags)
-word lw;
-int k;
-unsigned flags;
+/* EXTRA_BYTES were already added to lb. */
+ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
- ptr_t result = GC_alloc_large(lw, k, flags);
- word n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+ ptr_t result = GC_alloc_large(lb, k, flags);
+ word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (0 == result) return 0;
if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
@@ -100,60 +97,34 @@ unsigned flags;
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold lock: */
-ptr_t GC_generic_malloc_inner(lb, k)
-register word lb;
-register int k;
+void * GC_generic_malloc_inner(size_t lb, int k)
{
-register word lw;
-register ptr_t op;
-register ptr_t *opp;
+size_t lg;
+void *op;
+void **opp;
- if( SMALL_OBJ(lb) ) {
+ if(SMALL_OBJ(lb)) {
register struct obj_kind * kind = GC_obj_kinds + k;
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
- if (lw == 0) lw = MIN_WORDS;
-# endif
- opp = &(kind -> ok_freelist[lw]);
+ lg = GC_size_map[lb];
+ opp = &(kind -> ok_freelist[lg]);
if( (op = *opp) == 0 ) {
-# ifdef MERGE_SIZES
- if (GC_size_map[lb] == 0) {
- if (!GC_is_initialized) GC_init_inner();
- if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
- return(GC_generic_malloc_inner(lb, k));
- }
-# else
- if (!GC_is_initialized) {
- GC_init_inner();
- return(GC_generic_malloc_inner(lb, k));
- }
-# endif
+ if (GC_size_map[lb] == 0) {
+ if (!GC_is_initialized) GC_init_inner();
+ if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
+ return(GC_generic_malloc_inner(lb, k));
+ }
if (kind -> ok_reclaim_list == 0) {
if (!GC_alloc_reclaim_list(kind)) goto out;
}
- op = GC_allocobj(lw, k);
+ op = GC_allocobj(lg, k);
if (op == 0) goto out;
}
- /* Here everything is in a consistent state. */
- /* We assume the following assignment is */
- /* atomic. If we get aborted */
- /* after the assignment, we lose an object, */
- /* but that's benign. */
- /* Volatile declarations may need to be added */
- /* to prevent the compiler from breaking things.*/
- /* If we only execute the second of the */
- /* following assignments, we lose the free */
- /* list, but that should still be OK, at least */
- /* for garbage collected memory. */
*opp = obj_link(op);
obj_link(op) = 0;
} else {
- lw = ROUNDED_UP_WORDS(lb);
- op = (ptr_t)GC_alloc_large_and_clear(lw, k, 0);
+ op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
}
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
out:
return op;
@@ -162,46 +133,41 @@ out:
/* Allocate a composite object of size n bytes. The caller guarantees */
/* that pointers past the first page are not relevant. Caller holds */
/* allocation lock. */
-ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
-register size_t lb;
-register int k;
+void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
- register word lw;
- ptr_t op;
+ word lb_adjusted;
+ void * op;
if (lb <= HBLKSIZE)
- return(GC_generic_malloc_inner((word)lb, k));
- lw = ROUNDED_UP_WORDS(lb);
- op = (ptr_t)GC_alloc_large_and_clear(lw, k, IGNORE_OFF_PAGE);
- GC_words_allocd += lw;
+ return(GC_generic_malloc_inner(lb, k));
+ lb_adjusted = ADD_SLOP(lb);
+ op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
+ GC_bytes_allocd += lb_adjusted;
return op;
}
-ptr_t GC_generic_malloc(lb, k)
-register word lb;
-register int k;
+void * GC_generic_malloc(size_t lb, int k)
{
- ptr_t result;
+ void * result;
DCL_LOCK_STATE;
if (GC_have_errors) GC_print_all_errors();
GC_INVOKE_FINALIZERS();
if (SMALL_OBJ(lb)) {
- DISABLE_SIGNALS();
LOCK();
result = GC_generic_malloc_inner((word)lb, k);
UNLOCK();
- ENABLE_SIGNALS();
} else {
- word lw;
+ size_t lw;
+ size_t lb_rounded;
word n_blocks;
GC_bool init;
lw = ROUNDED_UP_WORDS(lb);
- n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+ lb_rounded = WORDS_TO_BYTES(lw);
+ n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
- DISABLE_SIGNALS();
LOCK();
- result = (ptr_t)GC_alloc_large(lw, k, 0);
+ result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
@@ -216,9 +182,8 @@ register int k;
# endif
}
}
- GC_words_allocd += lw;
+ GC_bytes_allocd += lb_rounded;
UNLOCK();
- ENABLE_SIGNALS();
if (init && !GC_debugging_started && 0 != result) {
BZERO(result, n_blocks * HBLKSIZE);
}
@@ -232,65 +197,46 @@ register int k;
#define GENERAL_MALLOC(lb,k) \
- (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
+ GC_clear_stack(GC_generic_malloc(lb, k))
/* We make the GC_clear_stack_call a tail call, hoping to get more of */
/* the stack. */
/* Allocate lb bytes of atomic (pointerfree) data */
-# ifdef __STDC__
- GC_PTR GC_malloc_atomic(size_t lb)
-# else
- GC_PTR GC_malloc_atomic(lb)
- size_t lb;
-# endif
+void * GC_malloc_atomic(size_t lb)
{
-register ptr_t op;
-register ptr_t * opp;
-register word lw;
-DCL_LOCK_STATE;
+ void *op;
+ void ** opp;
+ size_t lg;
+ DCL_LOCK_STATE;
- if( EXPECT(SMALL_OBJ(lb), 1) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_aobjfreelist[lw]);
+ if(SMALL_OBJ(lb)) {
+ lg = GC_size_map[lb];
+ opp = &(GC_aobjfreelist[lg]);
FASTLOCK();
if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
FASTUNLOCK();
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
- /* See above comment on signals. */
*opp = obj_link(op);
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
FASTUNLOCK();
- return((GC_PTR) op);
+ return((void *) op);
} else {
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
}
/* Allocate lb bytes of composite (pointerful) data */
-# ifdef __STDC__
- GC_PTR GC_malloc(size_t lb)
-# else
- GC_PTR GC_malloc(lb)
- size_t lb;
-# endif
+void * GC_malloc(size_t lb)
{
-register ptr_t op;
-register ptr_t *opp;
-register word lw;
-DCL_LOCK_STATE;
+ void *op;
+ void **opp;
+ size_t lg;
+ DCL_LOCK_STATE;
- if( EXPECT(SMALL_OBJ(lb), 1) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_objfreelist[lw]);
+ if(SMALL_OBJ(lb)) {
+ lg = GC_size_map[lb];
+ opp = (void **)&(GC_objfreelist[lg]);
FASTLOCK();
if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
FASTUNLOCK();
@@ -304,11 +250,11 @@ DCL_LOCK_STATE;
>= (word)GC_least_plausible_heap_addr);
*opp = obj_link(op);
obj_link(op) = 0;
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
FASTUNLOCK();
- return((GC_PTR) op);
+ return op;
} else {
- return(GENERAL_MALLOC((word)lb, NORMAL));
+ return(GENERAL_MALLOC(lb, NORMAL));
}
}
@@ -326,12 +272,7 @@ DCL_LOCK_STATE;
# define GC_debug_malloc_replacement(lb) \
GC_debug_malloc(lb, RA "unknown", 0)
-# ifdef __STDC__
- GC_PTR malloc(size_t lb)
-# else
- GC_PTR malloc(lb)
- size_t lb;
-# endif
+void * malloc(size_t lb)
{
/* It might help to manually inline the GC_malloc call here. */
/* But any decent compiler should reduce the extra procedure call */
@@ -346,27 +287,17 @@ DCL_LOCK_STATE;
*/
if (!GC_is_initialized) return sbrk(lb);
# endif /* I386 && GC_SOLARIS_THREADS */
- return((GC_PTR)REDIRECT_MALLOC(lb));
+ return((void *)REDIRECT_MALLOC(lb));
}
-# ifdef __STDC__
- GC_PTR calloc(size_t n, size_t lb)
-# else
- GC_PTR calloc(n, lb)
- size_t n, lb;
-# endif
- {
- return((GC_PTR)REDIRECT_MALLOC(n*lb));
- }
+void * calloc(size_t n, size_t lb)
+{
+ return((void *)REDIRECT_MALLOC(n*lb));
+}
#ifndef strdup
# include <string.h>
-# ifdef __STDC__
- char *strdup(const char *s)
-# else
- char *strdup(s)
- char *s;
-# endif
+ char *strdup(const char *s)
{
size_t len = strlen(s) + 1;
char * result = ((char *)REDIRECT_MALLOC(len+1));
@@ -383,25 +314,23 @@ DCL_LOCK_STATE;
# endif /* REDIRECT_MALLOC */
/* Explicitly deallocate an object p. */
-# ifdef __STDC__
- void GC_free(GC_PTR p)
-# else
- void GC_free(p)
- GC_PTR p;
-# endif
+void GC_free(void * p)
{
- register struct hblk *h;
- register hdr *hhdr;
- register signed_word sz;
- register ptr_t * flh;
- register int knd;
- register struct obj_kind * ok;
+ struct hblk *h;
+ hdr *hhdr;
+ size_t sz; /* In bytes */
+ size_t ngranules; /* sz in granules */
+ void **flh;
+ int knd;
+ struct obj_kind * ok;
DCL_LOCK_STATE;
if (p == 0) return;
/* Required by ANSI. It's not my fault ... */
h = HBLKPTR(p);
hhdr = HDR(h);
+ sz = hhdr -> hb_sz;
+ ngranules = BYTES_TO_GRANULES(sz);
GC_ASSERT(GC_base(p) == p);
# if defined(REDIRECT_MALLOC) && \
(defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
@@ -413,38 +342,31 @@ DCL_LOCK_STATE;
if (0 == hhdr) return;
# endif
knd = hhdr -> hb_obj_kind;
- sz = hhdr -> hb_sz;
ok = &GC_obj_kinds[knd];
- if (EXPECT((sz <= MAXOBJSZ), 1)) {
+ if (EXPECT((ngranules <= MAXOBJGRANULES), 1)) {
# ifdef THREADS
- DISABLE_SIGNALS();
LOCK();
# endif
- GC_mem_freed += sz;
- /* A signal here can make GC_mem_freed and GC_non_gc_bytes */
- /* inconsistent. We claim this is benign. */
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
+ GC_bytes_freed += sz;
+ if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
/* Its unnecessary to clear the mark bit. If the */
/* object is reallocated, it doesn't matter. O.w. the */
/* collector will do it, since it's on a free list. */
if (ok -> ok_init) {
- BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
+ BZERO((word *)p + 1, sz-sizeof(word));
}
- flh = &(ok -> ok_freelist[sz]);
+ flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
# ifdef THREADS
UNLOCK();
- ENABLE_SIGNALS();
# endif
} else {
- DISABLE_SIGNALS();
LOCK();
- GC_mem_freed += sz;
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
+ GC_bytes_freed += sz;
+ if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
GC_freehblk(h);
UNLOCK();
- ENABLE_SIGNALS();
}
}
@@ -452,33 +374,35 @@ DCL_LOCK_STATE;
/* Only used for internally allocated objects, so we can take some */
/* shortcuts. */
#ifdef THREADS
-void GC_free_inner(GC_PTR p)
+void GC_free_inner(void * p)
{
- register struct hblk *h;
- register hdr *hhdr;
- register signed_word sz;
- register ptr_t * flh;
- register int knd;
- register struct obj_kind * ok;
+ struct hblk *h;
+ hdr *hhdr;
+ size_t sz; /* bytes */
+ size_t ngranules; /* sz in granules */
+ void ** flh;
+ int knd;
+ struct obj_kind * ok;
DCL_LOCK_STATE;
h = HBLKPTR(p);
hhdr = HDR(h);
knd = hhdr -> hb_obj_kind;
sz = hhdr -> hb_sz;
+ ngranules = BYTES_TO_GRANULES(sz);
ok = &GC_obj_kinds[knd];
- if (sz <= MAXOBJSZ) {
- GC_mem_freed += sz;
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
+ if (ngranules <= MAXOBJGRANULES) {
+ GC_bytes_freed += sz;
+ if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
if (ok -> ok_init) {
- BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
+ BZERO((word *)p + 1, sz-sizeof(word));
}
- flh = &(ok -> ok_freelist[sz]);
+ flh = &(ok -> ok_freelist[ngranules]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
} else {
- GC_mem_freed += sz;
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
+ GC_bytes_freed += sz;
+ if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
GC_freehblk(h);
}
}
@@ -488,12 +412,7 @@ void GC_free_inner(GC_PTR p)
# define REDIRECT_FREE GC_free
# endif
# ifdef REDIRECT_FREE
-# ifdef __STDC__
- void free(GC_PTR p)
-# else
- void free(p)
- GC_PTR p;
-# endif
+ void free(void * p)
{
# ifndef IGNORE_FREE
REDIRECT_FREE(p);
diff --git a/mallocx.c b/mallocx.c
index d45f21e8..11567ff3 100644
--- a/mallocx.c
+++ b/mallocx.c
@@ -31,17 +31,15 @@ GC_bool GC_alloc_reclaim_list(); /* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h */
/* or introducing dependencies on internal data structure layouts. */
-ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
-ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
-ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
+void ** const GC_objfreelist_ptr = GC_objfreelist;
+void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
+void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
- ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
+ void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
-GC_PTR GC_generic_or_special_malloc(lb,knd)
-word lb;
-int knd;
+void * GC_generic_or_special_malloc(size_t lb, int knd)
{
switch(knd) {
# ifdef STUBBORN_ALLOC
@@ -68,26 +66,19 @@ int knd;
/* lb bytes. The object may be (and quite likely will be) moved. */
/* The kind (e.g. atomic) is the same as that of the old. */
/* Shrinking of large blocks is not implemented well. */
-# ifdef __STDC__
- GC_PTR GC_realloc(GC_PTR p, size_t lb)
-# else
- GC_PTR GC_realloc(p,lb)
- GC_PTR p;
- size_t lb;
-# endif
+void * GC_realloc(void * p, size_t lb)
{
-register struct hblk * h;
-register hdr * hhdr;
-register word sz; /* Current size in bytes */
-register word orig_sz; /* Original sz in bytes */
-int obj_kind;
+ struct hblk * h;
+ hdr * hhdr;
+ size_t sz; /* Current size in bytes */
+ size_t orig_sz; /* Original sz in bytes */
+ int obj_kind;
if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
h = HBLKPTR(p);
hhdr = HDR(h);
sz = hhdr -> hb_sz;
obj_kind = hhdr -> hb_obj_kind;
- sz = WORDS_TO_BYTES(sz);
orig_sz = sz;
if (sz > MAXOBJBYTES) {
@@ -95,10 +86,16 @@ int obj_kind;
register word descr;
sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
- hhdr -> hb_sz = BYTES_TO_WORDS(sz);
+ hhdr -> hb_sz = sz;
descr = GC_obj_kinds[obj_kind].ok_descriptor;
if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
hhdr -> hb_descr = descr;
+# ifdef MARK_BIT_PER_OBJ
+ GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
+# else
+ GC_ASSERT(hhdr -> hb_large_block &&
+ hhdr -> hb_map[ANY_INDEX] == 1);
+# endif
if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
/* Extra area is already cleared by GC_alloc_large_and_clear. */
}
@@ -116,7 +113,7 @@ int obj_kind;
return(p);
} else {
/* shrink */
- GC_PTR result =
+ void * result =
GC_generic_or_special_malloc((word)lb, obj_kind);
if (result == 0) return(0);
@@ -130,7 +127,7 @@ int obj_kind;
}
} else {
/* grow */
- GC_PTR result =
+ void * result =
GC_generic_or_special_malloc((word)lb, obj_kind);
if (result == 0) return(0);
@@ -157,13 +154,7 @@ int obj_kind;
# define GC_debug_realloc_replacement(p, lb) \
GC_debug_realloc(p, lb, RA "unknown", 0)
-# ifdef __STDC__
- GC_PTR realloc(GC_PTR p, size_t lb)
-# else
- GC_PTR realloc(p,lb)
- GC_PTR p;
- size_t lb;
-# endif
+void * realloc(void * p, size_t lb)
{
return(REDIRECT_REALLOC(p, lb));
}
@@ -174,12 +165,11 @@ int obj_kind;
/* The same thing, except caller does not hold allocation lock. */
/* We avoid holding allocation lock while we clear memory. */
-ptr_t GC_generic_malloc_ignore_off_page(lb, k)
-register size_t lb;
-register int k;
+void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
- register ptr_t result;
- word lw;
+ void *result;
+ size_t lw;
+ size_t lb_rounded;
word n_blocks;
GC_bool init;
DCL_LOCK_STATE;
@@ -187,13 +177,13 @@ register int k;
if (SMALL_OBJ(lb))
return(GC_generic_malloc((word)lb, k));
lw = ROUNDED_UP_WORDS(lb);
- n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+ lb_rounded = WORDS_TO_BYTES(lw);
+ n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
init = GC_obj_kinds[k].ok_init;
if (GC_have_errors) GC_print_all_errors();
GC_INVOKE_FINALIZERS();
- DISABLE_SIGNALS();
LOCK();
- result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
+ result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
if (0 != result) {
if (GC_debugging_started) {
BZERO(result, n_blocks * HBLKSIZE);
@@ -208,9 +198,8 @@ register int k;
# endif
}
}
- GC_words_allocd += lw;
+ GC_bytes_allocd += lb_rounded;
UNLOCK();
- ENABLE_SIGNALS();
if (0 == result) {
return((*GC_oom_fn)(lb));
} else {
@@ -221,108 +210,43 @@ register int k;
}
}
-# if defined(__STDC__) || defined(__cplusplus)
- void * GC_malloc_ignore_off_page(size_t lb)
-# else
- char * GC_malloc_ignore_off_page(lb)
- register size_t lb;
-# endif
+void * GC_malloc_ignore_off_page(size_t lb)
{
- return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
+ return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}
-# if defined(__STDC__) || defined(__cplusplus)
- void * GC_malloc_atomic_ignore_off_page(size_t lb)
-# else
- char * GC_malloc_atomic_ignore_off_page(lb)
- register size_t lb;
-# endif
+void * GC_malloc_atomic_ignore_off_page(size_t lb)
{
- return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
+ return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
-/* Increment GC_words_allocd from code that doesn't have direct access */
+/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays. */
-# ifdef __STDC__
-void GC_incr_words_allocd(size_t n)
+void GC_incr_bytes_allocd(size_t n)
{
- GC_words_allocd += n;
-}
-
-/* The same for GC_mem_freed. */
-void GC_incr_mem_freed(size_t n)
-{
- GC_mem_freed += n;
-}
-# endif /* __STDC__ */
-
-/* Analogous to the above, but assumes a small object size, and */
-/* bypasses MERGE_SIZES mechanism. Used by gc_inline.h. */
-ptr_t GC_generic_malloc_words_small_inner(lw, k)
-register word lw;
-register int k;
-{
-register ptr_t op;
-register ptr_t *opp;
-register struct obj_kind * kind = GC_obj_kinds + k;
-
- opp = &(kind -> ok_freelist[lw]);
- if( (op = *opp) == 0 ) {
- if (!GC_is_initialized) {
- GC_init_inner();
- }
- if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
- op = GC_clear_stack(GC_allocobj((word)lw, k));
- }
- if (op == 0) {
- UNLOCK();
- ENABLE_SIGNALS();
- return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
- }
- }
- *opp = obj_link(op);
- obj_link(op) = 0;
- GC_words_allocd += lw;
- return((ptr_t)op);
+ GC_bytes_allocd += n;
}
-/* Analogous to the above, but assumes a small object size, and */
-/* bypasses MERGE_SIZES mechanism. Used by gc_inline.h. */
-#ifdef __STDC__
- ptr_t GC_generic_malloc_words_small(size_t lw, int k)
-#else
- ptr_t GC_generic_malloc_words_small(lw, k)
- register word lw;
- register int k;
-#endif
+/* The same for GC_bytes_freed. */
+void GC_incr_bytes_freed(size_t n)
{
-register ptr_t op;
-DCL_LOCK_STATE;
-
- if (GC_have_errors) GC_print_all_errors();
- GC_INVOKE_FINALIZERS();
- DISABLE_SIGNALS();
- LOCK();
- op = GC_generic_malloc_words_small_inner(lw, k);
- UNLOCK();
- ENABLE_SIGNALS();
- return((ptr_t)op);
+ GC_bytes_freed += n;
}
#if defined(THREADS) && !defined(SRC_M3)
-extern signed_word GC_mem_found; /* Protected by GC lock. */
+extern signed_word GC_bytes_found; /* Protected by GC lock. */
#ifdef PARALLEL_MARK
-volatile signed_word GC_words_allocd_tmp = 0;
- /* Number of words of memory allocated since */
+volatile signed_word GC_bytes_allocd_tmp = 0;
+ /* Number of bytes of memory allocated since */
/* we released the GC lock. Instead of */
/* reacquiring the GC lock just to add this in, */
/* we add it in the next time we reacquire */
/* the lock. (Atomically adding it doesn't */
/* work, since we would have to atomically */
/* update it in GC_malloc, which is too */
- /* expensive. */
+ /* expensive.) */
#endif /* PARALLEL_MARK */
/* See reclaim.c: */
@@ -337,40 +261,34 @@ extern ptr_t GC_reclaim_generic();
/* GC_malloc_many or friends to replenish it. (We do not round up */
/* object sizes, since a call indicates the intention to consume many */
/* objects of exactly this size.) */
+/* We assume that the size is a multiple of GRANULE_BYTES. */
/* We return the free-list by assigning it to *result, since it is */
/* not safe to return, e.g. a linked list of pointer-free objects, */
/* since the collector would not retain the entire list if it were */
/* invoked just as we were returning. */
/* Note that the client should usually clear the link field. */
-void GC_generic_malloc_many(lb, k, result)
-register word lb;
-register int k;
-ptr_t *result;
+void GC_generic_malloc_many(size_t lb, int k, void **result)
{
-ptr_t op;
-ptr_t p;
-ptr_t *opp;
-word lw;
-word my_words_allocd = 0;
+void *op;
+void *p;
+void **opp;
+size_t lw; /* Length in words. */
+size_t lg; /* Length in granules. */
+word my_bytes_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;
-# if defined(GATHERSTATS) || defined(PARALLEL_MARK)
-# define COUNT_ARG , &my_words_allocd
-# else
-# define COUNT_ARG
-# define NEED_TO_COUNT
-# endif
+ GC_ASSERT((lb & (GRANULE_BYTES-1)) == 0);
if (!SMALL_OBJ(lb)) {
op = GC_generic_malloc(lb, k);
if(0 != op) obj_link(op) = 0;
*result = op;
return;
}
- lw = ALIGNED_WORDS(lb);
+ lw = BYTES_TO_WORDS(lb);
+ lg = BYTES_TO_GRANULES(lb);
if (GC_have_errors) GC_print_all_errors();
GC_INVOKE_FINALIZERS();
- DISABLE_SIGNALS();
LOCK();
if (!GC_is_initialized) GC_init_inner();
/* Do our share of marking work */
@@ -386,55 +304,45 @@ DCL_LOCK_STATE;
struct hblk * hbp;
hdr * hhdr;
- rlh += lw;
+ rlh += lg;
while ((hbp = *rlh) != 0) {
hhdr = HDR(hbp);
*rlh = hhdr -> hb_next;
+ GC_ASSERT(hhdr -> hb_sz == lb);
hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
# ifdef PARALLEL_MARK
{
- signed_word my_words_allocd_tmp = GC_words_allocd_tmp;
+ signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;
- GC_ASSERT(my_words_allocd_tmp >= 0);
+ GC_ASSERT(my_bytes_allocd_tmp >= 0);
/* We only decrement it while holding the GC lock. */
/* Thus we can't accidentally adjust it down in more */
/* than one thread simultaneously. */
- if (my_words_allocd_tmp != 0) {
- (void)GC_atomic_add(
- (volatile GC_word *)(&GC_words_allocd_tmp),
- (GC_word)(-my_words_allocd_tmp));
- GC_words_allocd += my_words_allocd_tmp;
+ if (my_bytes_allocd_tmp != 0) {
+ (void)AO_fetch_and_add(
+ (volatile AO_t *)(&GC_bytes_allocd_tmp),
+ (AO_t)(-my_bytes_allocd_tmp));
+ GC_bytes_allocd += my_bytes_allocd_tmp;
}
}
GC_acquire_mark_lock();
++ GC_fl_builder_count;
UNLOCK();
- ENABLE_SIGNALS();
GC_release_mark_lock();
# endif
- op = GC_reclaim_generic(hbp, hhdr, lw,
- ok -> ok_init, 0 COUNT_ARG);
+ op = GC_reclaim_generic(hbp, hhdr, lb,
+ ok -> ok_init, 0, &my_bytes_allocd);
if (op != 0) {
-# ifdef NEED_TO_COUNT
- /* We are neither gathering statistics, nor marking in */
- /* parallel. Thus GC_reclaim_generic doesn't count */
- /* for us. */
- for (p = op; p != 0; p = obj_link(p)) {
- my_words_allocd += lw;
- }
-# endif
-# if defined(GATHERSTATS)
- /* We also reclaimed memory, so we need to adjust */
- /* that count. */
- /* This should be atomic, so the results may be */
- /* inaccurate. */
- GC_mem_found += my_words_allocd;
-# endif
+ /* We also reclaimed memory, so we need to adjust */
+ /* that count. */
+ /* This should be atomic, so the results may be */
+ /* inaccurate. */
+ GC_bytes_found += my_bytes_allocd;
# ifdef PARALLEL_MARK
*result = op;
- (void)GC_atomic_add(
- (volatile GC_word *)(&GC_words_allocd_tmp),
- (GC_word)(my_words_allocd));
+ (void)AO_fetch_and_add(
+ (volatile AO_t *)(&GC_bytes_allocd_tmp),
+ (AO_t)(my_bytes_allocd));
GC_acquire_mark_lock();
-- GC_fl_builder_count;
if (GC_fl_builder_count == 0) GC_notify_all_builder();
@@ -442,7 +350,7 @@ DCL_LOCK_STATE;
(void) GC_clear_stack(0);
return;
# else
- GC_words_allocd += my_words_allocd;
+ GC_bytes_allocd += my_bytes_allocd;
goto out;
# endif
}
@@ -451,7 +359,6 @@ DCL_LOCK_STATE;
-- GC_fl_builder_count;
if (GC_fl_builder_count == 0) GC_notify_all_builder();
GC_release_mark_lock();
- DISABLE_SIGNALS();
LOCK();
/* GC lock is needed for reclaim list access. We */
/* must decrement fl_builder_count before reaquiring GC */
@@ -462,33 +369,31 @@ DCL_LOCK_STATE;
/* Next try to use prefix of global free list if there is one. */
/* We don't refill it, but we need to use it up before allocating */
/* a new block ourselves. */
- opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ opp = &(GC_obj_kinds[k].ok_freelist[lg]);
if ( (op = *opp) != 0 ) {
*opp = 0;
- my_words_allocd = 0;
+ my_bytes_allocd = 0;
for (p = op; p != 0; p = obj_link(p)) {
- my_words_allocd += lw;
- if (my_words_allocd >= BODY_SZ) {
+ my_bytes_allocd += lb;
+ if (my_bytes_allocd >= HBLKSIZE) {
*opp = obj_link(p);
obj_link(p) = 0;
break;
}
}
- GC_words_allocd += my_words_allocd;
+ GC_bytes_allocd += my_bytes_allocd;
goto out;
}
/* Next try to allocate a new block worth of objects of this size. */
{
- struct hblk *h = GC_allochblk(lw, k, 0);
+ struct hblk *h = GC_allochblk(lb, k, 0);
if (h != 0) {
if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
- GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
- - BYTES_TO_WORDS(HBLKSIZE) % lw;
+ GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
# ifdef PARALLEL_MARK
GC_acquire_mark_lock();
++ GC_fl_builder_count;
UNLOCK();
- ENABLE_SIGNALS();
GC_release_mark_lock();
# endif
@@ -515,14 +420,15 @@ DCL_LOCK_STATE;
out:
*result = op;
UNLOCK();
- ENABLE_SIGNALS();
(void) GC_clear_stack(0);
}
-GC_PTR GC_malloc_many(size_t lb)
+void * GC_malloc_many(size_t lb)
{
- ptr_t result;
- GC_generic_malloc_many(lb, NORMAL, &result);
+ void *result;
+ GC_generic_malloc_many(((lb + EXTRA_BYTES + GRANULE_BYTES-1)
+ & ~(GRANULE_BYTES-1)),
+ NORMAL, &result);
return result;
}
@@ -531,40 +437,31 @@ GC_PTR GC_malloc_many(size_t lb)
# endif
/* Allocate lb bytes of pointerful, traced, but not collectable data */
-# ifdef __STDC__
- GC_PTR GC_malloc_uncollectable(size_t lb)
-# else
- GC_PTR GC_malloc_uncollectable(lb)
- size_t lb;
-# endif
+void * GC_malloc_uncollectable(size_t lb)
{
-register ptr_t op;
-register ptr_t *opp;
-register word lw;
-DCL_LOCK_STATE;
+ void *op;
+ void **opp;
+ size_t lg;
+ DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- if (EXTRA_BYTES != 0 && lb != 0) lb--;
+ if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_uobjfreelist[lw]);
+ lg = GC_size_map[lb];
+ opp = &(GC_uobjfreelist[lg]);
FASTLOCK();
if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
/* See above comment on signals. */
*opp = obj_link(op);
obj_link(op) = 0;
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
/* Mark bit ws already set on free list. It will be */
/* cleared only temporarily during a collection, as a */
/* result of the normal free list mark bit clearing. */
- GC_non_gc_bytes += WORDS_TO_BYTES(lw);
+ GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
FASTUNLOCK();
- return((GC_PTR) op);
+ return((void *) op);
}
FASTUNLOCK();
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
@@ -577,35 +474,30 @@ DCL_LOCK_STATE;
/* mark bits. */
{
register struct hblk * h;
+ size_t lb;
h = HBLKPTR(op);
- lw = HDR(h) -> hb_sz;
+ lb = HDR(h) -> hb_sz;
- DISABLE_SIGNALS();
LOCK();
GC_set_mark_bit(op);
- GC_non_gc_bytes += WORDS_TO_BYTES(lw);
+ GC_non_gc_bytes += lb;
UNLOCK();
- ENABLE_SIGNALS();
- return((GC_PTR) op);
+ return((void *) op);
}
}
-#ifdef __STDC__
/* Not well tested nor integrated. */
/* Debug version is tricky and currently missing. */
#include <limits.h>
-GC_PTR GC_memalign(size_t align, size_t lb)
+void * GC_memalign(size_t align, size_t lb)
{
size_t new_lb;
size_t offset;
ptr_t result;
-# ifdef ALIGN_DOUBLE
- if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
-# endif
- if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
+ if (align <= GRANULE_BYTES) return GC_malloc(lb);
if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
@@ -623,71 +515,60 @@ GC_PTR GC_memalign(size_t align, size_t lb)
GC_register_displacement(offset);
}
}
- result = (GC_PTR) ((ptr_t)result + offset);
+ result = (void *) ((ptr_t)result + offset);
GC_ASSERT((word)result % align == 0);
return result;
}
-#endif
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data */
/* This is normally roughly equivalent to the system malloc. */
/* But it may be useful if malloc is redefined. */
-# ifdef __STDC__
- GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
-# else
- GC_PTR GC_malloc_atomic_uncollectable(lb)
- size_t lb;
-# endif
+void * GC_malloc_atomic_uncollectable(size_t lb)
{
-register ptr_t op;
-register ptr_t *opp;
-register word lw;
-DCL_LOCK_STATE;
+ void *op;
+ void **opp;
+ size_t lg;
+ DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- if (EXTRA_BYTES != 0 && lb != 0) lb--;
+ if (EXTRA_BYTES != 0 && lb != 0) lb--;
/* We don't need the extra byte, since this won't be */
/* collected anyway. */
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_auobjfreelist[lw]);
+ lg = GC_size_map[lg];
+ opp = &(GC_auobjfreelist[lg]);
FASTLOCK();
if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
/* See above comment on signals. */
*opp = obj_link(op);
obj_link(op) = 0;
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
/* Mark bit was already set while object was on free list. */
- GC_non_gc_bytes += WORDS_TO_BYTES(lw);
+ GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
FASTUNLOCK();
- return((GC_PTR) op);
+ return((void *) op);
}
FASTUNLOCK();
- op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
+ op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
} else {
- op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
+ op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
}
if (0 == op) return(0);
/* We don't need the lock here, since we have an undisguised */
/* pointer. We do need to hold the lock while we adjust */
/* mark bits. */
{
- register struct hblk * h;
+ struct hblk * h;
+ size_t lb;
h = HBLKPTR(op);
- lw = HDR(h) -> hb_sz;
+ lb = HDR(h) -> hb_sz;
- DISABLE_SIGNALS();
LOCK();
GC_set_mark_bit(op);
- GC_non_gc_bytes += WORDS_TO_BYTES(lw);
+ GC_non_gc_bytes += lb;
UNLOCK();
- ENABLE_SIGNALS();
- return((GC_PTR) op);
+ return((void *) op);
}
}
diff --git a/mark.c b/mark.c
index 1d39ca08..2197474d 100644
--- a/mark.c
+++ b/mark.c
@@ -32,10 +32,9 @@
#endif
/* Single argument version, robust against whole program analysis. */
-void GC_noop1(x)
-word x;
+void GC_noop1(word x)
{
- static VOLATILE word sink;
+ static volatile word sink;
sink = x;
}
@@ -104,12 +103,20 @@ word GC_n_rescuing_pages; /* Number of dirty pages we marked from */
mse * GC_mark_stack;
-mse * GC_mark_stack_limit;
+mse * GC_mark_stack_limit;;
-word GC_mark_stack_size = 0;
+size_t GC_mark_stack_size = 0;
#ifdef PARALLEL_MARK
- mse * VOLATILE GC_mark_stack_top;
+# include "atomic_ops.h"
+
+ mse * volatile GC_mark_stack_top;
+ /* Updated only with mark lock held, but read asynchronously. */
+ volatile AO_t GC_first_nonempty;
+ /* Lowest entry on mark stack */
+ /* that may be nonempty. */
+ /* Updated only by initiating */
+ /* thread. */
#else
mse * GC_mark_stack_top;
#endif
@@ -126,48 +133,54 @@ GC_bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
/* Is a collection in progress? Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the */
/* mark state is now MS_INVALID. */
-GC_bool GC_collection_in_progress()
+GC_bool GC_collection_in_progress(void)
{
return(GC_mark_state != MS_NONE);
}
/* clear all mark bits in the header */
-void GC_clear_hdr_marks(hhdr)
-register hdr * hhdr;
+void GC_clear_hdr_marks(hdr *hhdr)
{
+ int last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);
+
# ifdef USE_MARK_BYTES
BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
+ hhdr -> hb_marks[last_bit] = 1;
# else
BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
+ set_mark_bit_from_hdr(hhdr, last_bit);
# endif
+ hhdr -> hb_n_marks = 0;
}
/* Set all mark bits in the header. Used for uncollectable blocks. */
-void GC_set_hdr_marks(hhdr)
-register hdr * hhdr;
+void GC_set_hdr_marks(hdr *hhdr)
{
- register int i;
+ int i;
+ size_t sz = hhdr -> hb_sz;
+ int n_marks = FINAL_MARK_BIT(sz);
- for (i = 0; i < MARK_BITS_SZ; ++i) {
-# ifdef USE_MARK_BYTES
+# ifdef USE_MARK_BYTES
+ for (i = 0; i <= n_marks; i += MARK_BIT_OFFSET(sz)) {
hhdr -> hb_marks[i] = 1;
-# else
+ }
+# else
+ for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
hhdr -> hb_marks[i] = ONES;
-# endif
- }
+ }
+# endif
+# ifdef MARK_BIT_PER_OBJ
+ hhdr -> hb_n_marks = n_marks - 1;
+# else
+ hhdr -> hb_n_marks = HBLK_OBJS(sz);
+# endif
}
/*
* Clear all mark bits associated with block h.
*/
/*ARGSUSED*/
-# if defined(__STDC__) || defined(__cplusplus)
- static void clear_marks_for_block(struct hblk *h, word dummy)
-# else
- static void clear_marks_for_block(h, dummy)
- struct hblk *h;
- word dummy;
-# endif
+static void clear_marks_for_block(struct hblk *h, word dummy)
{
register hdr * hhdr = HDR(h);
@@ -179,34 +192,47 @@ register hdr * hhdr;
}
/* Slow but general routines for setting/clearing/asking about mark bits */
-void GC_set_mark_bit(p)
-ptr_t p;
+void GC_set_mark_bit(ptr_t p)
{
- register struct hblk *h = HBLKPTR(p);
- register hdr * hhdr = HDR(h);
- register int word_no = (word *)p - (word *)h;
+ struct hblk *h = HBLKPTR(p);
+ hdr * hhdr = HDR(h);
+ int bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
- set_mark_bit_from_hdr(hhdr, word_no);
+ if (!mark_bit_from_hdr(hhdr, bit_no)) {
+ set_mark_bit_from_hdr(hhdr, bit_no);
+ ++hhdr -> hb_n_marks;
+ }
}
-void GC_clear_mark_bit(p)
-ptr_t p;
+void GC_clear_mark_bit(ptr_t p)
{
- register struct hblk *h = HBLKPTR(p);
- register hdr * hhdr = HDR(h);
- register int word_no = (word *)p - (word *)h;
+ struct hblk *h = HBLKPTR(p);
+ hdr * hhdr = HDR(h);
+ int bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
- clear_mark_bit_from_hdr(hhdr, word_no);
+ if (mark_bit_from_hdr(hhdr, bit_no)) {
+ int n_marks;
+ clear_mark_bit_from_hdr(hhdr, bit_no);
+ n_marks = hhdr -> hb_n_marks - 1;
+# ifdef THREADS
+ if (n_marks != 0)
+ hhdr -> hb_n_marks = n_marks;
+ /* Don't decrement to zero. The counts are approximate due to */
+ /* concurrency issues, but we need to ensure that a count of */
+ /* zero implies an empty block. */
+# else
+ hhdr -> hb_n_marks = n_marks;
+# endif
+ }
}
-GC_bool GC_is_marked(p)
-ptr_t p;
+GC_bool GC_is_marked(ptr_t p)
{
- register struct hblk *h = HBLKPTR(p);
- register hdr * hhdr = HDR(h);
- register int word_no = (word *)p - (word *)h;
+ struct hblk *h = HBLKPTR(p);
+ hdr * hhdr = HDR(h);
+ int bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);
- return(mark_bit_from_hdr(hhdr, word_no));
+ return(mark_bit_from_hdr(hhdr, bit_no));
}
@@ -215,24 +241,18 @@ ptr_t p;
* the marker invariant, and sets GC_mark_state to reflect this.
* (This implicitly starts marking to reestablish the invariant.)
*/
-void GC_clear_marks()
+void GC_clear_marks(void)
{
GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
GC_objects_are_marked = FALSE;
GC_mark_state = MS_INVALID;
scan_ptr = 0;
-# ifdef GATHERSTATS
- /* Counters reflect currently marked objects: reset here */
- GC_composite_in_use = 0;
- GC_atomic_in_use = 0;
-# endif
-
}
/* Initiate a garbage collection. Initiates a full collection if the */
/* mark state is invalid. */
/*ARGSUSED*/
-void GC_initiate_gc()
+void GC_initiate_gc(void)
{
if (GC_dirty_maintained) GC_read_dirty();
# ifdef STUBBORN_ALLOC
@@ -256,7 +276,7 @@ void GC_initiate_gc()
}
-static void alloc_mark_stack();
+static void alloc_mark_stack(size_t);
/* Perform a small amount of marking. */
/* We try to touch roughly a page of memory. */
@@ -272,11 +292,9 @@ static void alloc_mark_stack();
/* exception handler, in case Windows unmaps one of our root */
/* segments. See below. In either case, we acquire the */
/* allocator lock long before we get here. */
- GC_bool GC_mark_some_inner(cold_gc_frame)
- ptr_t cold_gc_frame;
+ GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
#else
- GC_bool GC_mark_some(cold_gc_frame)
- ptr_t cold_gc_frame;
+ GC_bool GC_mark_some(ptr_t cold_gc_frame)
#endif
{
switch(GC_mark_state) {
@@ -295,12 +313,10 @@ static void alloc_mark_stack();
} else {
scan_ptr = GC_push_next_marked_dirty(scan_ptr);
if (scan_ptr == 0) {
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Marked from %lu dirty pages\n",
- (unsigned long)GC_n_rescuing_pages);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Marked from %u dirty pages\n",
+ GC_n_rescuing_pages);
+ }
GC_push_roots(FALSE, cold_gc_frame);
GC_objects_are_marked = TRUE;
if (GC_mark_state != MS_INVALID) {
@@ -344,7 +360,7 @@ static void alloc_mark_stack();
/* the allocation lock. */
if (GC_parallel) {
GC_do_parallel_mark();
- GC_ASSERT(GC_mark_stack_top < GC_first_nonempty);
+ GC_ASSERT(GC_mark_stack_top < (mse *)GC_first_nonempty);
GC_mark_stack_top = GC_mark_stack - 1;
if (GC_mark_stack_too_small) {
alloc_mark_stack(2*GC_mark_stack_size);
@@ -442,8 +458,7 @@ static void alloc_mark_stack();
# endif /* __GNUC__ */
- GC_bool GC_mark_some(cold_gc_frame)
- ptr_t cold_gc_frame;
+ GC_bool GC_mark_some(ptr_t cold_gc_frame)
{
GC_bool ret_val;
@@ -494,12 +509,10 @@ handle_ex:
# endif /* __GNUC__ */
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf0("Caught ACCESS_VIOLATION in marker. "
- "Memory mapping disappeared.\n");
- }
-# endif /* CONDPRINT */
+ if (GC_print_stats) {
+ GC_log_printf("Caught ACCESS_VIOLATION in marker. "
+ "Memory mapping disappeared.\n");
+ }
/* We have bad roots on the stack. Discard mark stack. */
/* Rescan from marked objects. Redetermine roots. */
@@ -525,79 +538,31 @@ rm_handler:
#endif /* MSWIN32 */
-GC_bool GC_mark_stack_empty()
+GC_bool GC_mark_stack_empty(void)
{
return(GC_mark_stack_top < GC_mark_stack);
}
-#ifdef PROF_MARKER
- word GC_prof_array[10];
-# define PROF(n) GC_prof_array[n]++
-#else
-# define PROF(n)
-#endif
-
-/* Given a pointer to someplace other than a small object page or the */
-/* first page of a large object, either: */
-/* - return a pointer to somewhere in the first page of the large */
-/* object, if current points to a large object. */
-/* In this case *hhdr is replaced with a pointer to the header */
-/* for the large object. */
-/* - just return current if it does not point to a large object. */
-/*ARGSUSED*/
-ptr_t GC_find_start(current, hhdr, new_hdr_p)
-register ptr_t current;
-register hdr *hhdr, **new_hdr_p;
-{
- if (GC_all_interior_pointers) {
- if (hhdr != 0) {
- register ptr_t orig = current;
-
- current = (ptr_t)HBLKPTR(current);
- do {
- current = current - HBLKSIZE*(word)hhdr;
- hhdr = HDR(current);
- } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
- /* current points to near the start of the large object */
- if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(orig);
- if ((word *)orig - (word *)current
- >= (ptrdiff_t)(hhdr->hb_sz)) {
- /* Pointer past the end of the block */
- return(orig);
- }
- *new_hdr_p = hhdr;
- return(current);
- } else {
- return(current);
- }
- } else {
- return(current);
- }
-}
-
-void GC_invalidate_mark_state()
+void GC_invalidate_mark_state(void)
{
GC_mark_state = MS_INVALID;
GC_mark_stack_top = GC_mark_stack-1;
}
-mse * GC_signal_mark_stack_overflow(msp)
-mse * msp;
+mse * GC_signal_mark_stack_overflow(mse *msp)
{
GC_mark_state = MS_INVALID;
GC_mark_stack_too_small = TRUE;
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Mark stack overflow; current size = %lu entries\n",
- GC_mark_stack_size);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Mark stack overflow; current size = %lu entries\n",
+ GC_mark_stack_size);
+ }
return(msp - GC_MARK_STACK_DISCARDS);
}
/*
* Mark objects pointed to by the regions described by
- * mark stack entries between GC_mark_stack and GC_mark_stack_top,
+ * mark stack entries between mark_stack and mark_stack_top,
* inclusive. Assumes the upper limit of a mark stack entry
* is never 0. A mark stack entry never has size 0.
* We try to traverse on the order of a hblk of memory before we return.
@@ -609,19 +574,16 @@ mse * msp;
* encoding, we optionally maintain a cache for the block address to
* header mapping, we prefetch when an object is "grayed", etc.
*/
-mse * GC_mark_from(mark_stack_top, mark_stack, mark_stack_limit)
-mse * mark_stack_top;
-mse * mark_stack;
-mse * mark_stack_limit;
+mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack, mse *mark_stack_limit)
{
int credit = HBLKSIZE; /* Remaining credit for marking work */
- register word * current_p; /* Pointer to current candidate ptr. */
- register word current; /* Candidate pointer. */
- register word * limit; /* (Incl) limit of current candidate */
+ ptr_t current_p; /* Pointer to current candidate ptr. */
+ word current; /* Candidate pointer. */
+ ptr_t limit; /* (Incl) limit of current candidate */
/* range */
- register word descr;
- register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
- register ptr_t least_ha = GC_least_plausible_heap_addr;
+ word descr;
+ ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ ptr_t least_ha = GC_least_plausible_heap_addr;
DECLARE_HDR_CACHE;
# define SPLIT_RANGE_WORDS 128 /* Must be power of 2. */
@@ -652,6 +614,13 @@ mse * mark_stack_limit;
/* stack. */
GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
- (word)GC_least_plausible_heap_addr);
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr >= current_p
+ && GC_trace_addr < current_p + descr) {
+ GC_log_printf("GC:%d Large section; start %p len %lu\n",
+ GC_gc_no, current_p, (unsigned long) descr);
+ }
+# endif /* ENABLE_TRACE */
# ifdef PARALLEL_MARK
# define SHARE_BYTES 2048
if (descr > SHARE_BYTES && GC_parallel
@@ -662,49 +631,84 @@ mse * mark_stack_limit;
/* makes sure we handle */
/* misaligned pointers. */
mark_stack_top++;
- current_p = (word *) ((char *)current_p + new_size);
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr >= current_p
+ && GC_trace_addr < current_p + descr) {
+ GC_log_printf("GC:%d splitting (parallel) %p at %p\n",
+ GC_gc_no, current_p, current_p + new_size);
+ }
+# endif /* ENABLE_TRACE */
+ current_p += new_size;
descr -= new_size;
goto retry;
}
# endif /* PARALLEL_MARK */
mark_stack_top -> mse_start =
- limit = current_p + SPLIT_RANGE_WORDS-1;
+ limit = current_p + WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
mark_stack_top -> mse_descr =
descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr >= current_p
+ && GC_trace_addr < current_p + descr) {
+ GC_log_printf("GC:%d splitting %p at %p\n",
+ GC_gc_no, current_p, limit);
+ }
+# endif /* ENABLE_TRACE */
/* Make sure that pointers overlapping the two ranges are */
/* considered. */
- limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
+ limit += sizeof(word) - ALIGNMENT;
break;
case GC_DS_BITMAP:
mark_stack_top--;
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr >= current_p
+ && GC_trace_addr < current_p + WORDS_TO_BYTES(WORDSZ-2)) {
+ GC_log_printf("GC:%d Tracing from %p bitmap descr %lu\n",
+ GC_gc_no, current_p, (unsigned long) descr);
+ }
+# endif /* ENABLE_TRACE */
descr &= ~GC_DS_TAGS;
credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
while (descr != 0) {
if ((signed_word)descr < 0) {
- current = *current_p;
+ current = *(word *)current_p;
FIXUP_POINTER(current);
if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
PREFETCH((ptr_t)current);
- HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr == current_p) {
+ GC_log_printf("GC:%d Considering(3) %p -> %p\n",
+ GC_gc_no, current_p, (ptr_t) current);
+ }
+# endif /* ENABLE_TRACE */
+ PUSH_CONTENTS((ptr_t)current, mark_stack_top,
mark_stack_limit, current_p, exit1);
}
}
descr <<= 1;
- ++ current_p;
+ current_p += sizeof(word);
}
continue;
case GC_DS_PROC:
mark_stack_top--;
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr >= current_p
+ && GC_base(current_p) != 0
+ && GC_base(current_p) == GC_base(GC_trace_addr)) {
+ GC_log_printf("GC:%d Tracing from %p proc descr %lu\n",
+ GC_gc_no, current_p, (unsigned long) descr);
+ }
+# endif /* ENABLE_TRACE */
credit -= GC_PROC_BYTES;
mark_stack_top =
(*PROC(descr))
- (current_p, mark_stack_top,
+ ((word *)current_p, mark_stack_top,
mark_stack_limit, ENV(descr));
continue;
case GC_DS_PER_OBJECT:
if ((signed_word)descr >= 0) {
/* Descriptor is in the object. */
- descr = *(word *)((ptr_t)current_p + descr - GC_DS_PER_OBJECT);
+ descr = *(word *)(current_p + descr - GC_DS_PER_OBJECT);
} else {
/* Descriptor is in type descriptor pointed to by first */
/* word in object. */
@@ -727,7 +731,7 @@ mse * mark_stack_limit;
}
if (0 == descr) {
/* Can happen either because we generated a 0 descriptor */
- /* or we saw a pointer to a free object. */
+ /* or we saw a pointer to a free object. */
mark_stack_top--;
continue;
}
@@ -735,12 +739,19 @@ mse * mark_stack_limit;
}
} else /* Small object with length descriptor */ {
mark_stack_top--;
- limit = (word *)(((ptr_t)current_p) + (word)descr);
+ limit = current_p + (word)descr;
}
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr >= current_p
+ && GC_trace_addr < limit) {
+ GC_log_printf("GC:%d Tracing from %p len %lu\n",
+ GC_gc_no, current_p, (unsigned long) descr);
+ }
+# endif /* ENABLE_TRACE */
/* The simple case in which we're scanning a range. */
GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
- credit -= (ptr_t)limit - (ptr_t)current_p;
- limit -= 1;
+ credit -= limit - current_p;
+ limit -= sizeof(word);
{
# define PREF_DIST 4
@@ -754,11 +765,11 @@ mse * mark_stack_limit;
/* generating slightly better code. Overall gcc code quality */
/* for this loop is still not great. */
for(;;) {
- PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
+ PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE);
GC_ASSERT(limit >= current_p);
- deferred = *limit;
+ deferred = *(word *)limit;
FIXUP_POINTER(deferred);
- limit = (word *)((char *)limit - ALIGNMENT);
+ limit -= ALIGNMENT;
if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
PREFETCH((ptr_t)deferred);
break;
@@ -766,9 +777,9 @@ mse * mark_stack_limit;
if (current_p > limit) goto next_object;
/* Unroll once, so we don't do too many of the prefetches */
/* based on limit. */
- deferred = *limit;
+ deferred = *(word *)limit;
FIXUP_POINTER(deferred);
- limit = (word *)((char *)limit - ALIGNMENT);
+ limit -= ALIGNMENT;
if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
PREFETCH((ptr_t)deferred);
break;
@@ -779,26 +790,38 @@ mse * mark_stack_limit;
while (current_p <= limit) {
/* Empirically, unrolling this loop doesn't help a lot. */
- /* Since HC_PUSH_CONTENTS expands to a lot of code, */
+ /* Since PUSH_CONTENTS expands to a lot of code, */
/* we don't. */
- current = *current_p;
+ current = *(word *)current_p;
FIXUP_POINTER(current);
- PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
+ PREFETCH(current_p + PREF_DIST*CACHE_LINE_SIZE);
if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
/* Prefetch the contents of the object we just pushed. It's */
/* likely we will need them soon. */
PREFETCH((ptr_t)current);
- HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr == current_p) {
+ GC_log_printf("GC:%d Considering(1) %p -> %p\n",
+ GC_gc_no, current_p, (ptr_t) current);
+ }
+# endif /* ENABLE_TRACE */
+ PUSH_CONTENTS((ptr_t)current, mark_stack_top,
mark_stack_limit, current_p, exit2);
}
- current_p = (word *)((char *)current_p + ALIGNMENT);
+ current_p += ALIGNMENT;
}
# ifndef SMALL_CONFIG
/* We still need to mark the entry we previously prefetched. */
- /* We alrady know that it passes the preliminary pointer */
+ /* We already know that it passes the preliminary pointer */
/* validity test. */
- HC_PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
+# ifdef ENABLE_TRACE
+ if (GC_trace_addr == current_p) {
+ GC_log_printf("GC:%d Considering(2) %p -> %p\n",
+ GC_gc_no, current_p, (ptr_t) deferred);
+ }
+# endif /* ENABLE_TRACE */
+ PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
mark_stack_limit, current_p, exit4);
next_object:;
# endif
@@ -813,7 +836,6 @@ mse * mark_stack_limit;
GC_bool GC_help_wanted = FALSE;
unsigned GC_helper_count = 0;
unsigned GC_active_count = 0;
-mse * VOLATILE GC_first_nonempty;
word GC_mark_no = 0;
#define LOCAL_MARK_STACK_SIZE HBLKSIZE
@@ -834,33 +856,20 @@ mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
mse *top = local - 1;
unsigned i = 0;
- /* Make sure that prior writes to the mark stack are visible. */
- /* On some architectures, the fact that the reads are */
- /* volatile should suffice. */
-# if !defined(IA64) && !defined(HP_PA) && !defined(I386)
- GC_memory_barrier();
-# endif
GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size);
for (p = low; p <= high && i <= max; ++p) {
- word descr = *(volatile word *) &(p -> mse_descr);
- /* In the IA64 memory model, the following volatile store is */
- /* ordered after this read of descr. Thus a thread must read */
- /* the original nonzero value. HP_PA appears to be similar, */
- /* and if I'm reading the P4 spec correctly, X86 is probably */
- /* also OK. In some other cases we need a barrier. */
-# if !defined(IA64) && !defined(HP_PA) && !defined(I386)
- GC_memory_barrier();
-# endif
+ word descr = AO_load((volatile AO_t *) &(p -> mse_descr));
if (descr != 0) {
- *(volatile word *) &(p -> mse_descr) = 0;
+ /* Must be ordered after read of descr: */
+ AO_store_release_write((volatile AO_t *) &(p -> mse_descr), 0);
/* More than one thread may get this entry, but that's only */
/* a minor performance problem. */
++top;
top -> mse_descr = descr;
top -> mse_start = p -> mse_start;
- GC_ASSERT( top -> mse_descr & GC_DS_TAGS != GC_DS_LENGTH ||
- top -> mse_descr < GC_greatest_plausible_heap_addr
- - GC_least_plausible_heap_addr);
+ GC_ASSERT( (top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
+ top -> mse_descr < GC_greatest_plausible_heap_addr
+ - GC_least_plausible_heap_addr);
/* If this is a big object, count it as */
/* size/256 + 1 objects. */
++i;
@@ -882,25 +891,22 @@ void GC_return_mark_stack(mse * low, mse * high)
if (high < low) return;
stack_size = high - low + 1;
GC_acquire_mark_lock();
- my_top = GC_mark_stack_top;
+ my_top = GC_mark_stack_top; /* Concurrent modification impossible. */
my_start = my_top + 1;
if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf0("No room to copy back mark stack.");
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("No room to copy back mark stack.");
+ }
GC_mark_state = MS_INVALID;
GC_mark_stack_too_small = TRUE;
/* We drop the local mark stack. We'll fix things later. */
} else {
BCOPY(low, my_start, stack_size * sizeof(mse));
- GC_ASSERT(GC_mark_stack_top = my_top);
-# if !defined(IA64) && !defined(HP_PA)
- GC_memory_barrier();
-# endif
- /* On IA64, the volatile write acts as a release barrier. */
- GC_mark_stack_top = my_top + stack_size;
+ GC_ASSERT((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
+ == my_top);
+ AO_store_release_write((volatile AO_t *)(&GC_mark_stack_top),
+ (AO_t)(my_top + stack_size));
+ /* Ensures visibility of previously written stack contents. */
}
GC_release_mark_lock();
GC_notify_all_marker();
@@ -930,15 +936,15 @@ void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
return;
}
}
- if (GC_mark_stack_top < GC_first_nonempty &&
- GC_active_count < GC_helper_count
+ if ((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
+ < (mse *)AO_load(&GC_first_nonempty)
+ && GC_active_count < GC_helper_count
&& local_top > local_mark_stack + 1) {
/* Try to share the load, since the main stack is empty, */
/* and helper threads are waiting for a refill. */
/* The entries near the bottom of the stack are likely */
/* to require more work. Thus we return those, eventhough */
/* it's harder. */
- mse * p;
mse * new_bottom = local_mark_stack
+ (local_top - local_mark_stack)/2;
GC_ASSERT(new_bottom > local_mark_stack
@@ -968,41 +974,44 @@ void GC_mark_local(mse *local_mark_stack, int id)
GC_acquire_mark_lock();
GC_active_count++;
- my_first_nonempty = GC_first_nonempty;
- GC_ASSERT(GC_first_nonempty >= GC_mark_stack &&
- GC_first_nonempty <= GC_mark_stack_top + 1);
-# ifdef PRINTSTATS
- GC_printf1("Starting mark helper %lu\n", (unsigned long)id);
-# endif
+ my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
+ GC_ASSERT((mse *)AO_load(&GC_first_nonempty) >= GC_mark_stack &&
+ (mse *)AO_load(&GC_first_nonempty) <=
+ (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Starting mark helper %lu\n", (unsigned long)id);
GC_release_mark_lock();
for (;;) {
size_t n_on_stack;
size_t n_to_get;
- mse *next;
mse * my_top;
mse * local_top;
- mse * global_first_nonempty = GC_first_nonempty;
+ mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
- my_first_nonempty <= GC_mark_stack_top + 1);
+ my_first_nonempty <=
+ (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
- global_first_nonempty <= GC_mark_stack_top + 1);
+ global_first_nonempty <=
+ (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
if (my_first_nonempty < global_first_nonempty) {
my_first_nonempty = global_first_nonempty;
} else if (global_first_nonempty < my_first_nonempty) {
- GC_compare_and_exchange((word *)(&GC_first_nonempty),
- (word) global_first_nonempty,
- (word) my_first_nonempty);
+ AO_compare_and_swap(&GC_first_nonempty,
+ (AO_t) global_first_nonempty,
+ (AO_t) my_first_nonempty);
/* If this fails, we just go ahead, without updating */
/* GC_first_nonempty. */
}
/* Perhaps we should also update GC_first_nonempty, if it */
/* is less. But that would require using atomic updates. */
- my_top = GC_mark_stack_top;
+ my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
n_on_stack = my_top - my_first_nonempty + 1;
if (0 == n_on_stack) {
GC_acquire_mark_lock();
my_top = GC_mark_stack_top;
+ /* Asynchronous modification impossible here, */
+ /* since we hold mark lock. */
n_on_stack = my_top - my_first_nonempty + 1;
if (0 == n_on_stack) {
GC_active_count--;
@@ -1011,14 +1020,15 @@ void GC_mark_local(mse *local_mark_stack, int id)
/* on the stack. */
if (0 == GC_active_count) GC_notify_all_marker();
while (GC_active_count > 0
- && GC_first_nonempty > GC_mark_stack_top) {
+ && (mse *)AO_load(&GC_first_nonempty)
+ > GC_mark_stack_top) {
/* We will be notified if either GC_active_count */
/* reaches zero, or if more objects are pushed on */
/* the global mark stack. */
GC_wait_marker();
}
if (GC_active_count == 0 &&
- GC_first_nonempty > GC_mark_stack_top) {
+ (mse *)AO_load(&GC_first_nonempty) > GC_mark_stack_top) {
GC_bool need_to_notify = FALSE;
/* The above conditions can't be falsified while we */
/* hold the mark lock, since neither */
@@ -1028,10 +1038,9 @@ void GC_mark_local(mse *local_mark_stack, int id)
/* both conditions actually held simultaneously. */
GC_helper_count--;
if (0 == GC_helper_count) need_to_notify = TRUE;
-# ifdef PRINTSTATS
- GC_printf1(
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf(
"Finished mark helper %lu\n", (unsigned long)id);
-# endif
GC_release_mark_lock();
if (need_to_notify) GC_notify_all_marker();
return;
@@ -1052,7 +1061,8 @@ void GC_mark_local(mse *local_mark_stack, int id)
local_mark_stack, n_to_get,
&my_first_nonempty);
GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
- my_first_nonempty <= GC_mark_stack_top + 1);
+ my_first_nonempty <=
+ (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
GC_do_local_mark(local_mark_stack, local_top);
}
}
@@ -1064,8 +1074,6 @@ void GC_mark_local(mse *local_mark_stack, int id)
void GC_do_parallel_mark()
{
mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
- mse * local_top;
- mse * my_top;
GC_acquire_mark_lock();
GC_ASSERT(I_HOLD_LOCK());
@@ -1073,11 +1081,10 @@ void GC_do_parallel_mark()
/* all the time, especially since it's cheap. */
if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
ABORT("Tried to start parallel mark in bad state");
-# ifdef PRINTSTATS
- GC_printf1("Starting marking for mark phase number %lu\n",
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Starting marking for mark phase number %lu\n",
(unsigned long)GC_mark_no);
-# endif
- GC_first_nonempty = GC_mark_stack;
+ GC_first_nonempty = (AO_t)GC_mark_stack;
GC_active_count = 0;
GC_helper_count = 1;
GC_help_wanted = TRUE;
@@ -1090,11 +1097,10 @@ void GC_do_parallel_mark()
/* Done; clean up. */
while (GC_helper_count > 0) GC_wait_marker();
/* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
-# ifdef PRINTSTATS
- GC_printf1(
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf(
"Finished marking for mark phase number %lu\n",
(unsigned long)GC_mark_no);
-# endif
GC_mark_no++;
GC_release_mark_lock();
GC_notify_all_marker();
@@ -1107,12 +1113,11 @@ void GC_help_marker(word my_mark_no)
{
mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
unsigned my_id;
- mse * my_first_nonempty;
if (!GC_parallel) return;
GC_acquire_mark_lock();
while (GC_mark_no < my_mark_no
- || !GC_help_wanted && GC_mark_no == my_mark_no) {
+ || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
GC_wait_marker();
}
my_id = GC_helper_count;
@@ -1130,10 +1135,9 @@ void GC_help_marker(word my_mark_no)
#endif /* PARALLEL_MARK */
-/* Allocate or reallocate space for mark stack of size s words */
-/* May silently fail. */
-static void alloc_mark_stack(n)
-word n;
+/* Allocate or reallocate space for mark stack of size n entries. */
+/* May silently fail. */
+static void alloc_mark_stack(size_t n)
{
mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
@@ -1153,23 +1157,19 @@ word n;
GC_mark_stack = new_stack;
GC_mark_stack_size = n;
GC_mark_stack_limit = new_stack + n;
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Grew mark stack to %lu frames\n",
- (unsigned long) GC_mark_stack_size);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Grew mark stack to %lu frames\n",
+ (unsigned long) GC_mark_stack_size);
+ }
} else {
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Failed to grow mark stack to %lu frames\n",
- (unsigned long) n);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Failed to grow mark stack to %lu frames\n",
+ (unsigned long) n);
+ }
}
} else {
if (new_stack == 0) {
- GC_err_printf0("No space for mark stack\n");
+ GC_err_printf("No space for mark stack\n");
EXIT();
}
GC_mark_stack = new_stack;
@@ -1209,7 +1209,7 @@ ptr_t top;
length += GC_DS_TAGS;
length &= ~GC_DS_TAGS;
# endif
- GC_mark_stack_top -> mse_start = (word *)bottom;
+ GC_mark_stack_top -> mse_start = bottom;
GC_mark_stack_top -> mse_descr = length;
}
@@ -1223,13 +1223,11 @@ ptr_t top;
* or if it marks each object before pushing it, thus ensuring progress
* in the event of a stack overflow.)
*/
-void GC_push_selected(bottom, top, dirty_fn, push_fn)
-ptr_t bottom;
-ptr_t top;
-int (*dirty_fn) GC_PROTO((struct hblk * h));
-void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top));
+void GC_push_selected(ptr_t bottom, ptr_t top,
+ int (*dirty_fn) (struct hblk *),
+ void (*push_fn) (ptr_t, ptr_t))
{
- register struct hblk * h;
+ struct hblk * h;
bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));
@@ -1280,10 +1278,7 @@ void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top));
#endif
-void GC_push_conditional(bottom, top, all)
-ptr_t bottom;
-ptr_t top;
-int all;
+void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
{
if (all) {
if (GC_dirty_maintained) {
@@ -1303,32 +1298,45 @@ int all;
#endif
# if defined(MSWIN32) || defined(MSWINCE)
- void __cdecl GC_push_one(p)
+ void __cdecl GC_push_one(word p)
# else
- void GC_push_one(p)
+ void GC_push_one(word p)
# endif
-word p;
{
- GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
+ GC_PUSH_ONE_STACK((ptr_t)p, MARKED_FROM_REGISTER);
}
-struct GC_ms_entry *GC_mark_and_push(obj, mark_stack_ptr, mark_stack_limit, src)
-GC_PTR obj;
-struct GC_ms_entry * mark_stack_ptr;
-struct GC_ms_entry * mark_stack_limit;
-GC_PTR *src;
+struct GC_ms_entry *GC_mark_and_push(void *obj,
+ mse *mark_stack_ptr,
+ mse *mark_stack_limit,
+ void **src)
{
- PREFETCH(obj);
- PUSH_CONTENTS(obj, mark_stack_ptr /* modified */, mark_stack_limit, src,
- was_marked /* internally generated exit label */);
- return mark_stack_ptr;
-}
+ hdr * hhdr;
+
+ PREFETCH(obj);
+ GET_HDR(obj, hhdr);
+ if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
+ if (GC_all_interior_pointers) {
+ hhdr = GC_find_header(GC_base(obj));
+ if (hhdr == 0) {
+ GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
+ return mark_stack_ptr;
+ }
+ } else {
+ GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
+ return mark_stack_ptr;
+ }
+ }
+ if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
+ GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
+ return mark_stack_ptr;
+ }
-# ifdef __STDC__
-# define BASE(p) (word)GC_base((void *)(p))
-# else
-# define BASE(p) (word)GC_base((char *)(p))
-# endif
+ PUSH_CONTENTS_HDR(obj, mark_stack_ptr /* modified */, mark_stack_limit,
+ src, was_marked, hhdr, TRUE);
+ was_marked:
+ return mark_stack_ptr;
+}
/* Mark and push (i.e. gray) a single object p onto the main */
/* mark stack. Consider p to be valid if it is an interior */
@@ -1338,62 +1346,38 @@ GC_PTR *src;
/* Mark bits are NOT atomically updated. Thus this must be the */
/* only thread setting them. */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
- void GC_mark_and_push_stack(p, source)
- ptr_t source;
+ void GC_mark_and_push_stack(ptr_t p, ptr_t source)
# else
- void GC_mark_and_push_stack(p)
+ void GC_mark_and_push_stack(ptr_t p)
# define source 0
# endif
-register word p;
{
- register word r;
- register hdr * hhdr;
- register int displ;
+ hdr * hhdr;
+ ptr_t r = p;
+ PREFETCH(p);
GET_HDR(p, hhdr);
- if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
if (hhdr != 0) {
- r = BASE(p);
+ r = GC_base(p);
hhdr = HDR(r);
- displ = BYTES_TO_WORDS(HBLKDISPL(r));
}
- } else {
- register map_entry_type map_entry;
-
- displ = HBLKDISPL(p);
- map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
- if (map_entry >= MAX_OFFSET) {
- if (map_entry == OFFSET_TOO_BIG || !GC_all_interior_pointers) {
- r = BASE(p);
- displ = BYTES_TO_WORDS(HBLKDISPL(r));
- if (r == 0) hhdr = 0;
- } else {
- /* Offset invalid, but map reflects interior pointers */
- hhdr = 0;
- }
- } else {
- displ = BYTES_TO_WORDS(displ);
- displ -= map_entry;
- r = (word)((word *)(HBLKPTR(p)) + displ);
- }
- }
- /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
- /* displ is the word index within the block. */
- if (hhdr == 0) {
-# ifdef PRINT_BLACK_LIST
- GC_add_to_black_list_stack(p, source);
-# else
- GC_add_to_black_list_stack(p);
-# endif
-# undef source /* In case we had to define it. */
- } else {
- if (!mark_bit_from_hdr(hhdr, displ)) {
- set_mark_bit_from_hdr(hhdr, displ);
- GC_STORE_BACK_PTR(source, (ptr_t)r);
- PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
- GC_mark_stack_limit);
+ if (hhdr == 0) {
+ GC_ADD_TO_BLACK_LIST_STACK(p, source);
+ return;
}
}
+ if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, src);
+ return;
+ }
+ PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
+ source, mark_and_push_exit, hhdr, FALSE);
+ mark_and_push_exit: ;
+ /* We silently ignore pointers to near the end of a block, */
+ /* which is very mildly suboptimal. */
+ /* FIXME: We should probably add a header word to address */
+ /* this. */
}
# ifdef TRACE_BUF
@@ -1403,7 +1387,7 @@ register word p;
struct trace_entry {
char * kind;
word gc_no;
- word words_allocd;
+ word bytes_allocd;
word arg1;
word arg2;
} GC_trace_buf[TRACE_ENTRIES];
@@ -1414,7 +1398,7 @@ void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
GC_trace_buf[GC_trace_buf_ptr].kind = kind;
GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
- GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
+ GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
GC_trace_buf_ptr++;
@@ -1431,8 +1415,8 @@ void GC_print_trace(word gc_no, GC_bool lock)
if (i < 0) i = TRACE_ENTRIES-1;
p = GC_trace_buf + i;
if (p -> gc_no < gc_no || p -> kind == 0) return;
- printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
- p -> kind, p -> gc_no, p -> words_allocd,
+ printf("Trace:%s (gc:%d,bytes:%d) 0x%X, 0x%X\n",
+ p -> kind, p -> gc_no, p -> bytes_allocd,
(p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
}
printf("Trace incomplete\n");
@@ -1446,9 +1430,7 @@ void GC_print_trace(word gc_no, GC_bool lock)
* and scans the entire region immediately, in case the contents
* change.
*/
-void GC_push_all_eager(bottom, top)
-ptr_t bottom;
-ptr_t top;
+void GC_push_all_eager(ptr_t bottom, ptr_t top)
{
word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
@@ -1464,9 +1446,9 @@ ptr_t top;
/* check all pointers in range and push if they appear */
/* to be valid. */
lim = t - 1 /* longword */;
- for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
+ for (p = b; p <= lim; p = (word *)(((ptr_t)p) + ALIGNMENT)) {
q = *p;
- GC_PUSH_ONE_STACK(q, p);
+ GC_PUSH_ONE_STACK((ptr_t)q, p);
}
# undef GC_greatest_plausible_heap_addr
# undef GC_least_plausible_heap_addr
@@ -1480,10 +1462,8 @@ ptr_t top;
* Cold_gc_frame delimits the stack section that must be scanned
* eagerly. A zero value indicates that no eager scanning is needed.
*/
-void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
-ptr_t bottom;
-ptr_t top;
-ptr_t cold_gc_frame;
+void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
+ ptr_t cold_gc_frame)
{
if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
# define EAGER_BYTES 1024
@@ -1511,9 +1491,7 @@ ptr_t cold_gc_frame;
}
#endif /* !THREADS */
-void GC_push_all_stack(bottom, top)
-ptr_t bottom;
-ptr_t top;
+void GC_push_all_stack(ptr_t bottom, ptr_t top)
{
if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
GC_push_all(bottom, top);
@@ -1525,9 +1503,7 @@ ptr_t top;
#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects. */
-void GC_push_marked1(h, hhdr)
-struct hblk *h;
-register hdr * hhdr;
+void GC_push_marked1(struct hblk *h, hdr *hhdr)
{
word * mark_word_addr = &(hhdr->hb_marks[0]);
register word *p;
@@ -1573,9 +1549,7 @@ register hdr * hhdr;
/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects. */
-void GC_push_marked2(h, hhdr)
-struct hblk *h;
-register hdr * hhdr;
+void GC_push_marked2(struct hblk *h, hdr *hhdr)
{
word * mark_word_addr = &(hhdr->hb_marks[0]);
register word *p;
@@ -1622,9 +1596,7 @@ register hdr * hhdr;
/* of size 4 objects. */
/* There is a risk of mark stack overflow here. But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely. */
-void GC_push_marked4(h, hhdr)
-struct hblk *h;
-register hdr * hhdr;
+void GC_push_marked4(struct hblk *h, hdr *hhdr)
{
word * mark_word_addr = &(hhdr->hb_marks[0]);
register word *p;
@@ -1676,30 +1648,28 @@ register hdr * hhdr;
#endif /* SMALL_CONFIG */
/* Push all objects reachable from marked objects in the given block */
-void GC_push_marked(h, hhdr)
-struct hblk *h;
-register hdr * hhdr;
+void GC_push_marked(struct hblk *h, hdr *hhdr)
{
- register int sz = hhdr -> hb_sz;
- register int descr = hhdr -> hb_descr;
- register word * p;
- register int word_no;
- register word * lim;
- register mse * GC_mark_stack_top_reg;
- register mse * mark_stack_limit = GC_mark_stack_limit;
+ int sz = hhdr -> hb_sz;
+ int descr = hhdr -> hb_descr;
+ ptr_t p;
+ int bit_no;
+ ptr_t lim;
+ mse * GC_mark_stack_top_reg;
+ mse * mark_stack_limit = GC_mark_stack_limit;
/* Some quick shortcuts: */
if ((0 | GC_DS_LENGTH) == descr) return;
if (GC_block_empty(hhdr)/* nothing marked */) return;
GC_n_rescuing_pages++;
GC_objects_are_marked = TRUE;
- if (sz > MAXOBJSZ) {
- lim = (word *)h;
+ if (sz > MAXOBJBYTES) {
+ lim = h -> hb_body;
} else {
- lim = (word *)(h + 1) - sz;
+ lim = (h + 1)->hb_body - sz;
}
- switch(sz) {
+ switch(BYTES_TO_WORDS(sz)) {
# if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
case 1:
GC_push_marked1(h, hhdr);
@@ -1716,15 +1686,11 @@ register hdr * hhdr;
# endif
default:
GC_mark_stack_top_reg = GC_mark_stack_top;
- for (p = (word *)h, word_no = 0; p <= lim; p += sz, word_no += sz) {
- if (mark_bit_from_hdr(hhdr, word_no)) {
+ for (p = h -> hb_body, bit_no = 0; p <= lim;
+ p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
+ if (mark_bit_from_hdr(hhdr, bit_no)) {
/* Mark from fields inside the object */
- PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
-# ifdef GATHERSTATS
- /* Subtract this object from total, since it was */
- /* added in twice. */
- GC_composite_in_use -= sz;
-# endif
+ PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
}
}
GC_mark_stack_top = GC_mark_stack_top_reg;
@@ -1733,17 +1699,14 @@ register hdr * hhdr;
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
-GC_bool GC_block_was_dirty(h, hhdr)
-struct hblk *h;
-register hdr * hhdr;
+GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
{
- register int sz = hhdr -> hb_sz;
+ int sz = hhdr -> hb_sz;
- if (sz <= MAXOBJSZ) {
+ if (sz <= MAXOBJBYTES) {
return(GC_page_was_dirty(h));
} else {
- register ptr_t p = (ptr_t)h;
- sz = WORDS_TO_BYTES(sz);
+ ptr_t p = (ptr_t)h;
while (p < (ptr_t)h + sz) {
if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
p += HBLKSIZE;
@@ -1754,30 +1717,32 @@ register hdr * hhdr;
#endif /* SMALL_CONFIG */
/* Similar to GC_push_next_marked, but return address of next block */
-struct hblk * GC_push_next_marked(h)
-struct hblk *h;
+struct hblk * GC_push_next_marked(struct hblk *h)
{
- register hdr * hhdr;
+ hdr * hhdr = HDR(h);
- h = GC_next_used_block(h);
- if (h == 0) return(0);
- hhdr = HDR(h);
+ if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
+ h = GC_next_used_block(h);
+ if (h == 0) return(0);
+ hhdr = GC_find_header((ptr_t)h);
+ }
GC_push_marked(h, hhdr);
return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages */
-struct hblk * GC_push_next_marked_dirty(h)
-struct hblk *h;
+struct hblk * GC_push_next_marked_dirty(struct hblk *h)
{
- register hdr * hhdr;
+ hdr * hhdr = HDR(h);
if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
for (;;) {
- h = GC_next_used_block(h);
- if (h == 0) return(0);
- hhdr = HDR(h);
+ if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
+ h = GC_next_used_block(h);
+ if (h == 0) return(0);
+ hhdr = GC_find_header((ptr_t)h);
+ }
# ifdef STUBBORN_ALLOC
if (hhdr -> hb_obj_kind == STUBBORN) {
if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
@@ -1790,6 +1755,7 @@ struct hblk *h;
if (GC_block_was_dirty(h, hhdr)) break;
# endif
h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ hhdr = HDR(h);
}
GC_push_marked(h, hhdr);
return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
@@ -1798,20 +1764,21 @@ struct hblk *h;
/* Similar to above, but for uncollectable pages. Needed since we */
/* do not clear marks for such pages, even for full collections. */
-struct hblk * GC_push_next_marked_uncollectable(h)
-struct hblk *h;
+struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
{
- register hdr * hhdr = HDR(h);
+ hdr * hhdr = HDR(h);
for (;;) {
- h = GC_next_used_block(h);
- if (h == 0) return(0);
- hhdr = HDR(h);
+ if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
+ h = GC_next_used_block(h);
+ if (h == 0) return(0);
+ hhdr = GC_find_header((ptr_t)h);
+ }
if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ hhdr = HDR(h);
}
GC_push_marked(h, hhdr);
return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
-
diff --git a/mark_rts.c b/mark_rts.c
index 55eb5d54..50ac09ac 100644
--- a/mark_rts.c
+++ b/mark_rts.c
@@ -40,26 +40,26 @@ static int n_root_sets = 0;
# if !defined(NO_DEBUGGING)
/* For debugging: */
-void GC_print_static_roots()
+void GC_print_static_roots(void)
{
register int i;
size_t total = 0;
for (i = 0; i < n_root_sets; i++) {
- GC_printf2("From 0x%lx to 0x%lx ",
- (unsigned long) GC_static_roots[i].r_start,
- (unsigned long) GC_static_roots[i].r_end);
+ GC_printf("From %p to %p ",
+ GC_static_roots[i].r_start,
+ GC_static_roots[i].r_end);
if (GC_static_roots[i].r_tmp) {
- GC_printf0(" (temporary)\n");
+ GC_printf(" (temporary)\n");
} else {
- GC_printf0("\n");
+ GC_printf("\n");
}
total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
}
- GC_printf1("Total size: %ld\n", (unsigned long) total);
+ GC_printf("Total size: %ld\n", (unsigned long) total);
if (GC_root_size != total) {
- GC_printf1("GC_root_size incorrect: %ld!!\n",
- (unsigned long) GC_root_size);
+ GC_printf("GC_root_size incorrect: %ld!!\n",
+ (unsigned long) GC_root_size);
}
}
# endif /* NO_DEBUGGING */
@@ -67,8 +67,7 @@ void GC_print_static_roots()
/* Primarily for debugging support: */
/* Is the address p in one of the registered static */
/* root sections? */
-GC_bool GC_is_static_root(p)
-ptr_t p;
+GC_bool GC_is_static_root(ptr_t p)
{
static int last_root_set = MAX_ROOT_SETS;
register int i;
@@ -98,8 +97,7 @@ ptr_t p;
-- really defined in gc_priv.h
*/
-static int rt_hash(addr)
-char * addr;
+static INLINE int rt_hash(ptr_t addr)
{
word result = (word) addr;
# if CPP_WORDSZ > 8*LOG_RT_SIZE
@@ -116,11 +114,10 @@ char * addr;
/* Is a range starting at b already in the table? If so return a */
/* pointer to it, else NIL. */
-struct roots * GC_roots_present(b)
-char *b;
+struct roots * GC_roots_present(ptr_t b)
{
- register int h = rt_hash(b);
- register struct roots *p = GC_root_index[h];
+ int h = rt_hash(b);
+ struct roots *p = GC_root_index[h];
while (p != 0) {
if (p -> r_start == (ptr_t)b) return(p);
@@ -130,10 +127,9 @@ char *b;
}
/* Add the given root structure to the index. */
-static void add_roots_to_index(p)
-struct roots *p;
+static void add_roots_to_index(struct roots *p)
{
- register int h = rt_hash(p -> r_start);
+ int h = rt_hash(p -> r_start);
p -> r_next = GC_root_index[h];
GC_root_index[h] = p;
@@ -150,16 +146,13 @@ struct roots *p;
word GC_root_size = 0;
-void GC_add_roots(b, e)
-char * b; char * e;
+void GC_add_roots(void *b, void *e)
{
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
- GC_add_roots_inner(b, e, FALSE);
+ GC_add_roots_inner((ptr_t)b, (ptr_t)e, FALSE);
UNLOCK();
- ENABLE_SIGNALS();
}
@@ -169,9 +162,7 @@ char * b; char * e;
/* them correctly.) */
/* Tmp specifies that the interval may be deleted before */
/* reregistering dynamic libraries. */
-void GC_add_roots_inner(b, e, tmp)
-char * b; char * e;
-GC_bool tmp;
+void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
struct roots * old;
@@ -187,14 +178,14 @@ GC_bool tmp;
for (i = 0; i < n_root_sets; i++) {
old = GC_static_roots + i;
- if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
- if ((ptr_t)b < old -> r_start) {
- old -> r_start = (ptr_t)b;
- GC_root_size += (old -> r_start - (ptr_t)b);
+ if (b <= old -> r_end && e >= old -> r_start) {
+ if (b < old -> r_start) {
+ old -> r_start = b;
+ GC_root_size += (old -> r_start - b);
}
- if ((ptr_t)e > old -> r_end) {
- old -> r_end = (ptr_t)e;
- GC_root_size += ((ptr_t)e - old -> r_end);
+ if (e > old -> r_end) {
+ old -> r_end = e;
+ GC_root_size += (e - old -> r_end);
}
old -> r_tmp &= tmp;
break;
@@ -206,23 +197,23 @@ GC_bool tmp;
for (i++; i < n_root_sets; i++) {
other = GC_static_roots + i;
- b = (char *)(other -> r_start);
- e = (char *)(other -> r_end);
- if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
- if ((ptr_t)b < old -> r_start) {
- old -> r_start = (ptr_t)b;
- GC_root_size += (old -> r_start - (ptr_t)b);
+ b = other -> r_start;
+ e = other -> r_end;
+ if (b <= old -> r_end && e >= old -> r_start) {
+ if (b < old -> r_start) {
+ old -> r_start = b;
+ GC_root_size += (old -> r_start - b);
}
- if ((ptr_t)e > old -> r_end) {
- old -> r_end = (ptr_t)e;
- GC_root_size += ((ptr_t)e - old -> r_end);
+ if (e > old -> r_end) {
+ old -> r_end = e;
+ GC_root_size += (e - old -> r_end);
}
old -> r_tmp &= other -> r_tmp;
/* Delete this entry. */
GC_root_size -= (other -> r_end - other -> r_start);
other -> r_start = GC_static_roots[n_root_sets-1].r_start;
other -> r_end = GC_static_roots[n_root_sets-1].r_end;
- n_root_sets--;
+ n_root_sets--;
}
}
return;
@@ -231,10 +222,10 @@ GC_bool tmp;
# else
old = GC_roots_present(b);
if (old != 0) {
- if ((ptr_t)e <= old -> r_end) /* already there */ return;
+ if (e <= old -> r_end) /* already there */ return;
/* else extend */
- GC_root_size += (ptr_t)e - old -> r_end;
- old -> r_end = (ptr_t)e;
+ GC_root_size += e - old -> r_end;
+ old -> r_end = e;
return;
}
# endif
@@ -248,17 +239,16 @@ GC_bool tmp;
GC_static_roots[n_root_sets].r_next = 0;
# endif
add_roots_to_index(GC_static_roots + n_root_sets);
- GC_root_size += (ptr_t)e - (ptr_t)b;
+ GC_root_size += e - b;
n_root_sets++;
}
static GC_bool roots_were_cleared = FALSE;
-void GC_clear_roots GC_PROTO((void))
+void GC_clear_roots (void)
{
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
roots_were_cleared = TRUE;
n_root_sets = 0;
@@ -271,12 +261,10 @@ void GC_clear_roots GC_PROTO((void))
}
# endif
UNLOCK();
- ENABLE_SIGNALS();
}
/* Internal use only; lock held. */
-static void GC_remove_root_at_pos(i)
-int i;
+static void GC_remove_root_at_pos(int i)
{
GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
@@ -286,9 +274,9 @@ int i;
}
#if !defined(MSWIN32) && !defined(MSWINCE)
-static void GC_rebuild_root_index()
+static void GC_rebuild_root_index(void)
{
- register int i;
+ int i;
for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
for (i = 0; i < n_root_sets; i++)
@@ -297,9 +285,9 @@ static void GC_rebuild_root_index()
#endif
/* Internal use only; lock held. */
-void GC_remove_tmp_roots()
+void GC_remove_tmp_roots(void)
{
- register int i;
+ int i;
for (i = 0; i < n_root_sets; ) {
if (GC_static_roots[i].r_tmp) {
@@ -314,25 +302,22 @@ void GC_remove_tmp_roots()
}
#if !defined(MSWIN32) && !defined(MSWINCE)
-void GC_remove_roots(b, e)
-char * b; char * e;
+void GC_remove_roots(void *b, void *e)
{
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
- GC_remove_roots_inner(b, e);
+ GC_remove_roots_inner((ptr_t)b, (ptr_t)e);
UNLOCK();
- ENABLE_SIGNALS();
}
/* Should only be called when the lock is held */
-void GC_remove_roots_inner(b,e)
-char * b; char * e;
+void GC_remove_roots_inner(ptr_t b, ptr_t e)
{
int i;
for (i = 0; i < n_root_sets; ) {
- if (GC_static_roots[i].r_start >= (ptr_t)b && GC_static_roots[i].r_end <= (ptr_t)e) {
+ if (GC_static_roots[i].r_start >= b
+ && GC_static_roots[i].r_end <= e) {
GC_remove_root_at_pos(i);
} else {
i++;
@@ -345,8 +330,7 @@ char * b; char * e;
#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
/* Workaround for the OS mapping and unmapping behind our back: */
/* Is the address p in one of the temporary static root sections? */
-GC_bool GC_is_tmp_root(p)
-ptr_t p;
+GC_bool GC_is_tmp_root(ptr_t p)
{
static int last_root_set = MAX_ROOT_SETS;
register int i;
@@ -366,7 +350,7 @@ ptr_t p;
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION */
-ptr_t GC_approx_sp()
+ptr_t GC_approx_sp(void)
{
word dummy;
@@ -398,8 +382,7 @@ size_t GC_excl_table_entries = 0; /* Number of entries in use. */
/* Return the first exclusion range that includes an address >= start_addr */
/* Assumes the exclusion table contains at least one entry (namely the */
/* GC data structures). */
-struct exclusion * GC_next_exclusion(start_addr)
-ptr_t start_addr;
+struct exclusion * GC_next_exclusion(ptr_t start_addr)
{
size_t low = 0;
size_t high = GC_excl_table_entries - 1;
@@ -418,9 +401,7 @@ ptr_t start_addr;
return GC_excl_table + low;
}
-void GC_exclude_static_roots(start, finish)
-GC_PTR start;
-GC_PTR finish;
+void GC_exclude_static_roots(void *start, void *finish)
{
struct exclusion * next;
size_t next_index, i;
@@ -454,10 +435,7 @@ GC_PTR finish;
}
/* Invoke push_conditional on ranges that are not excluded. */
-void GC_push_conditional_with_exclusions(bottom, top, all)
-ptr_t bottom;
-ptr_t top;
-int all;
+void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top, GC_bool all)
{
struct exclusion * next;
ptr_t excl_start;
@@ -479,8 +457,7 @@ int all;
* to ensure that callee-save registers saved in collector frames have been
* seen.
*/
-void GC_push_current_stack(cold_gc_frame)
-ptr_t cold_gc_frame;
+void GC_push_current_stack(ptr_t cold_gc_frame)
{
# if defined(THREADS)
if (0 == cold_gc_frame) return;
@@ -532,20 +509,19 @@ ptr_t cold_gc_frame;
* Push GC internal roots. Only called if there is some reason to believe
* these would not otherwise get registered.
*/
-void GC_push_gc_structures GC_PROTO((void))
+void GC_push_gc_structures(void)
{
GC_push_finalizer_structures();
- GC_push_stubborn_structures();
# if defined(THREADS)
GC_push_thread_structures();
# endif
}
#ifdef THREAD_LOCAL_ALLOC
- void GC_mark_thread_local_free_lists();
+ void GC_mark_thread_local_free_lists(void);
#endif
-void GC_cond_register_dynamic_libraries()
+void GC_cond_register_dynamic_libraries(void)
{
# if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
|| defined(PCR)) && !defined(SRC_M3)
@@ -565,9 +541,7 @@ void GC_cond_register_dynamic_libraries()
* A zero value indicates that it's OK to miss some
* register values.
*/
-void GC_push_roots(all, cold_gc_frame)
-GC_bool all;
-ptr_t cold_gc_frame;
+void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
{
int i;
int kind;
@@ -598,7 +572,7 @@ ptr_t cold_gc_frame;
/* saves us the trouble of scanning them, and possibly that of */
/* marking the freelists. */
for (kind = 0; kind < GC_n_kinds; kind++) {
- GC_PTR base = GC_base(GC_obj_kinds[kind].ok_freelist);
+ void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
if (0 != base) {
GC_set_mark_bit(base);
}
@@ -615,7 +589,7 @@ ptr_t cold_gc_frame;
/* If the world is not stopped, this is unsafe. It is */
/* also unnecessary, since we will do this again with the */
/* world stopped. */
-# ifdef THREAD_LOCAL_ALLOC
+# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
if (GC_world_stopped) GC_mark_thread_local_free_lists();
# endif
diff --git a/misc.c b/misc.c
index 52567b7d..b286026d 100644
--- a/misc.c
+++ b/misc.c
@@ -17,6 +17,7 @@
#include <stdio.h>
#include <limits.h>
+#include <stdarg.h>
#ifndef _WIN32_WCE
#include <signal.h>
#endif
@@ -34,6 +35,14 @@
# include <tchar.h>
#endif
+#ifdef UNIX_LIKE
+# include <fcntl.h>
+# include <sys/types.h>
+# include <sys/stat.h>
+
+ int GC_log; /* Forward decl, so we can set it. */
+#endif
+
# ifdef THREADS
# ifdef PCR
# include "il/PCR_IL.h"
@@ -91,10 +100,10 @@ GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;
GC_bool GC_debugging_started = FALSE;
/* defined here so we don't have to load debug_malloc.o */
-void (*GC_check_heap) GC_PROTO((void)) = (void (*) GC_PROTO((void)))0;
-void (*GC_print_all_smashed) GC_PROTO((void)) = (void (*) GC_PROTO((void)))0;
+void (*GC_check_heap) (void) = (void (*) (void))0;
+void (*GC_print_all_smashed) (void) = (void (*) (void))0;
-void (*GC_start_call_back) GC_PROTO((void)) = (void (*) GC_PROTO((void)))0;
+void (*GC_start_call_back) (void) = (void (*) (void))0;
ptr_t GC_stackbottom = 0;
@@ -108,7 +117,9 @@ GC_bool GC_dont_precollect = 0;
GC_bool GC_quiet = 0;
-GC_bool GC_print_stats = 0;
+#ifndef SMALL_CONFIG
+ GC_bool GC_print_stats = 0;
+#endif
GC_bool GC_print_back_height = 0;
@@ -140,118 +151,87 @@ long GC_large_alloc_warn_suppressed = 0;
/* Number of warnings suppressed so far. */
/*ARGSUSED*/
-GC_PTR GC_default_oom_fn GC_PROTO((size_t bytes_requested))
+void * GC_default_oom_fn(size_t bytes_requested)
{
return(0);
}
-GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested)) = GC_default_oom_fn;
+void * (*GC_oom_fn) (size_t bytes_requested) = GC_default_oom_fn;
-extern signed_word GC_mem_found;
-
-void * GC_project2(arg1, arg2)
-void *arg1;
-void *arg2;
+void * GC_project2(void *arg1, void *arg2)
{
return arg2;
}
-# ifdef MERGE_SIZES
- /* Set things up so that GC_size_map[i] >= words(i), */
- /* but not too much bigger */
- /* and so that size_map contains relatively few distinct entries */
- /* This is stolen from Russ Atkinson's Cedar quantization */
- /* alogrithm (but we precompute it). */
-
-
- void GC_init_size_map()
- {
- register unsigned i;
-
- /* Map size 0 to something bigger. */
- /* This avoids problems at lower levels. */
- /* One word objects don't have to be 2 word aligned, */
- /* unless we're using mark bytes. */
- for (i = 0; i < sizeof(word); i++) {
- GC_size_map[i] = MIN_WORDS;
- }
-# if MIN_WORDS > 1
- GC_size_map[sizeof(word)] = MIN_WORDS;
-# else
- GC_size_map[sizeof(word)] = ROUNDED_UP_WORDS(sizeof(word));
-# endif
- for (i = sizeof(word) + 1; i <= 8 * sizeof(word); i++) {
- GC_size_map[i] = ALIGNED_WORDS(i);
- }
- for (i = 8*sizeof(word) + 1; i <= 16 * sizeof(word); i++) {
- GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 1) & (~1);
- }
-# ifdef GC_GCJ_SUPPORT
- /* Make all sizes up to 32 words predictable, so that a */
- /* compiler can statically perform the same computation, */
- /* or at least a computation that results in similar size */
- /* classes. */
- for (i = 16*sizeof(word) + 1; i <= 32 * sizeof(word); i++) {
- GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 3) & (~3);
- }
-# endif
- /* We leave the rest of the array to be filled in on demand. */
+/* Set things up so that GC_size_map[i] >= granules(i), */
+/* but not too much bigger */
+/* and so that size_map contains relatively few distinct entries */
+/* This was originally stolen from Russ Atkinson's Cedar */
+/* quantization alogrithm (but we precompute it). */
+void GC_init_size_map(void)
+{
+ register unsigned i;
+
+ /* Map size 0 to something bigger. */
+ /* This avoids problems at lower levels. */
+ GC_size_map[0] = 1;
+ for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) {
+ GC_size_map[i] = ROUNDED_UP_GRANULES(i);
+ GC_ASSERT(GC_size_map[i] < TINY_FREELISTS);
}
+ /* We leave the rest of the array to be filled in on demand. */
+}
+
+/* Fill in additional entries in GC_size_map, including the ith one */
+/* We assume the ith entry is currently 0. */
+/* Note that a filled in section of the array ending at n always */
+/* has length at least n/4. */
+void GC_extend_size_map(size_t i)
+{
+ size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
+ size_t granule_sz = orig_granule_sz;
+ size_t byte_sz = GRANULES_TO_BYTES(granule_sz);
+ /* The size we try to preserve. */
+ /* Close to i, unless this would */
+ /* introduce too many distinct sizes. */
+ size_t smaller_than_i = byte_sz - (byte_sz >> 3);
+ size_t much_smaller_than_i = byte_sz - (byte_sz >> 2);
+ size_t low_limit; /* The lowest indexed entry we */
+ /* initialize. */
+ size_t j;
- /* Fill in additional entries in GC_size_map, including the ith one */
- /* We assume the ith entry is currently 0. */
- /* Note that a filled in section of the array ending at n always */
- /* has length at least n/4. */
- void GC_extend_size_map(i)
- word i;
+ if (GC_size_map[smaller_than_i] == 0) {
+ low_limit = much_smaller_than_i;
+ while (GC_size_map[low_limit] != 0) low_limit++;
+ } else {
+ low_limit = smaller_than_i + 1;
+ while (GC_size_map[low_limit] != 0) low_limit++;
+ granule_sz = ROUNDED_UP_GRANULES(low_limit);
+ granule_sz += granule_sz >> 3;
+ if (granule_sz < orig_granule_sz) granule_sz = orig_granule_sz;
+ }
+ /* For these larger sizes, we use an even number of granules. */
+ /* This makes it easier to, for example, construct a 16byte-aligned */
+ /* allocator even if GRANULE_BYTES is 8. */
+ granule_sz += 1;
+ granule_sz &= ~1;
+ if (granule_sz > MAXOBJGRANULES) {
+ granule_sz = MAXOBJGRANULES;
+ }
+ /* If we can fit the same number of larger objects in a block, */
+ /* do so. */
{
- word orig_word_sz = ROUNDED_UP_WORDS(i);
- word word_sz = orig_word_sz;
- register word byte_sz = WORDS_TO_BYTES(word_sz);
- /* The size we try to preserve. */
- /* Close to to i, unless this would */
- /* introduce too many distinct sizes. */
- word smaller_than_i = byte_sz - (byte_sz >> 3);
- word much_smaller_than_i = byte_sz - (byte_sz >> 2);
- register word low_limit; /* The lowest indexed entry we */
- /* initialize. */
- register word j;
-
- if (GC_size_map[smaller_than_i] == 0) {
- low_limit = much_smaller_than_i;
- while (GC_size_map[low_limit] != 0) low_limit++;
- } else {
- low_limit = smaller_than_i + 1;
- while (GC_size_map[low_limit] != 0) low_limit++;
- word_sz = ROUNDED_UP_WORDS(low_limit);
- word_sz += word_sz >> 3;
- if (word_sz < orig_word_sz) word_sz = orig_word_sz;
- }
-# ifdef ALIGN_DOUBLE
- word_sz += 1;
- word_sz &= ~1;
-# endif
- if (word_sz > MAXOBJSZ) {
- word_sz = MAXOBJSZ;
- }
- /* If we can fit the same number of larger objects in a block, */
- /* do so. */
- {
- size_t number_of_objs = BODY_SZ/word_sz;
- word_sz = BODY_SZ/number_of_objs;
-# ifdef ALIGN_DOUBLE
- word_sz &= ~1;
-# endif
- }
- byte_sz = WORDS_TO_BYTES(word_sz);
- if (GC_all_interior_pointers) {
- /* We need one extra byte; don't fill in GC_size_map[byte_sz] */
- byte_sz -= EXTRA_BYTES;
- }
-
- for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = word_sz;
+ size_t number_of_objs = HBLK_GRANULES/granule_sz;
+ granule_sz = HBLK_GRANULES/number_of_objs;
+ granule_sz &= ~1;
}
-# endif
+ byte_sz = GRANULES_TO_BYTES(granule_sz);
+ /* We may need one extra byte; */
+ /* don't always fill in GC_size_map[byte_sz] */
+ byte_sz -= EXTRA_BYTES;
+
+ for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = granule_sz;
+}
/*
@@ -270,28 +250,26 @@ word GC_stack_last_cleared = 0; /* GC_no when we last did this */
# define CLEAR_SIZE 213 /* Granularity for GC_clear_stack_inner */
# define DEGRADE_RATE 50
-word GC_min_sp; /* Coolest stack pointer value from which we've */
+ptr_t GC_min_sp; /* Coolest stack pointer value from which we've */
/* already cleared the stack. */
-word GC_high_water;
+ptr_t GC_high_water;
/* "hottest" stack pointer value we have seen */
/* recently. Degrades over time. */
-word GC_words_allocd_at_reset;
+word GC_bytes_allocd_at_reset;
#if defined(ASM_CLEAR_CODE)
- extern ptr_t GC_clear_stack_inner();
+ extern void *GC_clear_stack_inner(void *, ptr_t);
#else
/* Clear the stack up to about limit. Return arg. */
/*ARGSUSED*/
-ptr_t GC_clear_stack_inner(arg, limit)
-ptr_t arg;
-word limit;
+void * GC_clear_stack_inner(void *arg, ptr_t limit)
{
word dummy[CLEAR_SIZE];
BZERO(dummy, CLEAR_SIZE*sizeof(word));
- if ((word)(dummy) COOLER_THAN limit) {
+ if ((ptr_t)(dummy) COOLER_THAN limit) {
(void) GC_clear_stack_inner(arg, limit);
}
/* Make sure the recursive call is not a tail call, and the bzero */
@@ -304,10 +282,9 @@ word limit;
/* Clear some of the inaccessible part of the stack. Returns its */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame. */
-ptr_t GC_clear_stack(arg)
-ptr_t arg;
+void * GC_clear_stack(void *arg)
{
- register word sp = (word)GC_approx_sp(); /* Hotter than actual sp */
+ ptr_t sp = GC_approx_sp(); /* Hotter than actual sp */
# ifdef THREADS
word dummy[SMALL_CLEAR_SIZE];
static unsigned random_no = 0;
@@ -315,7 +292,7 @@ ptr_t arg;
/* Used to occasionally clear a bigger */
/* chunk. */
# endif
- register word limit;
+ ptr_t limit;
# define SLOP 400
/* Extra bytes we clear every time. This clears our own */
@@ -336,7 +313,8 @@ ptr_t arg;
if (++random_no % 13 == 0) {
limit = sp;
MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
- limit &= ~0xf; /* Make it sufficiently aligned for assembly */
+ limit = (ptr_t)((word)limit & ~0xf);
+ /* Make it sufficiently aligned for assembly */
/* implementations of GC_clear_stack_inner. */
return GC_clear_stack_inner(arg, limit);
} else {
@@ -346,10 +324,10 @@ ptr_t arg;
# else
if (GC_gc_no > GC_stack_last_cleared) {
/* Start things over, so we clear the entire stack again */
- if (GC_stack_last_cleared == 0) GC_high_water = (word) GC_stackbottom;
+ if (GC_stack_last_cleared == 0) GC_high_water = (ptr_t)GC_stackbottom;
GC_min_sp = GC_high_water;
GC_stack_last_cleared = GC_gc_no;
- GC_words_allocd_at_reset = GC_words_allocd;
+ GC_bytes_allocd_at_reset = GC_bytes_allocd;
}
/* Adjust GC_high_water */
MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
@@ -360,17 +338,17 @@ ptr_t arg;
limit = GC_min_sp;
MAKE_HOTTER(limit, SLOP);
if (sp COOLER_THAN limit) {
- limit &= ~0xf; /* Make it sufficiently aligned for assembly */
+ limit = (ptr_t)((word)limit & ~0xf);
+ /* Make it sufficiently aligned for assembly */
/* implementations of GC_clear_stack_inner. */
GC_min_sp = sp;
return(GC_clear_stack_inner(arg, limit));
- } else if (WORDS_TO_BYTES(GC_words_allocd - GC_words_allocd_at_reset)
- > CLEAR_THRESHOLD) {
+ } else if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
/* Restart clearing process, but limit how much clearing we do. */
GC_min_sp = sp;
MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
if (GC_min_sp HOTTER_THAN GC_high_water) GC_min_sp = GC_high_water;
- GC_words_allocd_at_reset = GC_words_allocd;
+ GC_bytes_allocd_at_reset = GC_bytes_allocd;
}
return(arg);
# endif
@@ -379,20 +357,15 @@ ptr_t arg;
/* Return a pointer to the base address of p, given a pointer to a */
/* an address within an object. Return 0 o.w. */
-# ifdef __STDC__
- GC_PTR GC_base(GC_PTR p)
-# else
- GC_PTR GC_base(p)
- GC_PTR p;
-# endif
+void * GC_base(void * p)
{
- register word r;
- register struct hblk *h;
- register bottom_index *bi;
- register hdr *candidate_hdr;
- register word limit;
+ ptr_t r;
+ struct hblk *h;
+ bottom_index *bi;
+ hdr *candidate_hdr;
+ ptr_t limit;
- r = (word)p;
+ r = p;
if (!GC_is_initialized) return 0;
h = HBLKPTR(r);
GET_BI(r, bi);
@@ -402,78 +375,64 @@ ptr_t arg;
/* to the beginning. */
while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
h = FORWARDED_ADDR(h,candidate_hdr);
- r = (word)h;
+ r = (ptr_t)h;
candidate_hdr = HDR(h);
}
- if (candidate_hdr -> hb_map == GC_invalid_map) return(0);
+ if (HBLK_IS_FREE(candidate_hdr)) return(0);
/* Make sure r points to the beginning of the object */
- r &= ~(WORDS_TO_BYTES(1) - 1);
+ r = (ptr_t)((word)r & ~(WORDS_TO_BYTES(1) - 1));
{
- register int offset = HBLKDISPL(r);
- register signed_word sz = candidate_hdr -> hb_sz;
- register signed_word map_entry;
-
- map_entry = MAP_ENTRY((candidate_hdr -> hb_map), offset);
- if (map_entry > CPP_MAX_OFFSET) {
- map_entry = (signed_word)(BYTES_TO_WORDS(offset)) % sz;
- }
- r -= WORDS_TO_BYTES(map_entry);
- limit = r + WORDS_TO_BYTES(sz);
- if (limit > (word)(h + 1)
- && sz <= BYTES_TO_WORDS(HBLKSIZE)) {
+ int offset = HBLKDISPL(r);
+ signed_word sz = candidate_hdr -> hb_sz;
+ int obj_displ = offset % sz;
+
+ r -= obj_displ;
+ limit = r + sz;
+ if (limit > (ptr_t)(h + 1) && sz <= HBLKSIZE) {
return(0);
}
- if ((word)p >= limit) return(0);
+ if ((ptr_t)p >= limit) return(0);
}
- return((GC_PTR)r);
+ return((void *)r);
}
/* Return the size of an object, given a pointer to its base. */
/* (For small obects this also happens to work from interior pointers, */
/* but that shouldn't be relied upon.) */
-# ifdef __STDC__
- size_t GC_size(GC_PTR p)
-# else
- size_t GC_size(p)
- GC_PTR p;
-# endif
+size_t GC_size(void * p)
{
- register int sz;
- register hdr * hhdr = HDR(p);
+ hdr * hhdr = HDR(p);
- sz = WORDS_TO_BYTES(hhdr -> hb_sz);
- return(sz);
+ return hhdr -> hb_sz;
}
-size_t GC_get_heap_size GC_PROTO(())
+size_t GC_get_heap_size(void)
{
- return ((size_t) GC_heapsize);
+ return GC_heapsize;
}
-size_t GC_get_free_bytes GC_PROTO(())
+size_t GC_get_free_bytes(void)
{
- return ((size_t) GC_large_free_bytes);
+ return GC_large_free_bytes;
}
-size_t GC_get_bytes_since_gc GC_PROTO(())
+size_t GC_get_bytes_since_gc(void)
{
- return ((size_t) WORDS_TO_BYTES(GC_words_allocd));
+ return GC_bytes_allocd;
}
-size_t GC_get_total_bytes GC_PROTO(())
+size_t GC_get_total_bytes(void)
{
- return ((size_t) WORDS_TO_BYTES(GC_words_allocd+GC_words_allocd_before_gc));
+ return GC_bytes_allocd+GC_bytes_allocd_before_gc;
}
GC_bool GC_is_initialized = FALSE;
-void GC_init()
+void GC_init(void)
{
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
-
#if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
if (!GC_is_initialized) {
BOOL (WINAPI *pfn) (LPCRITICAL_SECTION, DWORD) = NULL;
@@ -491,7 +450,6 @@ void GC_init()
LOCK();
GC_init_inner();
UNLOCK();
- ENABLE_SIGNALS();
# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
/* Make sure marker threads and started and thread local */
@@ -518,7 +476,7 @@ void GC_init()
#endif
#ifdef MSWIN32
- extern void GC_init_win32 GC_PROTO((void));
+ extern void GC_init_win32(void);
#endif
extern void GC_setpagesize();
@@ -530,23 +488,23 @@ extern GC_bool GC_no_win32_dlls;
# define GC_no_win32_dlls FALSE
#endif
-void GC_exit_check GC_PROTO((void))
+void GC_exit_check(void)
{
GC_gcollect();
}
#ifdef SEARCH_FOR_DATA_START
- extern void GC_init_linux_data_start GC_PROTO((void));
+ extern void GC_init_linux_data_start(void);
#endif
#ifdef UNIX_LIKE
-extern void GC_set_and_save_fault_handler GC_PROTO((void (*handler)(int)));
+extern void GC_set_and_save_fault_handler(void (*handler)(int));
static void looping_handler(sig)
int sig;
{
- GC_err_printf1("Caught signal %d: looping in handler\n", sig);
+ GC_err_printf("Caught signal %d: looping in handler\n", sig);
for(;;);
}
@@ -576,15 +534,30 @@ void GC_init_inner()
word initial_heap_sz = (word)MINHINCR;
if (GC_is_initialized) return;
-# ifdef PRINTSTATS
- GC_print_stats = 1;
-# endif
# if defined(MSWIN32) || defined(MSWINCE)
InitializeCriticalSection(&GC_write_cs);
# endif
- if (0 != GETENV("GC_PRINT_STATS")) {
- GC_print_stats = 1;
- }
+# if (!defined(SMALL_CONFIG))
+ if (0 != GETENV("GC_PRINT_STATS")) {
+ GC_print_stats = 1;
+ }
+ if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) {
+ GC_print_stats = VERBOSE;
+ }
+# if defined(UNIX_LIKE)
+ {
+ char * file_name = GETENV("GC_LOG_FILE");
+ if (0 != file_name) {
+ int log_d = open(file_name, O_CREAT|O_WRONLY|O_APPEND, 0666);
+ if (log_d < 0) {
+ GC_log_printf("Failed to open %s as log file\n", file_name);
+ } else {
+ GC_log = log_d;
+ }
+ }
+ }
+# endif
+# endif
# ifndef NO_DEBUGGING
if (0 != GETENV("GC_DUMP_REGULARLY")) {
GC_dump_regularly = 1;
@@ -601,9 +574,7 @@ void GC_init_inner()
# endif
if (0 != GETENV("GC_FIND_LEAK")) {
GC_find_leak = 1;
-# ifdef __STDC__
- atexit(GC_exit_check);
-# endif
+ atexit(GC_exit_check);
}
if (0 != GETENV("GC_ALL_INTERIOR_POINTERS")) {
GC_all_interior_pointers = 1;
@@ -618,6 +589,23 @@ void GC_init_inner()
GC_large_alloc_warn_interval = LONG_MAX;
}
{
+ char * addr_string = GETENV("GC_TRACE");
+ if (0 != addr_string) {
+# ifndef ENABLE_TRACE
+ WARN("Tracing not enabled: Ignoring GC_TRACE value\n", 0);
+# else
+# ifdef STRTOULL
+ long long addr = strtoull(addr_string, NULL, 16);
+# else
+ long addr = strtoul(addr_string, NULL, 16);
+# endif
+ if (addr < 0x1000)
+ WARN("Unlikely trace address: 0x%lx", (unsigned long)addr);
+ GC_trace_addr = (ptr_t)addr;
+# endif
+ }
+ }
+ {
char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
if (0 != time_limit_string) {
long time_limit = atol(time_limit_string);
@@ -689,6 +677,7 @@ void GC_init_inner()
# endif
}
# endif
+ /* Ignore gcc -Wall warnings on the following. */
GC_STATIC_ASSERT(sizeof (ptr_t) == sizeof(word));
GC_STATIC_ASSERT(sizeof (signed_word) == sizeof(word));
GC_STATIC_ASSERT(sizeof (struct hblk) == HBLKSIZE);
@@ -743,19 +732,12 @@ void GC_init_inner()
}
}
if (!GC_expand_hp_inner(initial_heap_sz)) {
- GC_err_printf0("Can't start up: not enough memory\n");
+ GC_err_printf("Can't start up: not enough memory\n");
EXIT();
}
- /* Preallocate large object map. It's otherwise inconvenient to */
- /* deal with failure. */
- if (!GC_add_map_entry((word)0)) {
- GC_err_printf0("Can't start up: not enough memory\n");
- EXIT();
- }
+ GC_initialize_offsets();
GC_register_displacement_inner(0L);
-# ifdef MERGE_SIZES
- GC_init_size_map();
-# endif
+ GC_init_size_map();
# ifdef PCR
if (PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever)
!= PCR_ERes_okay) {
@@ -773,7 +755,7 @@ void GC_init_inner()
# ifndef GC_SOLARIS_THREADS
GC_dirty_init();
# endif
- GC_ASSERT(GC_words_allocd == 0)
+ GC_ASSERT(GC_bytes_allocd == 0)
GC_incremental = TRUE;
}
# endif /* !SMALL_CONFIG */
@@ -802,16 +784,12 @@ void GC_init_inner()
# endif
}
-void GC_enable_incremental GC_PROTO(())
+void GC_enable_incremental(void)
{
-# if !defined(SMALL_CONFIG) && !defined(KEEP_BACK_PTRS)
- /* If we are keeping back pointers, the GC itself dirties all */
- /* pages on which objects have been marked, making */
- /* incremental GC pointless. */
+# if !defined(SMALL_CONFIG)
if (!GC_find_leak) {
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
if (GC_incremental) goto out;
GC_setpagesize();
@@ -827,10 +805,9 @@ void GC_enable_incremental GC_PROTO(())
if (GC_dont_gc) {
/* Can't easily do it. */
UNLOCK();
- ENABLE_SIGNALS();
return;
}
- if (GC_words_allocd > 0) {
+ if (GC_bytes_allocd > 0) {
/* There may be unmarked reachable objects */
GC_gcollect_inner();
} /* else we're OK in assuming everything's */
@@ -840,7 +817,6 @@ void GC_enable_incremental GC_PROTO(())
GC_incremental = TRUE;
out:
UNLOCK();
- ENABLE_SIGNALS();
}
# endif
}
@@ -859,7 +835,7 @@ out:
}
int GC_write(buf, len)
- GC_CONST char * buf;
+ const char * buf;
size_t len;
{
BOOL tmp;
@@ -888,6 +864,7 @@ out:
#if defined(OS2) || defined(MACOS)
FILE * GC_stdout = NULL;
FILE * GC_stderr = NULL;
+FILE * GC_log = NULL;
int GC_tmp; /* Should really be local ... */
void GC_set_files()
@@ -904,6 +881,7 @@ int GC_tmp; /* Should really be local ... */
#if !defined(OS2) && !defined(MACOS) && !defined(MSWIN32) && !defined(MSWINCE)
int GC_stdout = 1;
int GC_stderr = 2;
+ int GC_log = 2;
# if !defined(AMIGA)
# include <unistd.h>
# endif
@@ -913,7 +891,7 @@ int GC_tmp; /* Should really be local ... */
&& !defined(MACOS) && !defined(ECOS) && !defined(NOSYS)
int GC_write(fd, buf, len)
int fd;
-GC_CONST char *buf;
+const char *buf;
size_t len;
{
register int bytes_written = 0;
@@ -962,71 +940,75 @@ int GC_write(fd, buf, len)
# endif
#endif
+#define BUFSZ 1024
/* A version of printf that is unlikely to call malloc, and is thus safer */
/* to call from the collector in case malloc has been bound to GC_malloc. */
-/* Assumes that no more than 1023 characters are written at once. */
-/* Assumes that all arguments have been converted to something of the */
-/* same size as long, and that the format conversions expect something */
-/* of that size. */
-void GC_printf(format, a, b, c, d, e, f)
-GC_CONST char * format;
-long a, b, c, d, e, f;
+/* Floating point arguments ans formats should be avoided, since fp */
+/* conversion is more likely to allocate. */
+/* Assumes that no more than BUFSZ-1 characters are written at once. */
+void GC_printf(const char *format, ...)
{
- char buf[1025];
+ va_list args;
+ char buf[BUFSZ+1];
+ va_start(args, format);
if (GC_quiet) return;
- buf[1024] = 0x15;
- (void) sprintf(buf, format, a, b, c, d, e, f);
- if (buf[1024] != 0x15) ABORT("GC_printf clobbered stack");
+ buf[BUFSZ] = 0x15;
+ (void) vsnprintf(buf, BUFSZ, format, args);
+ va_end(args);
+ if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack");
if (WRITE(GC_stdout, buf, strlen(buf)) < 0) ABORT("write to stdout failed");
}
-void GC_err_printf(format, a, b, c, d, e, f)
-GC_CONST char * format;
-long a, b, c, d, e, f;
+void GC_err_printf(const char *format, ...)
{
- char buf[1025];
+ va_list args;
+ char buf[BUFSZ+1];
- buf[1024] = 0x15;
- (void) sprintf(buf, format, a, b, c, d, e, f);
- if (buf[1024] != 0x15) ABORT("GC_err_printf clobbered stack");
+ va_start(args, format);
+ buf[BUFSZ] = 0x15;
+ (void) vsnprintf(buf, BUFSZ, format, args);
+ va_end(args);
+ if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack");
if (WRITE(GC_stderr, buf, strlen(buf)) < 0) ABORT("write to stderr failed");
}
+void GC_log_printf(const char *format, ...)
+{
+ va_list args;
+ char buf[BUFSZ+1];
+
+ va_start(args, format);
+ buf[BUFSZ] = 0x15;
+ (void) vsnprintf(buf, BUFSZ, format, args);
+ va_end(args);
+ if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack");
+ if (WRITE(GC_log, buf, strlen(buf)) < 0) ABORT("write to log failed");
+}
+
void GC_err_puts(s)
-GC_CONST char *s;
+const char *s;
{
if (WRITE(GC_stderr, s, strlen(s)) < 0) ABORT("write to stderr failed");
}
#if defined(LINUX) && !defined(SMALL_CONFIG)
void GC_err_write(buf, len)
-GC_CONST char *buf;
+const char *buf;
size_t len;
{
if (WRITE(GC_stderr, buf, len) < 0) ABORT("write to stderr failed");
}
#endif
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_default_warn_proc(char *msg, GC_word arg)
-# else
- void GC_default_warn_proc(msg, arg)
- char *msg;
- GC_word arg;
-# endif
+void GC_default_warn_proc(char *msg, GC_word arg)
{
- GC_err_printf1(msg, (unsigned long)arg);
+ GC_err_printf(msg, arg);
}
GC_warn_proc GC_current_warn_proc = GC_default_warn_proc;
-# if defined(__STDC__) || defined(__cplusplus)
- GC_warn_proc GC_set_warn_proc(GC_warn_proc p)
-# else
- GC_warn_proc GC_set_warn_proc(p)
- GC_warn_proc p;
-# endif
+GC_warn_proc GC_set_warn_proc(GC_warn_proc p)
{
GC_warn_proc result;
@@ -1040,12 +1022,7 @@ GC_warn_proc GC_current_warn_proc = GC_default_warn_proc;
return(result);
}
-# if defined(__STDC__) || defined(__cplusplus)
- GC_word GC_set_free_space_divisor (GC_word value)
-# else
- GC_word GC_set_free_space_divisor (value)
- GC_word value;
-# endif
+GC_word GC_set_free_space_divisor (GC_word value)
{
GC_word old = GC_free_space_divisor;
GC_free_space_divisor = value;
@@ -1054,12 +1031,12 @@ GC_warn_proc GC_current_warn_proc = GC_default_warn_proc;
#ifndef PCR
void GC_abort(msg)
-GC_CONST char * msg;
+const char * msg;
{
# if defined(MSWIN32)
(void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK);
# else
- GC_err_printf1("%s\n", msg);
+ GC_err_printf("%s\n", msg);
# endif
if (GETENV("GC_LOOP_ON_ABORT") != NULL) {
/* In many cases it's easier to debug a running process. */
@@ -1093,31 +1070,28 @@ void GC_disable()
/* Helper procedures for new kind creation. */
void ** GC_new_free_list_inner()
{
- void *result = GC_INTERNAL_MALLOC((MAXOBJSZ+1)*sizeof(ptr_t), PTRFREE);
+ void *result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1)*sizeof(ptr_t),
+ PTRFREE);
if (result == 0) ABORT("Failed to allocate freelist for new kind");
- BZERO(result, (MAXOBJSZ+1)*sizeof(ptr_t));
+ BZERO(result, (MAXOBJGRANULES+1)*sizeof(ptr_t));
return result;
}
void ** GC_new_free_list()
{
void *result;
- LOCK(); DISABLE_SIGNALS();
+ LOCK();
result = GC_new_free_list_inner();
- UNLOCK(); ENABLE_SIGNALS();
+ UNLOCK();
return result;
}
-int GC_new_kind_inner(fl, descr, adjust, clear)
-void **fl;
-GC_word descr;
-int adjust;
-int clear;
+int GC_new_kind_inner(void **fl, GC_word descr, int adjust, int clear)
{
int result = GC_n_kinds++;
if (GC_n_kinds > MAXOBJKINDS) ABORT("Too many kinds");
- GC_obj_kinds[result].ok_freelist = (ptr_t *)fl;
+ GC_obj_kinds[result].ok_freelist = fl;
GC_obj_kinds[result].ok_reclaim_list = 0;
GC_obj_kinds[result].ok_descriptor = descr;
GC_obj_kinds[result].ok_relocate_descr = adjust;
@@ -1125,21 +1099,16 @@ int clear;
return result;
}
-int GC_new_kind(fl, descr, adjust, clear)
-void **fl;
-GC_word descr;
-int adjust;
-int clear;
+int GC_new_kind(void **fl, GC_word descr, int adjust, int clear)
{
int result;
- LOCK(); DISABLE_SIGNALS();
+ LOCK();
result = GC_new_kind_inner(fl, descr, adjust, clear);
- UNLOCK(); ENABLE_SIGNALS();
+ UNLOCK();
return result;
}
-int GC_new_proc_inner(proc)
-GC_mark_proc proc;
+int GC_new_proc_inner(GC_mark_proc proc)
{
int result = GC_n_mark_procs++;
@@ -1148,13 +1117,12 @@ GC_mark_proc proc;
return result;
}
-int GC_new_proc(proc)
-GC_mark_proc proc;
+int GC_new_proc(GC_mark_proc proc)
{
int result;
- LOCK(); DISABLE_SIGNALS();
+ LOCK();
result = GC_new_proc_inner(proc);
- UNLOCK(); ENABLE_SIGNALS();
+ UNLOCK();
return result;
}
@@ -1163,15 +1131,15 @@ GC_mark_proc proc;
void GC_dump()
{
- GC_printf0("***Static roots:\n");
+ GC_printf("***Static roots:\n");
GC_print_static_roots();
- GC_printf0("\n***Heap sections:\n");
+ GC_printf("\n***Heap sections:\n");
GC_print_heap_sects();
- GC_printf0("\n***Free blocks:\n");
+ GC_printf("\n***Free blocks:\n");
GC_print_hblkfreelist();
- GC_printf0("\n***Blocks in use:\n");
+ GC_printf("\n***Blocks in use:\n");
GC_print_block_list();
- GC_printf0("\n***Finalization statistics:\n");
+ GC_printf("\n***Finalization statistics:\n");
GC_print_finalization_stats();
}
diff --git a/new_hblk.c b/new_hblk.c
index e5580e4c..5d5a56f2 100644
--- a/new_hblk.c
+++ b/new_hblk.c
@@ -14,7 +14,7 @@
*
* This file contains the functions:
* ptr_t GC_build_flXXX(h, old_fl)
- * void GC_new_hblk(n)
+ * void GC_new_hblk(size)
*/
/* Boehm, May 19, 1994 2:09 pm PDT */
@@ -24,37 +24,14 @@
#ifndef SMALL_CONFIG
/*
- * Build a free list for size 1 objects inside hblk h. Set the last link to
+ * Build a free list for size 2 (words) cleared objects inside hblk h.
+ * Set the last link to
* be ofl. Return a pointer tpo the first free list entry.
*/
-ptr_t GC_build_fl1(h, ofl)
-struct hblk *h;
-ptr_t ofl;
+ptr_t GC_build_fl_clear2(struct hblk *h, ptr_t ofl)
{
- register word * p = h -> hb_body;
- register word * lim = (word *)(h + 1);
-
- p[0] = (word)ofl;
- p[1] = (word)(p);
- p[2] = (word)(p+1);
- p[3] = (word)(p+2);
- p += 4;
- for (; p < lim; p += 4) {
- p[0] = (word)(p-1);
- p[1] = (word)(p);
- p[2] = (word)(p+1);
- p[3] = (word)(p+2);
- };
- return((ptr_t)(p-1));
-}
-
-/* The same for size 2 cleared objects */
-ptr_t GC_build_fl_clear2(h, ofl)
-struct hblk *h;
-ptr_t ofl;
-{
- register word * p = h -> hb_body;
- register word * lim = (word *)(h + 1);
+ word * p = (word *)(h -> hb_body);
+ word * lim = (word *)(h + 1);
p[0] = (word)ofl;
p[1] = 0;
@@ -70,33 +47,11 @@ ptr_t ofl;
return((ptr_t)(p-2));
}
-/* The same for size 3 cleared objects */
-ptr_t GC_build_fl_clear3(h, ofl)
-struct hblk *h;
-ptr_t ofl;
-{
- register word * p = h -> hb_body;
- register word * lim = (word *)(h + 1) - 2;
-
- p[0] = (word)ofl;
- p[1] = 0;
- p[2] = 0;
- p += 3;
- for (; p < lim; p += 3) {
- p[0] = (word)(p-3);
- p[1] = 0;
- p[2] = 0;
- };
- return((ptr_t)(p-3));
-}
-
/* The same for size 4 cleared objects */
-ptr_t GC_build_fl_clear4(h, ofl)
-struct hblk *h;
-ptr_t ofl;
+ptr_t GC_build_fl_clear4(struct hblk *h, ptr_t ofl)
{
- register word * p = h -> hb_body;
- register word * lim = (word *)(h + 1);
+ word * p = (word *)(h -> hb_body);
+ word * lim = (word *)(h + 1);
p[0] = (word)ofl;
p[1] = 0;
@@ -113,12 +68,10 @@ ptr_t ofl;
}
/* The same for size 2 uncleared objects */
-ptr_t GC_build_fl2(h, ofl)
-struct hblk *h;
-ptr_t ofl;
+ptr_t GC_build_fl2(struct hblk *h, ptr_t ofl)
{
- register word * p = h -> hb_body;
- register word * lim = (word *)(h + 1);
+ word * p = (word *)(h -> hb_body);
+ word * lim = (word *)(h + 1);
p[0] = (word)ofl;
p[2] = (word)p;
@@ -131,12 +84,10 @@ ptr_t ofl;
}
/* The same for size 4 uncleared objects */
-ptr_t GC_build_fl4(h, ofl)
-struct hblk *h;
-ptr_t ofl;
+ptr_t GC_build_fl4(struct hblk *h, ptr_t ofl)
{
- register word * p = h -> hb_body;
- register word * lim = (word *)(h + 1);
+ word * p = (word *)(h -> hb_body);
+ word * lim = (word *)(h + 1);
p[0] = (word)ofl;
p[4] = (word)p;
@@ -158,11 +109,7 @@ ptr_t ofl;
/* This could be called without the main GC lock, if we ensure that */
/* there is no concurrent collection which might reclaim objects that */
/* we have not yet allocated. */
-ptr_t GC_build_fl(h, sz, clear, list)
-struct hblk *h;
-word sz;
-GC_bool clear;
-ptr_t list;
+ptr_t GC_build_fl(struct hblk *h, size_t sz, GC_bool clear, ptr_t list)
{
word *p, *prev;
word *last_object; /* points to last object in new hblk */
@@ -179,18 +126,11 @@ ptr_t list;
/* the difference is less significant. */
# ifndef SMALL_CONFIG
switch (sz) {
- case 1: return GC_build_fl1(h, list);
case 2: if (clear) {
return GC_build_fl_clear2(h, list);
} else {
return GC_build_fl2(h, list);
}
- case 3: if (clear) {
- return GC_build_fl_clear3(h, list);
- } else {
- /* It's messy to do better than the default here. */
- break;
- }
case 4: if (clear) {
return GC_build_fl_clear4(h, list);
} else {
@@ -205,8 +145,8 @@ ptr_t list;
if (clear) BZERO(h, HBLKSIZE);
/* Add objects to free list */
- p = &(h -> hb_body[sz]); /* second object in *h */
- prev = &(h -> hb_body[0]); /* One object behind p */
+ p = (word *)(h -> hb_body) + sz; /* second object in *h */
+ prev = (word *)(h -> hb_body); /* One object behind p */
last_object = (word *)((char *)h + HBLKSIZE);
last_object -= sz;
/* Last place for last object to start */
@@ -228,36 +168,34 @@ ptr_t list;
return ((ptr_t)p);
}
+
/*
- * Allocate a new heapblock for small objects of size n.
+ * Allocate a new heapblock for small objects of size gran granules.
* Add all of the heapblock's objects to the free list for objects
* of that size.
* Set all mark bits if objects are uncollectable.
* Will fail to do anything if we are out of memory.
*/
-void GC_new_hblk(sz, kind)
-register word sz;
-int kind;
+void GC_new_hblk(size_t gran, int kind)
{
- register struct hblk *h; /* the new heap block */
- register GC_bool clear = GC_obj_kinds[kind].ok_init;
+ struct hblk *h; /* the new heap block */
+ GC_bool clear = GC_obj_kinds[kind].ok_init;
-# ifdef PRINTSTATS
- if ((sizeof (struct hblk)) > HBLKSIZE) {
- ABORT("HBLK SZ inconsistency");
- }
-# endif
+ /* Ignore gcc "no effect" warning on the following: */
+ GC_STATIC_ASSERT((sizeof (struct hblk)) == HBLKSIZE);
+
if (GC_debugging_started) clear = TRUE;
/* Allocate a new heap block */
- h = GC_allochblk(sz, kind, 0);
+ h = GC_allochblk(GRANULES_TO_BYTES(gran), kind, 0);
if (h == 0) return;
/* Mark all objects if appropriate. */
if (IS_UNCOLLECTABLE(kind)) GC_set_hdr_marks(HDR(h));
/* Build the free list */
- GC_obj_kinds[kind].ok_freelist[sz] =
- GC_build_fl(h, sz, clear, GC_obj_kinds[kind].ok_freelist[sz]);
+ GC_obj_kinds[kind].ok_freelist[gran] =
+ GC_build_fl(h, GRANULES_TO_WORDS(gran), clear,
+ GC_obj_kinds[kind].ok_freelist[gran]);
}
diff --git a/obj_map.c b/obj_map.c
index d002d65b..3b9a09d1 100644
--- a/obj_map.c
+++ b/obj_map.c
@@ -21,127 +21,70 @@
# include "private/gc_priv.h"
-map_entry_type * GC_invalid_map = 0;
-
-/* Invalidate the object map associated with a block. Free blocks */
-/* are identified by invalid maps. */
-void GC_invalidate_map(hhdr)
-hdr *hhdr;
-{
- register int displ;
-
- if (GC_invalid_map == 0) {
- GC_invalid_map = (map_entry_type *)GC_scratch_alloc(MAP_SIZE);
- if (GC_invalid_map == 0) {
- GC_err_printf0(
- "Cant initialize GC_invalid_map: insufficient memory\n");
- EXIT();
- }
- for (displ = 0; displ < HBLKSIZE; displ++) {
- MAP_ENTRY(GC_invalid_map, displ) = OBJ_INVALID;
- }
- }
- hhdr -> hb_map = GC_invalid_map;
-}
-
/* Consider pointers that are offset bytes displaced from the beginning */
/* of an object to be valid. */
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_register_displacement(GC_word offset)
-# else
- void GC_register_displacement(offset)
- GC_word offset;
-# endif
+void GC_register_displacement(size_t offset)
{
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
GC_register_displacement_inner(offset);
UNLOCK();
- ENABLE_SIGNALS();
}
-void GC_register_displacement_inner(offset)
-word offset;
+void GC_register_displacement_inner(size_t offset)
{
- register unsigned i;
- word map_entry = BYTES_TO_WORDS(offset);
-
if (offset >= VALID_OFFSET_SZ) {
ABORT("Bad argument to GC_register_displacement");
}
- if (map_entry > MAX_OFFSET) map_entry = OFFSET_TOO_BIG;
if (!GC_valid_offsets[offset]) {
GC_valid_offsets[offset] = TRUE;
GC_modws_valid_offsets[offset % sizeof(word)] = TRUE;
- if (!GC_all_interior_pointers) {
- for (i = 0; i <= MAXOBJSZ; i++) {
- if (GC_obj_map[i] != 0) {
- if (i == 0) {
- GC_obj_map[i][offset] = (map_entry_type)map_entry;
- } else {
- register unsigned j;
- register unsigned lb = WORDS_TO_BYTES(i);
-
- if (offset < lb) {
- for (j = offset; j < HBLKSIZE; j += lb) {
- GC_obj_map[i][j] = (map_entry_type)map_entry;
- }
- }
- }
- }
- }
- }
}
}
-
-/* Add a heap block map for objects of size sz to obj_map. */
-/* Return FALSE on failure. */
-GC_bool GC_add_map_entry(sz)
-word sz;
+#ifdef MARK_BIT_PER_GRANULE
+/* Add a heap block map for objects of size granules to obj_map. */
+/* Return FALSE on failure. */
+/* A size of 0 granules is used for large objects. */
+GC_bool GC_add_map_entry(size_t granules)
{
- register unsigned obj_start;
- register unsigned displ;
- register map_entry_type * new_map;
- word map_entry;
+ unsigned displ;
+ short * new_map;
- if (sz > MAXOBJSZ) sz = 0;
- if (GC_obj_map[sz] != 0) {
+ if (granules > BYTES_TO_GRANULES(MAXOBJBYTES)) granules = 0;
+ if (GC_obj_map[granules] != 0) {
return(TRUE);
}
- new_map = (map_entry_type *)GC_scratch_alloc(MAP_SIZE);
+ new_map = (short *)GC_scratch_alloc(MAP_LEN * sizeof(short));
if (new_map == 0) return(FALSE);
-# ifdef PRINTSTATS
- GC_printf1("Adding block map for size %lu\n", (unsigned long)sz);
-# endif
- for (displ = 0; displ < HBLKSIZE; displ++) {
- MAP_ENTRY(new_map,displ) = OBJ_INVALID;
- }
- if (sz == 0) {
- for(displ = 0; displ <= HBLKSIZE; displ++) {
- if (OFFSET_VALID(displ)) {
- map_entry = BYTES_TO_WORDS(displ);
- if (map_entry > MAX_OFFSET) map_entry = OFFSET_TOO_BIG;
- MAP_ENTRY(new_map,displ) = (map_entry_type)map_entry;
- }
- }
+ if (GC_print_stats)
+ GC_printf("Adding block map for size of %u granules (%u bytes)\n",
+ (unsigned)granules, (unsigned)(GRANULES_TO_BYTES(granules)));
+ if (granules == 0) {
+ for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
+ new_map[displ] = 1; /* Nonzero to get us out of marker fast path. */
+ }
} else {
- for (obj_start = 0;
- obj_start + WORDS_TO_BYTES(sz) <= HBLKSIZE;
- obj_start += WORDS_TO_BYTES(sz)) {
- for (displ = 0; displ < WORDS_TO_BYTES(sz); displ++) {
- if (OFFSET_VALID(displ)) {
- map_entry = BYTES_TO_WORDS(displ);
- if (map_entry > MAX_OFFSET) map_entry = OFFSET_TOO_BIG;
- MAP_ENTRY(new_map, obj_start + displ) =
- (map_entry_type)map_entry;
- }
- }
- }
+ for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
+ new_map[displ] = displ % granules;
+ }
}
- GC_obj_map[sz] = new_map;
+ GC_obj_map[granules] = new_map;
return(TRUE);
}
+#endif
+
+static GC_bool offsets_initialized = FALSE;
+
+void GC_initialize_offsets(void)
+{
+ if (!offsets_initialized) {
+ int i;
+ if (GC_all_interior_pointers) {
+ for (i = 0; i < VALID_OFFSET_SZ; ++i) GC_valid_offsets[i] = TRUE;
+ }
+ offsets_initialized = TRUE;
+ }
+}
diff --git a/os_dep.c b/os_dep.c
index f5c7467c..45037b14 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -15,6 +15,9 @@
*/
# include "private/gc_priv.h"
+# ifdef THREADS
+# include "atomic_ops.h"
+# endif
# if defined(LINUX) && !defined(POWERPC)
# include <linux/version.h>
@@ -48,7 +51,7 @@
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
&& !defined(MSWINCE)
# include <sys/types.h>
-# if !defined(MSWIN32) && !defined(SUNOS4)
+# if !defined(MSWIN32)
# include <unistd.h>
# endif
# endif
@@ -71,10 +74,6 @@
# define NEED_FIND_LIMIT
# endif
-# if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
-# define NEED_FIND_LIMIT
-# endif
-
# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
|| (defined(LINUX) && defined(SPARC))) && !defined(PCR)
# define NEED_FIND_LIMIT
@@ -334,7 +333,7 @@ char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
void GC_init_linux_data_start()
{
- extern ptr_t GC_find_limit();
+ extern ptr_t GC_find_limit(ptr_t, GC_bool);
# ifdef LINUX
/* Try the easy approaches first: */
@@ -388,9 +387,9 @@ static void *tiny_sbrk(ptrdiff_t increment)
#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
ptr_t GC_data_start;
- void GC_init_netbsd_elf()
+ void GC_init_netbsd_elf(void)
{
- extern ptr_t GC_find_limit();
+ extern ptr_t GC_find_limit(ptr_t, GC_bool);
extern char **environ;
/* This may need to be environ, without the underscore, for */
/* some versions. */
@@ -531,12 +530,12 @@ static SIGSET_T old_mask;
static SIGSET_T dummy;
-#if defined(PRINTSTATS) && !defined(THREADS)
+#if defined(GC_ASSERTIONS) && !defined(THREADS)
# define CHECK_SIGNALS
int GC_sig_disabled = 0;
#endif
-void GC_disable_signals()
+void GC_disable_signals(void)
{
if (!mask_initialized) {
SIG_FILL(new_mask);
@@ -565,7 +564,7 @@ void GC_disable_signals()
SIGSETMASK(old_mask,new_mask);
}
-void GC_enable_signals()
+void GC_enable_signals(void)
{
# ifdef CHECK_SIGNALS
if (GC_sig_disabled != 1) ABORT("Unmatched enable");
@@ -588,7 +587,7 @@ void GC_enable_signals()
word GC_page_size;
# if defined(MSWIN32) || defined(MSWINCE)
- void GC_setpagesize()
+ void GC_setpagesize(void)
{
GetSystemInfo(&GC_sysinfo);
GC_page_size = GC_sysinfo.dwPageSize;
@@ -597,13 +596,13 @@ word GC_page_size;
# else
# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
|| defined(USE_MUNMAP)
- void GC_setpagesize()
+ void GC_setpagesize(void)
{
GC_page_size = GETPAGESIZE();
}
# else
/* It's acceptable to fake it. */
- void GC_setpagesize()
+ void GC_setpagesize(void)
{
GC_page_size = HBLKSIZE;
}
@@ -642,7 +641,7 @@ word GC_get_writable_length(ptr_t p, ptr_t *base)
return(buf.RegionSize);
}
-ptr_t GC_get_stack_base()
+ptr_t GC_get_stack_base(void)
{
int dummy;
ptr_t sp = (ptr_t)(&dummy);
@@ -657,7 +656,7 @@ ptr_t GC_get_stack_base()
# ifdef BEOS
# include <kernel/OS.h>
-ptr_t GC_get_stack_base(){
+ptr_t GC_get_stack_base(void){
thread_info th;
get_thread_info(find_thread(NULL),&th);
return th.stack_end;
@@ -667,13 +666,13 @@ ptr_t GC_get_stack_base(){
# ifdef OS2
-ptr_t GC_get_stack_base()
+ptr_t GC_get_stack_base(void)
{
PTIB ptib;
PPIB ppib;
if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
- GC_err_printf0("DosGetInfoBlocks failed\n");
+ GC_err_printf("DosGetInfoBlocks failed\n");
ABORT("DosGetInfoBlocks failed\n");
}
return((ptr_t)(ptib -> tib_pstacklimit));
@@ -689,11 +688,7 @@ ptr_t GC_get_stack_base()
# if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
-# ifdef __STDC__
- typedef void (*handler)(int);
-# else
- typedef void (*handler)();
-# endif
+ typedef void (*handler)(int);
# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
|| defined(HURD) || defined(NETBSD)
@@ -706,12 +701,7 @@ ptr_t GC_get_stack_base()
static handler old_segv_handler, old_bus_handler;
# endif
-# ifdef __STDC__
- void GC_set_and_save_fault_handler(handler h)
-# else
- void GC_set_and_save_fault_handler(h)
- handler h;
-# endif
+ void GC_set_and_save_fault_handler(handler h)
{
# if defined(SUNOS5SIGS) || defined(IRIX5) \
|| defined(OSF1) || defined(HURD) || defined(NETBSD)
@@ -756,18 +746,17 @@ ptr_t GC_get_stack_base()
/* static */ JMP_BUF GC_jmp_buf;
/*ARGSUSED*/
- void GC_fault_handler(sig)
- int sig;
+ void GC_fault_handler(int sig)
{
LONGJMP(GC_jmp_buf, 1);
}
- void GC_setup_temporary_fault_handler()
+ void GC_setup_temporary_fault_handler(void)
{
GC_set_and_save_fault_handler(GC_fault_handler);
}
- void GC_reset_fault_handler()
+ void GC_reset_fault_handler(void)
{
# if defined(SUNOS5SIGS) || defined(IRIX5) \
|| defined(OSF1) || defined(HURD) || defined(NETBSD)
@@ -787,11 +776,9 @@ ptr_t GC_get_stack_base()
/* Return the first nonaddressible location > p (up) or */
/* the smallest location q s.t. [q,p) is addressable (!up). */
/* We assume that p (up) or p-1 (!up) is addressable. */
- ptr_t GC_find_limit(p, up)
- ptr_t p;
- GC_bool up;
+ ptr_t GC_find_limit(ptr_t p, GC_bool up)
{
- static VOLATILE ptr_t result;
+ static volatile ptr_t result;
/* Needs to be static, since otherwise it may not be */
/* preserved across the longjmp. Can safely be */
/* static since it's only called once, with the */
@@ -820,7 +807,7 @@ ptr_t GC_get_stack_base()
# endif
#if defined(ECOS) || defined(NOSYS)
- ptr_t GC_get_stack_base()
+ ptr_t GC_get_stack_base(void)
{
return STACKBOTTOM;
}
@@ -858,10 +845,8 @@ ptr_t GC_get_stack_base()
# define STAT_SKIP 27 /* Number of fields preceding startstack */
/* field in /proc/self/stat */
-#ifdef USE_LIBC_PRIVATES
# pragma weak __libc_stack_end
extern ptr_t __libc_stack_end;
-#endif
# ifdef IA64
/* Try to read the backing store base from /proc/self/maps. */
@@ -880,7 +865,8 @@ ptr_t GC_get_stack_base()
buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
if (buf_ptr == NULL) return current_best;
if (prot_buf[1] == 'w' && maj_dev == 0) {
- if (end < (word)(&dummy) && start > current_best) current_best = start;
+ if (end < (word)(&dummy) && start > current_best)
+ current_best = start;
}
}
return current_best;
@@ -891,33 +877,30 @@ ptr_t GC_get_stack_base()
return GC_apply_to_maps(backing_store_base_from_maps);
}
-# ifdef USE_LIBC_PRIVATES
-# pragma weak __libc_ia64_register_backing_store_base
- extern ptr_t __libc_ia64_register_backing_store_base;
-# endif
+# pragma weak __libc_ia64_register_backing_store_base
+ extern ptr_t __libc_ia64_register_backing_store_base;
ptr_t GC_get_register_stack_base(void)
{
-# ifdef USE_LIBC_PRIVATES
- if (0 != &__libc_ia64_register_backing_store_base
- && 0 != __libc_ia64_register_backing_store_base) {
- /* Glibc 2.2.4 has a bug such that for dynamically linked */
- /* executables __libc_ia64_register_backing_store_base is */
- /* defined but uninitialized during constructor calls. */
- /* Hence we check for both nonzero address and value. */
- return __libc_ia64_register_backing_store_base;
- }
-# endif
- word result = backing_store_base_from_proc();
- if (0 == result) {
+ if (0 != &__libc_ia64_register_backing_store_base
+ && 0 != __libc_ia64_register_backing_store_base) {
+ /* Glibc 2.2.4 has a bug such that for dynamically linked */
+ /* executables __libc_ia64_register_backing_store_base is */
+ /* defined but uninitialized during constructor calls. */
+ /* Hence we check for both nonzero address and value. */
+ return __libc_ia64_register_backing_store_base;
+ } else {
+ word result = backing_store_base_from_proc();
+ if (0 == result) {
/* Use dumb heuristics. Works only for default configuration. */
result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
result += BACKING_STORE_ALIGNMENT - 1;
result &= ~(BACKING_STORE_ALIGNMENT - 1);
/* Verify that it's at least readable. If not, we goofed. */
GC_noop1(*(word *)result);
+ }
+ return (ptr_t)result;
}
- return (ptr_t)result;
}
# endif
@@ -940,7 +923,6 @@ ptr_t GC_get_stack_base()
/* since the correct value of __libc_stack_end never */
/* becomes visible to us. The second test works around */
/* this. */
-# ifdef USE_LIBC_PRIVATES
if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
# ifdef IA64
/* Some versions of glibc set the address 16 bytes too */
@@ -953,7 +935,6 @@ ptr_t GC_get_stack_base()
return __libc_stack_end;
# endif
}
-# endif
f = open("/proc/self/stat", O_RDONLY);
if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
ABORT("Couldn't read /proc/self/stat");
@@ -1004,13 +985,12 @@ ptr_t GC_get_stack_base()
#if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
&& !defined(MSWINCE) && !defined(OS2) && !defined(NOSYS) && !defined(ECOS)
-ptr_t GC_get_stack_base()
+ptr_t GC_get_stack_base(void)
{
-# if defined(HEURISTIC1) || defined(HEURISTIC2) || \
- defined(LINUX_STACKBOTTOM) || defined(FREEBSD_STACKBOTTOM)
- word dummy;
- ptr_t result;
+# if defined(HEURISTIC1) || defined(HEURISTIC2)
+ word dummy;
# endif
+ ptr_t result;
# define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
@@ -1072,7 +1052,7 @@ ptr_t GC_get_stack_base()
# ifdef OS2
-void GC_register_data_segments()
+void GC_register_data_segments(void)
{
PTIB ptib;
PPIB ppib;
@@ -1087,12 +1067,12 @@ void GC_register_data_segments()
if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
- GC_err_printf0("DosGetInfoBlocks failed\n");
+ GC_err_printf("DosGetInfoBlocks failed\n");
ABORT("DosGetInfoBlocks failed\n");
}
module_handle = ppib -> pib_hmte;
if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
- GC_err_printf0("DosQueryModuleName failed\n");
+ GC_err_printf("DosQueryModuleName failed\n");
ABORT("DosGetInfoBlocks failed\n");
}
myexefile = fopen(path, "rb");
@@ -1153,7 +1133,7 @@ void GC_register_data_segments()
if (!(flags & OBJWRITE)) continue;
if (!(flags & OBJREAD)) continue;
if (flags & OBJINVALID) {
- GC_err_printf0("Object with invalid pages?\n");
+ GC_err_printf("Object with invalid pages?\n");
continue;
}
GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
@@ -1177,7 +1157,7 @@ void GC_register_data_segments()
/* the structured exception handling issues. But we now have */
/* assembly code to do that right. */
- void GC_init_win32()
+ void GC_init_win32(void)
{
/* if we're running under win32s, assume that no DLLs will be loaded */
DWORD v = GetVersion();
@@ -1271,11 +1251,9 @@ void GC_register_data_segments()
free(new_l); return;
}
}
-# ifdef CONDPRINT
- if (GC_print_stats)
- GC_printf1("Found new system malloc AllocationBase at 0x%lx\n",
- candidate);
-# endif
+ if (GC_print_stats)
+ GC_log_printf("Found new system malloc AllocationBase at %p\n",
+ candidate);
new_l -> allocation_base = candidate;
new_l -> next = GC_malloc_heap_l;
GC_malloc_heap_l = new_l;
@@ -1352,9 +1330,7 @@ void GC_register_data_segments()
# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
|| (defined(LINUX) && defined(SPARC))) && !defined(PCR)
-ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
-int max_page_size;
-int * etext_addr;
+ptr_t GC_SysVGetDataStart(size_t max_page_size, ptr_t etext_addr)
{
word text_end = ((word)(etext_addr) + sizeof(word) - 1)
& ~(sizeof(word) - 1);
@@ -1362,7 +1338,7 @@ int * etext_addr;
word next_page = ((text_end + (word)max_page_size - 1)
& ~((word)max_page_size - 1));
word page_offset = (text_end & ((word)max_page_size - 1));
- VOLATILE char * result = (char *)(next_page + page_offset);
+ volatile char * result = (char *)(next_page + page_offset);
/* Note that this isnt equivalent to just adding */
/* max_page_size to &etext if &etext is at a page boundary */
@@ -1390,22 +1366,20 @@ int * etext_addr;
/* For now we don't assume that there is always an empty page after */
/* etext. But in some cases there actually seems to be slightly more. */
/* This also deals with holes between read-only data and writable data. */
-ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
-int max_page_size;
-int * etext_addr;
+ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, ptr_t etext_addr)
{
word text_end = ((word)(etext_addr) + sizeof(word) - 1)
& ~(sizeof(word) - 1);
/* etext rounded to word boundary */
- VOLATILE word next_page = (text_end + (word)max_page_size - 1)
+ volatile word next_page = (text_end + (word)max_page_size - 1)
& ~((word)max_page_size - 1);
- VOLATILE ptr_t result = (ptr_t)text_end;
+ volatile ptr_t result = (ptr_t)text_end;
GC_setup_temporary_fault_handler();
if (SETJMP(GC_jmp_buf) == 0) {
/* Try reading at the address. */
/* This should happen before there is another thread. */
for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
- *(VOLATILE char *)next_page;
+ *(volatile char *)next_page;
GC_reset_fault_handler();
} else {
GC_reset_fault_handler();
@@ -1426,7 +1400,7 @@ int * etext_addr;
#else /* !OS2 && !Windows && !AMIGA */
-void GC_register_data_segments()
+void GC_register_data_segments(void)
{
# if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
# if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
@@ -1437,11 +1411,11 @@ void GC_register_data_segments()
/* hanging from it. We're on thin ice here ... */
extern caddr_t sbrk();
- GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
+ GC_add_roots_inner(DATASTART, (ptr_t)sbrk(0), FALSE);
# else
- GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
+ GC_add_roots_inner(DATASTART, (ptr_t)(DATAEND), FALSE);
# if defined(DATASTART2)
- GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
+ GC_add_roots_inner(DATASTART2, (ptr_t)(DATAEND2), FALSE);
# endif
# endif
# endif
@@ -1495,22 +1469,14 @@ void GC_register_data_segments()
&& !defined(MSWIN32) && !defined(MSWINCE) \
&& !defined(MACOS) && !defined(DOS4GW)
-# ifdef SUNOS4
- extern caddr_t sbrk();
-# endif
-# ifdef __STDC__
-# define SBRK_ARG_T ptrdiff_t
-# else
-# define SBRK_ARG_T int
-# endif
+# define SBRK_ARG_T ptrdiff_t
# ifdef RS6000
/* The compiler seems to generate speculative reads one past the end of */
/* an allocated object. Hence we need to make sure that the page */
/* following the last heap page is also mapped. */
-ptr_t GC_unix_get_mem(bytes)
-word bytes;
+ptr_t GC_unix_get_mem(word bytes)
{
caddr_t cur_brk = (caddr_t)sbrk(0);
caddr_t result;
@@ -1567,8 +1533,7 @@ word bytes;
# define HEAP_START 0
#endif
-ptr_t GC_unix_get_mem(bytes)
-word bytes;
+ptr_t GC_unix_get_mem(word bytes)
{
void *result;
static ptr_t last_addr = HEAP_START;
@@ -1605,8 +1570,7 @@ word bytes;
}
#else /* Not RS6000, not USE_MMAP */
-ptr_t GC_unix_get_mem(bytes)
-word bytes;
+ptr_t GC_unix_get_mem(word bytes)
{
ptr_t result;
# ifdef IRIX5
@@ -1668,8 +1632,7 @@ SYSTEM_INFO GC_sysinfo;
word GC_n_heap_bases = 0;
-ptr_t GC_win32_get_mem(bytes)
-word bytes;
+ptr_t GC_win32_get_mem(word bytes)
{
ptr_t result;
@@ -1700,7 +1663,7 @@ word bytes;
return(result);
}
-void GC_win32_free_heap ()
+void GC_win32_free_heap(void)
{
if (GC_no_win32_dlls) {
while (GC_n_heap_bases > 0) {
@@ -1721,8 +1684,7 @@ void GC_win32_free_heap ()
# ifdef MSWINCE
word GC_n_heap_bases = 0;
-ptr_t GC_wince_get_mem(bytes)
-word bytes;
+ptr_t GC_wince_get_mem(word bytes)
{
ptr_t result;
word i;
@@ -1792,7 +1754,7 @@ word bytes;
/* Compute a page aligned starting address for the unmap */
/* operation on a block of size bytes starting at start. */
/* Return 0 if the block is too small to make this feasible. */
-ptr_t GC_unmap_start(ptr_t start, word bytes)
+ptr_t GC_unmap_start(ptr_t start, size_t bytes)
{
ptr_t result = start;
/* Round start to next page boundary. */
@@ -1804,7 +1766,7 @@ ptr_t GC_unmap_start(ptr_t start, word bytes)
/* Compute end address for an unmap operation on the indicated */
/* block. */
-ptr_t GC_unmap_end(ptr_t start, word bytes)
+ptr_t GC_unmap_end(ptr_t start, size_t bytes)
{
ptr_t end_addr = start + bytes;
end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
@@ -1822,7 +1784,7 @@ ptr_t GC_unmap_end(ptr_t start, word bytes)
/* We assume that GC_remap is called on exactly the same range */
/* as a previous call to GC_unmap. It is safe to consistently */
/* round the endpoints in both places. */
-void GC_unmap(ptr_t start, word bytes)
+void GC_unmap(ptr_t start, size_t bytes)
{
ptr_t start_addr = GC_unmap_start(start, bytes);
ptr_t end_addr = GC_unmap_end(start, bytes);
@@ -1857,7 +1819,7 @@ void GC_unmap(ptr_t start, word bytes)
}
-void GC_remap(ptr_t start, word bytes)
+void GC_remap(ptr_t start, size_t bytes)
{
ptr_t start_addr = GC_unmap_start(start, bytes);
ptr_t end_addr = GC_unmap_end(start, bytes);
@@ -1892,9 +1854,9 @@ void GC_remap(ptr_t start, word bytes)
result = mprotect(start_addr, len,
PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
if (result != 0) {
- GC_err_printf3(
- "Mprotect failed at 0x%lx (length %ld) with errno %ld\n",
- start_addr, len, errno);
+ GC_err_printf(
+ "Mprotect failed at %p (length %ld) with errno %d\n",
+ start_addr, (unsigned long)len, errno);
ABORT("Mprotect remapping failed");
}
GC_unmapped_bytes -= len;
@@ -1905,7 +1867,7 @@ void GC_remap(ptr_t start, word bytes)
/* be merged. Unmap the whole block. This typically requires */
/* that we unmap a small section in the middle that was not previously */
/* unmapped due to alignment constraints. */
-void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
+void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2)
{
ptr_t start1_addr = GC_unmap_start(start1, bytes1);
ptr_t end1_addr = GC_unmap_end(start1, bytes1);
@@ -1913,7 +1875,7 @@ void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
ptr_t end2_addr = GC_unmap_end(start2, bytes2);
ptr_t start_addr = end1_addr;
ptr_t end_addr = start2_addr;
- word len;
+ size_t len;
GC_ASSERT(start1 + bytes1 == start2);
if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
@@ -1945,7 +1907,7 @@ void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
/* environment, this is also responsible for marking from */
/* thread stacks. */
#ifndef THREADS
-void (*GC_push_other_roots)() = 0;
+void (*GC_push_other_roots)(void) = 0;
#else /* THREADS */
# ifdef PCR
@@ -1970,7 +1932,7 @@ PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
}
-void GC_default_push_other_roots GC_PROTO((void))
+void GC_default_push_other_roots(void)
{
/* Traverse data allocated by previous memory managers. */
{
@@ -1998,24 +1960,20 @@ void GC_default_push_other_roots GC_PROTO((void))
--> misconfigured
# endif
-void GC_push_thread_structures GC_PROTO((void))
+void GC_push_thread_structures(void)
{
/* Not our responsibibility. */
}
-extern void ThreadF__ProcessStacks();
+extern void ThreadF__ProcessStacks(void);
-void GC_push_thread_stack(start, stop)
-word start, stop;
+void GC_push_thread_stack(word start, word stop)
{
GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
}
/* Push routine with M3 specific calling convention. */
-GC_m3_push_root(dummy1, p, dummy2, dummy3)
-word *p;
-ptr_t dummy1, dummy2;
-int dummy3;
+GC_m3_push_root(ptr_t dummy1, word *p, ptr_t dummy2, int dummy3)
{
word q = *p;
@@ -2026,13 +1984,13 @@ int dummy3;
typedef struct { int elts[1]; } RefTypeSet;
RefTypeSet GC_TracedRefTypes = {{0x1}};
-void GC_default_push_other_roots GC_PROTO((void))
+void GC_default_push_other_roots(void)
{
/* Use the M3 provided routine for finding static roots. */
/* This is a bit dubious, since it presumes no C roots. */
/* We handle the collector roots explicitly in GC_push_roots */
RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
- if (GC_words_allocd > 0) {
+ if (GC_bytes_allocd > 0) {
ThreadF__ProcessStacks(GC_push_thread_stack);
}
/* Otherwise this isn't absolutely necessary, and we have */
@@ -2044,25 +2002,37 @@ void GC_default_push_other_roots GC_PROTO((void))
# if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
defined(GC_WIN32_THREADS)
-extern void GC_push_all_stacks();
+extern void GC_push_all_stacks(void);
-void GC_default_push_other_roots GC_PROTO((void))
+void GC_default_push_other_roots(void)
{
GC_push_all_stacks();
}
# endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
-void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
+void (*GC_push_other_roots)(void) = GC_default_push_other_roots;
#endif /* THREADS */
/*
* Routines for accessing dirty bits on virtual pages.
- * We plan to eventually implement four strategies for doing so:
+ * There are five ways to maintain this information:
* DEFAULT_VDB: A simple dummy implementation that treats every page
* as possibly dirty. This makes incremental collection
* useless, but the implementation is still correct.
+ * MANUAL_VDB: Stacks and static data are always considered dirty.
+ * Heap pages are considered dirty if GC_dirty(p) has been
+ * called on some pointer p pointing to somewhere inside
+ * an object on that page. A GC_dirty() call on a large
+ * object directly dirties only a single page, but for
+ * MANUAL_VDB we are careful to treat an object with a dirty
+ * page as completely dirty.
+ * In order to avoid races, an object must be marked dirty
+ * after it is written, and a reference to the object
+ * must be kept on a stack or in a register in the interim.
+ * In this mode, an object directly reachable from the
+ * stack at the time of a collection is treated as dirty.
* PCR_VDB: Use PPCRs virtual dirty bit facility.
* PROC_VDB: Use the /proc facility for reading dirty bits. Only
* works under some SVR4 variants. Even then, it may be
@@ -2088,17 +2058,16 @@ GC_bool GC_dirty_maintained = FALSE;
/* written. */
/* Initialize virtual dirty bit implementation. */
-void GC_dirty_init()
+void GC_dirty_init(void)
{
-# ifdef PRINTSTATS
- GC_printf0("Initializing DEFAULT_VDB...\n");
-# endif
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Initializing DEFAULT_VDB...\n");
GC_dirty_maintained = TRUE;
}
/* Retrieve system dirty bits for heap to a local buffer. */
/* Restore the systems notion of which pages are dirty. */
-void GC_read_dirty()
+void GC_read_dirty(void)
{}
/* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
@@ -2106,8 +2075,7 @@ void GC_read_dirty()
/* of the pages overlapping h are dirty. This routine may err on the */
/* side of labelling pages as dirty (and this implementation does). */
/*ARGSUSED*/
-GC_bool GC_page_was_dirty(h)
-struct hblk *h;
+GC_bool GC_page_was_dirty(struct hblk *h)
{
return(TRUE);
}
@@ -2121,16 +2089,13 @@ struct hblk *h;
/* Could any valid GC heap pointer ever have been written to this page? */
/*ARGSUSED*/
-GC_bool GC_page_was_ever_dirty(h)
-struct hblk *h;
+GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
return(TRUE);
}
/* Reset the n pages starting at h to "was never dirty" status. */
-void GC_is_fresh(h, n)
-struct hblk *h;
-word n;
+void GC_is_fresh(struct hblk *h, word n)
{
}
@@ -2142,15 +2107,62 @@ word n;
/* pointer-free system call buffers in the heap are */
/* not protected. */
/*ARGSUSED*/
-void GC_remove_protection(h, nblocks, is_ptrfree)
-struct hblk *h;
-word nblocks;
-GC_bool is_ptrfree;
+void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
{
}
# endif /* DEFAULT_VDB */
+# ifdef MANUAL_VDB
+
+/* Initialize virtual dirty bit implementation. */
+void GC_dirty_init(void)
+{
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Initializing MANUAL_VDB...\n");
+ /* FIXME - implement me. */
+ GC_dirty_maintained = TRUE;
+}
+
+/* Retrieve system dirty bits for heap to a local buffer. */
+/* Restore the systems notion of which pages are dirty. */
+void GC_read_dirty(void)
+{
+ /* FIXME - implement me. */
+}
+
+/* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
+/* If the actual page size is different, this returns TRUE if any */
+/* of the pages overlapping h are dirty. This routine may err on the */
+/* side of labelling pages as dirty (and this implementation does). */
+/*ARGSUSED*/
+GC_bool GC_page_was_dirty(struct hblk *h)
+{
+ /* FIXME - implement me. */
+ return(TRUE);
+}
+
+
+/* Could any valid GC heap pointer ever have been written to this page? */
+/*ARGSUSED*/
+GC_bool GC_page_was_ever_dirty(struct hblk *h)
+{
+ /* FIXME - implement me. */
+ return(TRUE);
+}
+
+/* Reset the n pages starting at h to "was never dirty" status. */
+void GC_is_fresh(struct hblk *h, word n)
+{
+}
+
+/*ARGSUSED*/
+void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
+{
+}
+
+# endif /* MANUAL_VDB */
+
# ifdef MPROTECT_VDB
@@ -2168,7 +2180,6 @@ GC_bool is_ptrfree;
* heap, and do even that only if we are on a platform on which those
* are not protected. Another alternative is to wrap system calls
* (see example for read below), but the current implementation holds
- * a lock across blocking calls, making it problematic for multithreaded
* applications.
* We assume the page size is a multiple of HBLKSIZE.
* We prefer them to be the same. We avoid protecting POINTERFREE
@@ -2220,7 +2231,7 @@ GC_bool is_ptrfree;
if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
&protect_junk)) { \
DWORD last_error = GetLastError(); \
- GC_printf1("Last error code: %lx\n", last_error); \
+ GC_printf("Last error code: %lx\n", last_error); \
ABORT("VirtualProtect failed"); \
}
# define UNPROTECT(addr, len) \
@@ -2231,88 +2242,33 @@ GC_bool is_ptrfree;
# endif /* !DARWIN */
# endif /* MSWIN32 || MSWINCE || DARWIN */
-#if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
- typedef void (* SIG_PF)();
-#endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
-
-#if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
- || defined(HURD)
-# ifdef __STDC__
- typedef void (* SIG_PF)(int);
-# else
- typedef void (* SIG_PF)();
-# endif
-#endif /* SUNOS5SIGS || OSF1 || LINUX || HURD */
-
#if defined(MSWIN32)
- typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
+ typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_HNDLR_PTR;
# undef SIG_DFL
# define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
-#endif
-#if defined(MSWINCE)
- typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
+#elif defined(MSWINCE)
+ typedef LONG (WINAPI *SIG_HNDLR_PTR)(struct _EXCEPTION_POINTERS *);
# undef SIG_DFL
-# define SIG_DFL (SIG_PF) (-1)
+# define SIG_DFL (SIG_HNDLR_PTR) (-1)
+#elif defined(DARWIN)
+ typedef void (* SIG_HNDLR_PTR)();
+#else
+ typedef void (* SIG_HNDLR_PTR)(int, siginfo_t *, void *);
+ typedef void (* PLAIN_HNDLR_PTR)(int);
#endif
-#if defined(IRIX5) || defined(OSF1) || defined(HURD)
- typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
-#endif /* IRIX5 || OSF1 || HURD */
-
-#if defined(SUNOS5SIGS)
-# if defined(HPUX) || defined(FREEBSD)
-# define SIGINFO_T siginfo_t
-# else
-# define SIGINFO_T struct siginfo
-# endif
-# ifdef __STDC__
- typedef void (* REAL_SIG_PF)(int, SIGINFO_T *, void *);
-# else
- typedef void (* REAL_SIG_PF)();
-# endif
-#endif /* SUNOS5SIGS */
-
-#if defined(LINUX)
-# if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
- typedef struct sigcontext s_c;
-# else /* glibc < 2.2 */
-# include <linux/version.h>
-# if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
- typedef struct sigcontext s_c;
-# else
- typedef struct sigcontext_struct s_c;
-# endif
-# endif /* glibc < 2.2 */
-# if defined(ALPHA) || defined(M68K)
- typedef void (* REAL_SIG_PF)(int, int, s_c *);
-# else
-# if defined(IA64) || defined(HP_PA) || defined(X86_64)
- typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
- /* FIXME: */
- /* According to SUSV3, the last argument should have type */
- /* void * or ucontext_t * */
-# else
- typedef void (* REAL_SIG_PF)(int, s_c);
-# endif
+#if defined(__GLIBC__)
+# if __GLIBC__ < 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ < 2
+# error glibc too old?
# endif
-# ifdef ALPHA
- /* Retrieve fault address from sigcontext structure by decoding */
- /* instruction. */
- char * get_fault_addr(s_c *sc) {
- unsigned instr;
- word faultaddr;
-
- instr = *((unsigned *)(sc->sc_pc));
- faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
- faultaddr += (word) (((int)instr << 16) >> 16);
- return (char *)faultaddr;
- }
-# endif /* !ALPHA */
-# endif /* LINUX */
+#endif
#ifndef DARWIN
-SIG_PF GC_old_bus_handler;
-SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
+SIG_HNDLR_PTR GC_old_bus_handler;
+GC_bool GC_old_bus_handler_used_si;
+SIG_HNDLR_PTR GC_old_segv_handler;
+ /* Also old MSWIN32 ACCESS_VIOLATION filter */
+GC_bool GC_old_segv_handler_used_si;
#endif /* !DARWIN */
#if defined(THREADS)
@@ -2323,16 +2279,17 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
/* safe fallback algorithm of setting all bits in the word. */
/* Contention should be very rare, so we do the minimum to handle it */
/* correctly. */
-#ifdef GC_TEST_AND_SET_DEFINED
- static VOLATILE unsigned int fault_handler_lock = 0;
- void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
- while (GC_test_and_set(&fault_handler_lock)) {}
+#ifdef AO_HAVE_test_and_set_acquire
+ static volatile AO_TS_t fault_handler_lock = 0;
+ void async_set_pht_entry_from_index(volatile page_hash_table db, int index) {
+ while (AO_test_and_set_acquire(&fault_handler_lock) == AO_TS_SET) {}
/* Could also revert to set_pht_entry_from_index_safe if initial */
/* GC_test_and_set fails. */
set_pht_entry_from_index(db, index);
- GC_clear(&fault_handler_lock);
+ AO_CLEAR(&fault_handler_lock);
}
-#else /* !GC_TEST_AND_SET_DEFINED */
+#else /* !AO_HAVE_test_and_set_acquire */
+# error No test-and-set operation: Introduces a race.
/* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
/* just before we notice the conflict and correct it. We may end up */
/* looking at it while it's wrong. But this requires contention */
@@ -2340,8 +2297,8 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
/* fail than the old code, which had no reported failures. Thus we */
/* leave it this way while we think of something better, or support */
/* GC_test_and_set on the remaining platforms. */
- static VOLATILE word currently_updating = 0;
- void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
+ static volatile word currently_updating = 0;
+ void async_set_pht_entry_from_index(volatile page_hash_table db, int index) {
unsigned int update_dummy;
currently_updating = (word)(&update_dummy);
set_pht_entry_from_index(db, index);
@@ -2357,182 +2314,72 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
/* returning us to a safe state, though not soon enough. */
}
}
-#endif /* !GC_TEST_AND_SET_DEFINED */
+#endif /* !AO_HAVE_test_and_set_acquire */
#else /* !THREADS */
# define async_set_pht_entry_from_index(db, index) \
set_pht_entry_from_index(db, index)
#endif /* !THREADS */
-/*ARGSUSED*/
#if !defined(DARWIN)
-# if defined (SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
- void GC_write_fault_handler(sig, code, scp, addr)
- int sig, code;
- struct sigcontext *scp;
- char * addr;
-# ifdef SUNOS4
-# define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
-# define CODE_OK (FC_CODE(code) == FC_PROT \
- || (FC_CODE(code) == FC_OBJERR \
- && FC_ERRNO(code) == FC_PROT))
-# endif
-# ifdef FREEBSD
-# define SIG_OK (sig == SIGBUS)
-# define CODE_OK TRUE
-# endif
-# endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
-
-# if defined(IRIX5) || defined(OSF1) || defined(HURD)
# include <errno.h>
- void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
-# ifdef OSF1
+# if defined(FREEBSD)
+# define SIG_OK (sig == SIGBUS)
+# define CODE_OK (code == BUS_PAGE_FAULT)
+# elif defined(OSF1)
# define SIG_OK (sig == SIGSEGV)
# define CODE_OK (code == 2 /* experimentally determined */)
-# endif
-# ifdef IRIX5
+# elif defined(IRIX5)
# define SIG_OK (sig == SIGSEGV)
# define CODE_OK (code == EACCES)
-# endif
-# ifdef HURD
+# elif defined(HURD)
# define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
# define CODE_OK TRUE
-# endif
-# endif /* IRIX5 || OSF1 || HURD */
-
-# if defined(LINUX)
-# if defined(ALPHA) || defined(M68K)
- void GC_write_fault_handler(int sig, int code, s_c * sc)
-# else
-# if defined(IA64) || defined(HP_PA) || defined(X86_64)
- void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
-# else
-# if defined(ARM32)
- void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
-# else
- void GC_write_fault_handler(int sig, s_c sc)
-# endif
-# endif
-# endif
-# define SIG_OK (sig == SIGSEGV)
-# define CODE_OK TRUE
+# elif defined(LINUX)
+# define SIG_OK (sig == SIGSEGV)
+# define CODE_OK TRUE
/* Empirically c.trapno == 14, on IA32, but is that useful? */
/* Should probably consider alignment issues on other */
/* architectures. */
-# endif /* LINUX */
-
-# if defined(SUNOS5SIGS)
-# ifdef __STDC__
- void GC_write_fault_handler(int sig, SIGINFO_T *scp, void * context)
-# else
- void GC_write_fault_handler(sig, scp, context)
- int sig;
- SIGINFO_T *scp;
- void * context;
-# endif
-# ifdef HPUX
+# elif defined(HPUX)
# define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
# define CODE_OK (scp -> si_code == SEGV_ACCERR) \
|| (scp -> si_code == BUS_ADRERR) \
|| (scp -> si_code == BUS_UNKNOWN) \
|| (scp -> si_code == SEGV_UNKNOWN) \
|| (scp -> si_code == BUS_OBJERR)
-# else
-# ifdef FREEBSD
-# define SIG_OK (sig == SIGBUS)
-# define CODE_OK (scp -> si_code == BUS_PAGE_FAULT)
-# else
-# define SIG_OK (sig == SIGSEGV)
-# define CODE_OK (scp -> si_code == SEGV_ACCERR)
-# endif
+# elif defined(FREEBSD)
+# define SIG_OK (sig == SIGBUS)
+# define CODE_OK (scp -> si_code == BUS_PAGE_FAULT)
+# elif defined(SUNOS5SIGS)
+# define SIG_OK (sig == SIGSEGV)
+# define CODE_OK (scp -> si_code == SEGV_ACCERR)
+# elif defined(MSWIN32) || defined(MSWINCE)
+# define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode \
+ == STATUS_ACCESS_VIOLATION)
+# define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] \
+ == 1) /* Write fault */
# endif
-# endif /* SUNOS5SIGS */
# if defined(MSWIN32) || defined(MSWINCE)
LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
-# define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
- STATUS_ACCESS_VIOLATION)
-# define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
- /* Write fault */
+# else
+# include <ucontext.h>
+ /*ARGSUSED*/
+ void GC_write_fault_handler(int sig, siginfo_t *si, void *raw_sc)
# endif /* MSWIN32 || MSWINCE */
{
- register unsigned i;
-# if defined(HURD)
- char *addr = (char *) code;
-# endif
-# ifdef IRIX5
- char * addr = (char *) (size_t) (scp -> sc_badvaddr);
-# endif
-# if defined(OSF1) && defined(ALPHA)
- char * addr = (char *) (scp -> sc_traparg_a0);
-# endif
-# ifdef SUNOS5SIGS
- char * addr = (char *) (scp -> si_addr);
-# endif
-# ifdef LINUX
-# if defined(I386)
- char * addr = (char *) (sc.cr2);
-# else
-# if defined(M68K)
- char * addr = NULL;
-
- struct sigcontext *scp = (struct sigcontext *)(sc);
-
- int format = (scp->sc_formatvec >> 12) & 0xf;
- unsigned long *framedata = (unsigned long *)(scp + 1);
- unsigned long ea;
-
- if (format == 0xa || format == 0xb) {
- /* 68020/030 */
- ea = framedata[2];
- } else if (format == 7) {
- /* 68040 */
- ea = framedata[3];
- if (framedata[1] & 0x08000000) {
- /* correct addr on misaligned access */
- ea = (ea+4095)&(~4095);
- }
- } else if (format == 4) {
- /* 68060 */
- ea = framedata[0];
- if (framedata[1] & 0x08000000) {
- /* correct addr on misaligned access */
- ea = (ea+4095)&(~4095);
- }
- }
- addr = (char *)ea;
-# else
-# ifdef ALPHA
- char * addr = get_fault_addr(sc);
-# else
-# if defined(IA64) || defined(HP_PA) || defined(X86_64)
- char * addr = si -> si_addr;
- /* I believe this is claimed to work on all platforms for */
- /* Linux 2.3.47 and later. Hopefully we don't have to */
- /* worry about earlier kernels on IA64. */
-# else
-# if defined(POWERPC)
- char * addr = (char *) (sc.regs->dar);
-# else
-# if defined(ARM32)
- char * addr = (char *)sc.fault_address;
-# else
-# if defined(CRIS)
- char * addr = (char *)sc.regs.csraddr;
-# else
- --> architecture not supported
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
+# if !defined(MSWIN32) && !defined(MSWINCE)
+ int code = si -> si_code; /* Ignore gcc unused var. warning. */
+ ucontext_t * scp = (ucontext_t *)raw_sc;
+ /* Ignore gcc unused var. warning. */
+ char *addr = si -> si_addr;
# endif
# if defined(MSWIN32) || defined(MSWINCE)
char * addr = (char *) (exc_info -> ExceptionRecord
-> ExceptionInformation[1]);
# define sig SIGSEGV
# endif
+ unsigned i;
if (SIG_OK && CODE_OK) {
register struct hblk * h =
@@ -2556,53 +2403,38 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
/* sequence, which often depends on SA_SIGINFO. */
/* Heap blocks now begin and end on page boundaries */
- SIG_PF old_handler;
+ SIG_HNDLR_PTR old_handler;
+ GC_bool used_si;
if (sig == SIGSEGV) {
old_handler = GC_old_segv_handler;
+ used_si = GC_old_segv_handler_used_si;
} else {
old_handler = GC_old_bus_handler;
+ used_si = GC_old_bus_handler_used_si;
}
- if (old_handler == SIG_DFL) {
+ if (old_handler == (SIG_HNDLR_PTR)SIG_DFL) {
# if !defined(MSWIN32) && !defined(MSWINCE)
- GC_err_printf1("Segfault at 0x%lx\n", addr);
+ GC_err_printf("Segfault at %p\n", addr);
ABORT("Unexpected bus error or segmentation fault");
# else
return(EXCEPTION_CONTINUE_SEARCH);
# endif
} else {
-# if defined (SUNOS4) \
- || (defined(FREEBSD) && !defined(SUNOS5SIGS))
- (*old_handler) (sig, code, scp, addr);
- return;
-# endif
-# if defined (SUNOS5SIGS)
- /*
- * FIXME: For FreeBSD, this code should check if the
- * old signal handler used the traditional BSD style and
- * if so call it using that style.
- */
- (*(REAL_SIG_PF)old_handler) (sig, scp, context);
- return;
-# endif
-# if defined (LINUX)
-# if defined(ALPHA) || defined(M68K)
- (*(REAL_SIG_PF)old_handler) (sig, code, sc);
-# else
-# if defined(IA64) || defined(HP_PA) || defined(X86_64)
- (*(REAL_SIG_PF)old_handler) (sig, si, scp);
-# else
- (*(REAL_SIG_PF)old_handler) (sig, sc);
-# endif
-# endif
- return;
-# endif
-# if defined (IRIX5) || defined(OSF1) || defined(HURD)
- (*(REAL_SIG_PF)old_handler) (sig, code, scp);
- return;
-# endif
+ /*
+ * FIXME: This code should probably check if the
+ * old signal handler used the traditional style and
+ * if so call it using that style.
+ */
# ifdef MSWIN32
return((*old_handler)(exc_info));
+# else
+ if (used_si)
+ ((SIG_HNDLR_PTR)old_handler) (sig, si, raw_sc);
+ else
+ /* FIXME: should pass nonstandard args as well. */
+ ((PLAIN_HNDLR_PTR)old_handler) (sig);
+ return;
# endif
}
}
@@ -2623,10 +2455,6 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
async_set_pht_entry_from_index(GC_dirty_pages, index);
}
-# if defined(OSF1)
- /* These reset the signal handler each time by default. */
- signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
-# endif
/* The write may not take place before dirty bits are read. */
/* But then we'll fault again ... */
# if defined(MSWIN32) || defined(MSWINCE)
@@ -2638,7 +2466,7 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
#if defined(MSWIN32) || defined(MSWINCE)
return EXCEPTION_CONTINUE_SEARCH;
#else
- GC_err_printf1("Segfault at 0x%lx\n", addr);
+ GC_err_printf("Segfault at %p\n", addr);
ABORT("Unexpected bus error or segmentation fault");
#endif
}
@@ -2650,10 +2478,7 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
* starting at h are no longer protected. If is_ptrfree is false,
* also ensure that they will subsequently appear to be dirty.
*/
-void GC_remove_protection(h, nblocks, is_ptrfree)
-struct hblk *h;
-word nblocks;
-GC_bool is_ptrfree;
+void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
{
struct hblk * h_trunc; /* Truncated to page boundary */
struct hblk * h_end; /* Page boundary following block end */
@@ -2676,21 +2501,12 @@ GC_bool is_ptrfree;
}
#if !defined(DARWIN)
-void GC_dirty_init()
+void GC_dirty_init(void)
{
-# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
- defined(OSF1) || defined(HURD)
+# if !defined(MSWIN32) && !defined(MSWINCE)
struct sigaction act, oldact;
- /* We should probably specify SA_SIGINFO for Linux, and handle */
- /* the different architectures more uniformly. */
-# if defined(IRIX5) || defined(LINUX) && !defined(X86_64) \
- || defined(OSF1) || defined(HURD)
- act.sa_flags = SA_RESTART;
- act.sa_handler = (SIG_PF)GC_write_fault_handler;
-# else
- act.sa_flags = SA_RESTART | SA_SIGINFO;
- act.sa_sigaction = GC_write_fault_handler;
-# endif
+ act.sa_flags = SA_RESTART | SA_SIGINFO;
+ act.sa_sigaction = GC_write_fault_handler;
(void)sigemptyset(&act.sa_mask);
# ifdef SIG_SUSPEND
/* Arrange to postpone SIG_SUSPEND while we're in a write fault */
@@ -2698,42 +2514,16 @@ void GC_dirty_init()
/* stopping the world for GC. */
(void)sigaddset(&act.sa_mask, SIG_SUSPEND);
# endif /* SIG_SUSPEND */
-# endif
-# ifdef PRINTSTATS
- GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
# endif
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf(
+ "Initializing mprotect virtual dirty bit implementation\n");
GC_dirty_maintained = TRUE;
if (GC_page_size % HBLKSIZE != 0) {
- GC_err_printf0("Page size not multiple of HBLKSIZE\n");
+ GC_err_printf("Page size not multiple of HBLKSIZE\n");
ABORT("Page size not multiple of HBLKSIZE");
}
-# if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
- GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
- if (GC_old_bus_handler == SIG_IGN) {
- GC_err_printf0("Previously ignored bus error!?");
- GC_old_bus_handler = SIG_DFL;
- }
- if (GC_old_bus_handler != SIG_DFL) {
-# ifdef PRINTSTATS
- GC_err_printf0("Replaced other SIGBUS handler\n");
-# endif
- }
-# endif
-# if defined(SUNOS4)
- GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
- if (GC_old_segv_handler == SIG_IGN) {
- GC_err_printf0("Previously ignored segmentation violation!?");
- GC_old_segv_handler = SIG_DFL;
- }
- if (GC_old_segv_handler != SIG_DFL) {
-# ifdef PRINTSTATS
- GC_err_printf0("Replaced other SIGSEGV handler\n");
-# endif
- }
-# endif
-# if (defined(SUNOS5SIGS) && !defined(FREEBSD)) || defined(IRIX5) \
- || defined(LINUX) || defined(OSF1) || defined(HURD)
- /* SUNOS5SIGS includes HPUX */
+# if !defined(MSWIN32) && !defined(MSWINCE)
# if defined(GC_IRIX_THREADS)
sigaction(SIGSEGV, 0, &oldact);
sigaction(SIGSEGV, &act, 0);
@@ -2743,47 +2533,46 @@ void GC_dirty_init()
if (res != 0) ABORT("Sigaction failed");
}
# endif
-# if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
- /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
- /* sa_sigaction. */
- GC_old_segv_handler = oldact.sa_handler;
-# else /* Irix 6.x or SUNOS5SIGS or LINUX */
- if (oldact.sa_flags & SA_SIGINFO) {
- GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
- } else {
- GC_old_segv_handler = oldact.sa_handler;
- }
-# endif
- if (GC_old_segv_handler == SIG_IGN) {
- GC_err_printf0("Previously ignored segmentation violation!?");
- GC_old_segv_handler = SIG_DFL;
+ if (oldact.sa_flags & SA_SIGINFO) {
+ GC_old_segv_handler = oldact.sa_sigaction;
+ GC_old_segv_handler_used_si = TRUE;
+ } else {
+ GC_old_segv_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
+ GC_old_segv_handler_used_si = FALSE;
}
- if (GC_old_segv_handler != SIG_DFL) {
-# ifdef PRINTSTATS
- GC_err_printf0("Replaced other SIGSEGV handler\n");
-# endif
+ if (GC_old_segv_handler == (SIG_HNDLR_PTR)SIG_IGN) {
+ GC_err_printf("Previously ignored segmentation violation!?");
+ GC_old_segv_handler = (SIG_HNDLR_PTR)SIG_DFL;
+ }
+ if (GC_old_segv_handler != (SIG_HNDLR_PTR)SIG_DFL) {
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Replaced other SIGSEGV handler\n");
}
-# endif /* (SUNOS5SIGS && !FREEBSD) || IRIX5 || LINUX || OSF1 || HURD */
+# endif /* ! MS windows */
# if defined(HPUX) || defined(LINUX) || defined(HURD) \
|| (defined(FREEBSD) && defined(SUNOS5SIGS))
sigaction(SIGBUS, &act, &oldact);
- GC_old_bus_handler = oldact.sa_handler;
- if (GC_old_bus_handler == SIG_IGN) {
- GC_err_printf0("Previously ignored bus error!?");
- GC_old_bus_handler = SIG_DFL;
+ if (oldact.sa_flags & SA_SIGINFO) {
+ GC_old_bus_handler = oldact.sa_sigaction;
+ GC_old_bus_handler_used_si = TRUE;
+ } else {
+ GC_old_bus_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
+ GC_old_bus_handler_used_si = FALSE;
}
- if (GC_old_bus_handler != SIG_DFL) {
-# ifdef PRINTSTATS
- GC_err_printf0("Replaced other SIGBUS handler\n");
-# endif
+ if (GC_old_bus_handler == (SIG_HNDLR_PTR)SIG_IGN) {
+ GC_err_printf("Previously ignored bus error!?");
+ GC_old_bus_handler = (SIG_HNDLR_PTR)SIG_DFL;
+ }
+ if (GC_old_bus_handler != (SIG_HNDLR_PTR)SIG_DFL) {
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Replaced other SIGBUS handler\n");
}
# endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
# if defined(MSWIN32)
GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
if (GC_old_segv_handler != NULL) {
-# ifdef PRINTSTATS
- GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
-# endif
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Replaced other UnhandledExceptionFilter\n");
} else {
GC_old_segv_handler = SIG_DFL;
}
@@ -2791,7 +2580,7 @@ void GC_dirty_init()
}
#endif /* !DARWIN */
-int GC_incremental_protection_needs()
+int GC_incremental_protection_needs(void)
{
if (GC_page_size == HBLKSIZE) {
return GC_PROTECTS_POINTER_HEAP;
@@ -2805,7 +2594,7 @@ int GC_incremental_protection_needs()
#define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
#define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
-void GC_protect_heap()
+void GC_protect_heap(void)
{
ptr_t start;
word len;
@@ -2867,7 +2656,7 @@ void GC_protect_heap()
/* We assume that either the world is stopped or its OK to lose dirty */
/* bits while this is happenning (as in GC_enable_incremental). */
-void GC_read_dirty()
+void GC_read_dirty(void)
{
BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
(sizeof GC_dirty_pages));
@@ -2875,8 +2664,7 @@ void GC_read_dirty()
GC_protect_heap();
}
-GC_bool GC_page_was_dirty(h)
-struct hblk * h;
+GC_bool GC_page_was_dirty(struct hblk *h)
{
register word index = PHT_HASH(h);
@@ -2893,7 +2681,7 @@ struct hblk * h;
static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
-void GC_begin_syscall()
+void GC_begin_syscall(void)
{
if (!I_HOLD_LOCK()) {
LOCK();
@@ -2901,7 +2689,7 @@ void GC_begin_syscall()
}
}
-void GC_end_syscall()
+void GC_end_syscall(void)
{
if (syscall_acquired_lock) {
syscall_acquired_lock = FALSE;
@@ -2909,9 +2697,7 @@ void GC_end_syscall()
}
}
-void GC_unprotect_range(addr, len)
-ptr_t addr;
-word len;
+void GC_unprotect_range(ptr_t addr, word len)
{
struct hblk * start_block;
struct hblk * end_block;
@@ -2953,20 +2739,9 @@ word len;
/* make sure that input is available. */
/* Another, preferred alternative is to ensure that system calls never */
/* write to the protected heap (see above). */
-# if defined(__STDC__) && !defined(SUNOS4)
-# include <unistd.h>
-# include <sys/uio.h>
- ssize_t read(int fd, void *buf, size_t nbyte)
-# else
-# ifndef LINT
- int read(fd, buf, nbyte)
-# else
- int GC_read(fd, buf, nbyte)
-# endif
- int fd;
- char *buf;
- int nbyte;
-# endif
+# include <unistd.h>
+# include <sys/uio.h>
+ssize_t read(int fd, void *buf, size_t nbyte)
{
int result;
@@ -3025,17 +2800,14 @@ word len;
#endif /* 0 */
/*ARGSUSED*/
-GC_bool GC_page_was_ever_dirty(h)
-struct hblk *h;
+GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
return(TRUE);
}
/* Reset the n pages starting at h to "was never dirty" status. */
/*ARGSUSED*/
-void GC_is_fresh(h, n)
-struct hblk *h;
-word n;
+void GC_is_fresh(struct hblk *h, word n)
{
}
@@ -3084,8 +2856,7 @@ char *GC_proc_buf;
#endif
/* Add all pages in pht2 to pht1 */
-void GC_or_pages(pht1, pht2)
-page_hash_table pht1, pht2;
+void GC_or_pages(page_hash_table pht1, page_hash_table pht2)
{
register int i;
@@ -3094,21 +2865,21 @@ page_hash_table pht1, pht2;
int GC_proc_fd;
-void GC_dirty_init()
+void GC_dirty_init(void)
{
int fd;
char buf[30];
GC_dirty_maintained = TRUE;
- if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
+ if (GC_bytes_allocd != 0 || GC_bytes_allocd_before_gc != 0) {
register int i;
for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
-# ifdef PRINTSTATS
- GC_printf1("Allocated words:%lu:all pages may have been written\n",
- (unsigned long)
- (GC_words_allocd + GC_words_allocd_before_gc));
-# endif
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf(
+ "Allocated bytes:%lu:all pages may have been written\n",
+ (unsigned long)
+ (GC_bytes_allocd + GC_bytes_allocd_before_gc));
}
sprintf(buf, "/proc/%d", getpid());
fd = open(buf, O_RDONLY);
@@ -3126,7 +2897,7 @@ void GC_dirty_init()
GC_fresh_pages = (struct hblk **)
GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
if (GC_fresh_pages == 0) {
- GC_err_printf0("No space for fresh pages\n");
+ GC_err_printf("No space for fresh pages\n");
EXIT();
}
BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
@@ -3148,7 +2919,7 @@ GC_bool is_ptrfree;
# define READ(fd,buf,nbytes) read(fd, buf, nbytes)
#endif
-void GC_read_dirty()
+void GC_read_dirty(void)
{
unsigned long ps, np;
int nmaps;
@@ -3157,16 +2928,14 @@ void GC_read_dirty()
char * bufp;
ptr_t current_addr, limit;
int i;
-int dummy;
BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
bufp = GC_proc_buf;
if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
-# ifdef PRINTSTATS
- GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
- GC_proc_buf_size);
-# endif
+ if (GC_print_stats)
+ GC_log_printf("/proc read failed: GC_proc_buf_size = %lu\n",
+ (unsigned long)GC_proc_buf_size);
{
/* Retry with larger buffer. */
word new_size = 2 * GC_proc_buf_size;
@@ -3238,7 +3007,7 @@ int dummy;
#undef READ
-GC_bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(struct hblk *h)
-struct hblk *h;
{
register word index = PHT_HASH(h);
@@ -3256,8 +3025,7 @@ struct hblk *h;
return(result);
}
-GC_bool GC_page_was_ever_dirty(h)
-struct hblk *h;
+GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
register word index = PHT_HASH(h);
register GC_bool result;
@@ -3270,9 +3038,7 @@ struct hblk *h;
}
/* Caller holds allocation lock. */
-void GC_is_fresh(h, n)
-struct hblk *h;
-word n;
+void GC_is_fresh(struct hblk *h, word n)
{
register word index;
@@ -3302,7 +3068,7 @@ PCR_VD_DB GC_grungy_bits[NPAGES];
ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
/* HBLKSIZE aligned. */
-void GC_dirty_init()
+void GC_dirty_init(void)
{
GC_dirty_maintained = TRUE;
/* For the time being, we assume the heap generally grows up */
@@ -3316,7 +3082,7 @@ void GC_dirty_init()
}
}
-void GC_read_dirty()
+void GC_read_dirty(void)
{
/* lazily enable dirty bits on newly added heap sects */
{
@@ -3336,8 +3102,7 @@ void GC_read_dirty()
}
}
-GC_bool GC_page_was_dirty(h)
-struct hblk *h;
+GC_bool GC_page_was_dirty(struct hblk *h)
{
if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
return(TRUE);
@@ -3346,10 +3111,7 @@ struct hblk *h;
}
/*ARGSUSED*/
-void GC_remove_protection(h, nblocks, is_ptrfree)
-struct hblk *h;
-word nblocks;
-GC_bool is_ptrfree;
+void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
{
PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
@@ -3463,7 +3225,7 @@ static void GC_mprotect_thread_notify(mach_msg_id_t id) {
}
/* Should only be called by the mprotect thread */
-static void GC_mprotect_thread_reply() {
+static void GC_mprotect_thread_reply(void) {
GC_msg_t msg;
mach_msg_return_t r;
/* remote, local */
@@ -3486,10 +3248,10 @@ static void GC_mprotect_thread_reply() {
ABORT("mach_msg failed in GC_mprotect_thread_reply");
}
-void GC_mprotect_stop() {
+void GC_mprotect_stop(void) {
GC_mprotect_thread_notify(ID_STOP);
}
-void GC_mprotect_resume() {
+void GC_mprotect_resume(void) {
GC_mprotect_thread_notify(ID_RESUME);
}
@@ -3530,8 +3292,8 @@ static void *GC_mprotect_thread(void *arg) {
id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
-#if defined(THREADS)
- if(GC_mprotect_state == GC_MP_DISCARDING) {
+# if defined(THREADS)
+ if(GC_mprotect_state == GC_MP_DISCARDING) {
if(r == MACH_RCV_TIMED_OUT) {
GC_mprotect_state = GC_MP_STOPPED;
GC_mprotect_thread_reply();
@@ -3539,17 +3301,17 @@ static void *GC_mprotect_thread(void *arg) {
}
if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
ABORT("out of order mprotect thread request");
- }
-#endif
+ }
+# endif /* THREADS */
if(r != MACH_MSG_SUCCESS) {
- GC_err_printf2("mach_msg failed with %d %s\n",
- (int)r,mach_error_string(r));
+ GC_err_printf("mach_msg failed with %d %s\n",
+ (int)r, mach_error_string(r));
ABORT("mach_msg failed");
}
switch(id) {
-#if defined(THREADS)
+# if defined(THREADS)
case ID_STOP:
if(GC_mprotect_state != GC_MP_NORMAL)
ABORT("Called mprotect_stop when state wasn't normal");
@@ -3561,7 +3323,7 @@ static void *GC_mprotect_thread(void *arg) {
GC_mprotect_state = GC_MP_NORMAL;
GC_mprotect_thread_reply();
break;
-#endif /* THREADS */
+# endif /* THREADS */
default:
/* Handle the message (calls catch_exception_raise) */
if(!exc_server(&msg.head,&reply.head))
@@ -3576,15 +3338,15 @@ static void *GC_mprotect_thread(void *arg) {
MACH_MSG_TIMEOUT_NONE,
MACH_PORT_NULL);
if(r != MACH_MSG_SUCCESS) {
- /* This will fail if the thread dies, but the thread shouldn't
- die... */
- #ifdef BROKEN_EXCEPTION_HANDLING
- GC_err_printf2(
+ /* This will fail if the thread dies, but the thread */
+ /* shouldn't die... */
+# ifdef BROKEN_EXCEPTION_HANDLING
+ GC_err_printf(
"mach_msg failed with %d %s while sending exc reply\n",
(int)r,mach_error_string(r));
- #else
+# else
ABORT("mach_msg failed while sending exception reply");
- #endif
+# endif
}
} /* switch */
} /* for(;;) */
@@ -3598,8 +3360,8 @@ static void *GC_mprotect_thread(void *arg) {
meaningless and safe to ignore. */
#ifdef BROKEN_EXCEPTION_HANDLING
-typedef void (* SIG_PF)();
-static SIG_PF GC_old_bus_handler;
+typedef void (* SIG_HNDLR_PTR)();
+static SIG_HNDLR_PTR GC_old_bus_handler;
/* Updates to this aren't atomic, but the SIGBUSs seem pretty rare.
Even if this doesn't get updated property, it isn't really a problem */
@@ -3615,29 +3377,28 @@ static void GC_darwin_sigbus(int num,siginfo_t *sip,void *context) {
ABORT("Got more than 8 SIGBUSs in a row!");
} else {
GC_sigbus_count++;
- GC_err_printf0("GC: WARNING: Ignoring SIGBUS.\n");
+ GC_err_printf("GC: WARNING: Ignoring SIGBUS.\n");
}
}
#endif /* BROKEN_EXCEPTION_HANDLING */
-void GC_dirty_init() {
+void GC_dirty_init(void) {
kern_return_t r;
mach_port_t me;
pthread_t thread;
pthread_attr_t attr;
exception_mask_t mask;
-# ifdef PRINTSTATS
- GC_printf0("Inititalizing mach/darwin mprotect virtual dirty bit "
- "implementation\n");
-# endif
-# ifdef BROKEN_EXCEPTION_HANDLING
- GC_err_printf0("GC: WARNING: Enabling workarounds for various darwin "
+ if (GC_print_stats == VERBOSE)
+ GC_log_printf("Initializing mach/darwin mprotect virtual dirty bit "
+ "implementation\n");
+# ifdef BROKEN_EXCEPTION_HANDLING
+ GC_err_printf("GC: WARNING: Enabling workarounds for various darwin "
"exception handling bugs.\n");
-# endif
+# endif
GC_dirty_maintained = TRUE;
if (GC_page_size % HBLKSIZE != 0) {
- GC_err_printf0("Page size not multiple of HBLKSIZE\n");
+ GC_err_printf("Page size not multiple of HBLKSIZE\n");
ABORT("Page size not multiple of HBLKSIZE");
}
@@ -3693,15 +3454,14 @@ void GC_dirty_init() {
#ifdef BROKEN_EXCEPTION_HANDLING
{
struct sigaction sa, oldsa;
- sa.sa_handler = (SIG_PF)GC_darwin_sigbus;
+ sa.sa_handler = (SIG_HNDLR_PTR)GC_darwin_sigbus;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART|SA_SIGINFO;
if(sigaction(SIGBUS,&sa,&oldsa) < 0) ABORT("sigaction");
- GC_old_bus_handler = (SIG_PF)oldsa.sa_handler;
+ GC_old_bus_handler = (SIG_HNDLR_PTR)oldsa.sa_handler;
if (GC_old_bus_handler != SIG_DFL) {
-# ifdef PRINTSTATS
- GC_err_printf0("Replaced other SIGBUS handler\n");
-# endif
+ if (GC_print_stats == VERBOSE)
+ GC_err_printf("Replaced other SIGBUS handler\n");
}
}
#endif /* BROKEN_EXCEPTION_HANDLING */
@@ -3796,7 +3556,7 @@ catch_exception_raise(
if(exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
#ifdef DEBUG_EXCEPTION_HANDLING
/* We aren't interested, pass it on to the old handler */
- GC_printf3("Exception: 0x%x Code: 0x%x 0x%x in catch....\n",
+ GC_printf("Exception: 0x%x Code: 0x%x 0x%x in catch....\n",
exception,
code_count > 0 ? code[0] : -1,
code_count > 1 ? code[1] : -1);
@@ -3810,8 +3570,8 @@ catch_exception_raise(
/* The thread is supposed to be suspended while the exception handler
is called. This shouldn't fail. */
#ifdef BROKEN_EXCEPTION_HANDLING
- GC_err_printf0("thread_get_state failed in "
- "catch_exception_raise\n");
+ GC_err_printf("thread_get_state failed in "
+ "catch_exception_raise\n");
return KERN_SUCCESS;
#else
ABORT("thread_get_state failed in catch_exception_raise");
@@ -3837,17 +3597,17 @@ catch_exception_raise(
}
if(++last_fault_count < 32) {
if(last_fault_count == 1)
- GC_err_printf1(
+ GC_err_printf(
"GC: WARNING: Ignoring KERN_PROTECTION_FAILURE at %p\n",
addr);
return KERN_SUCCESS;
}
- GC_err_printf1("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
+ GC_err_printf("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
/* Can't pass it along to the signal handler because that is
ignoring SIGBUS signals. We also shouldn't call ABORT here as
signals don't always work too well from the exception handler. */
- GC_err_printf0("Aborting\n");
+ GC_err_printf("Aborting\n");
exit(EXIT_FAILURE);
#else /* BROKEN_EXCEPTION_HANDLING */
/* Pass it along to the next exception handler
@@ -3874,7 +3634,7 @@ catch_exception_raise(
will just fault again once it resumes */
} else {
/* Shouldn't happen, i don't think */
- GC_printf0("KERN_PROTECTION_FAILURE while world is stopped\n");
+ GC_printf("KERN_PROTECTION_FAILURE while world is stopped\n");
return FWD();
}
return KERN_SUCCESS;
@@ -3890,6 +3650,7 @@ kern_return_t catch_exception_raise_state(mach_port_name_t exception_port,
ABORT("catch_exception_raise_state");
return(KERN_INVALID_ARGUMENT);
}
+
kern_return_t catch_exception_raise_state_identity(
mach_port_name_t exception_port, mach_port_t thread, mach_port_t task,
int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
@@ -3943,27 +3704,15 @@ kern_return_t catch_exception_raise_state_identity(
long fr_argd[6];
long fr_argx[0];
};
+# elif defined (DRSNX)
+# include <sys/sparc/frame.h>
+# elif defined(OPENBSD) || defined(NETBSD)
+# include <frame.h>
# else
-# if defined(SUNOS4)
-# include <machine/frame.h>
-# else
-# if defined (DRSNX)
-# include <sys/sparc/frame.h>
-# else
-# if defined(OPENBSD) || defined(NETBSD)
-# include <frame.h>
-# else
-# if defined(FREEBSD)
-# include <machine/frame.h>
-# else
-# include <sys/frame.h>
-# endif
-# endif
-# endif
-# endif
+# include <sys/frame.h>
# endif
# if NARGS > 6
- --> We only know how to to get the first 6 arguments
+# error We only know how to to get the first 6 arguments
# endif
#endif /* SPARC */
@@ -3986,8 +3735,7 @@ kern_return_t catch_exception_raise_state_identity(
#if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
&& defined(GC_HAVE_BUILTIN_BACKTRACE)
-void GC_save_callers (info)
-struct callinfo info[NFRAMES];
+void GC_save_callers (struct callinfo info[NFRAMES])
{
void * tmp_info[NFRAMES + 1];
int npcs, i;
@@ -4003,7 +3751,7 @@ struct callinfo info[NFRAMES];
#else /* No builtin backtrace; do it ourselves */
-#if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
+#if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
# define FR_SAVFP fr_fp
# define FR_SAVPC fr_pc
#else
@@ -4017,8 +3765,7 @@ struct callinfo info[NFRAMES];
# define BIAS 0
#endif
-void GC_save_callers (info)
-struct callinfo info[NFRAMES];
+void GC_save_callers (struct callinfo info[NFRAMES])
{
struct frame *frame;
struct frame *fp;
@@ -4054,8 +3801,7 @@ struct callinfo info[NFRAMES];
#ifdef NEED_CALLINFO
/* Print info to stderr. We do NOT hold the allocation lock */
-void GC_print_callers (info)
-struct callinfo info[NFRAMES];
+void GC_print_callers (struct callinfo info[NFRAMES])
{
register int i;
static int reentry_count = 0;
@@ -4068,9 +3814,9 @@ struct callinfo info[NFRAMES];
UNLOCK();
# if NFRAMES == 1
- GC_err_printf0("\tCaller at allocation:\n");
+ GC_err_printf("\tCaller at allocation:\n");
# else
- GC_err_printf0("\tCall chain at allocation:\n");
+ GC_err_printf("\tCall chain at allocation:\n");
# endif
for (i = 0; i < NFRAMES && !stop ; i++) {
if (info[i].ci_pc == 0) break;
@@ -4078,19 +3824,19 @@ struct callinfo info[NFRAMES];
{
int j;
- GC_err_printf0("\t\targs: ");
+ GC_err_printf("\t\targs: ");
for (j = 0; j < NARGS; j++) {
- if (j != 0) GC_err_printf0(", ");
- GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
+ if (j != 0) GC_err_printf(", ");
+ GC_err_printf("%d (0x%X)", ~(info[i].ci_arg[j]),
~(info[i].ci_arg[j]));
}
- GC_err_printf0("\n");
+ GC_err_printf("\n");
}
# endif
if (reentry_count > 1) {
/* We were called during an allocation during */
/* a previous GC_print_callers call; punt. */
- GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
+ GC_err_printf("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
continue;
}
{
@@ -4165,8 +3911,8 @@ struct callinfo info[NFRAMES];
if (result_buf[result_len - 1] == '\n') --result_len;
result_buf[result_len] = 0;
if (result_buf[0] == '?'
- || result_buf[result_len-2] == ':'
- && result_buf[result_len-1] == '0') {
+ || (result_buf[result_len-2] == ':'
+ && result_buf[result_len-1] == '0')) {
pclose(pipe);
goto out;
}
@@ -4190,7 +3936,7 @@ struct callinfo info[NFRAMES];
out:;
}
# endif /* LINUX */
- GC_err_printf1("\t\t%s\n", name);
+ GC_err_printf("\t\t%s\n", name);
# if defined(GC_HAVE_BUILTIN_BACKTRACE) \
&& !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
free(sym_name); /* May call GC_free; that's OK */
@@ -4217,11 +3963,11 @@ static word dump_maps(char *maps)
return 1;
}
-void GC_print_address_map()
+void GC_print_address_map(void)
{
- GC_err_printf0("---------- Begin address map ----------\n");
+ GC_err_printf("---------- Begin address map ----------\n");
GC_apply_to_maps(dump_maps);
- GC_err_printf0("---------- End address map ----------\n");
+ GC_err_printf("---------- End address map ----------\n");
}
#endif
diff --git a/pcr_interface.c b/pcr_interface.c
index 7bf02a45..77bddf80 100644
--- a/pcr_interface.c
+++ b/pcr_interface.c
@@ -65,26 +65,27 @@ typedef struct {
PCR_Any ed_client_data;
} enumerate_data;
-void GC_enumerate_block(h, ed)
-register struct hblk *h;
-enumerate_data * ed;
+void GC_enumerate_block(struct hblk *h; enumerate_data * ed)
{
register hdr * hhdr;
register int sz;
- word *p;
- word * lim;
+ ptr_t p;
+ ptr_t lim;
+ word descr;
+# error This code was updated without testing.
+# error and its precursor was clearly broken.
hhdr = HDR(h);
+ descr = hhdr -> hb_descr;
sz = hhdr -> hb_sz;
- if (sz >= 0 && ed -> ed_pointerfree
- || sz <= 0 && !(ed -> ed_pointerfree)) return;
- if (sz < 0) sz = -sz;
- lim = (word *)(h+1) - sz;
- p = (word *)h;
+ if (descr != 0 && ed -> ed_pointerfree
+ || descr == 0 && !(ed -> ed_pointerfree)) return;
+ lim = (ptr_t)(h+1) - sz;
+ p = (ptr_t)h;
do {
if (PCR_ERes_IsErr(ed -> ed_fail_code)) return;
ed -> ed_fail_code =
- (*(ed -> ed_proc))(p, WORDS_TO_BYTES(sz), ed -> ed_client_data);
+ (*(ed -> ed_proc))(p, sz, ed -> ed_client_data);
p+= sz;
} while (p <= lim);
}
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 832c49ca..efc18d15 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -30,11 +30,11 @@ void GC_print_sig_mask()
if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
ABORT("pthread_sigmask");
- GC_printf0("Blocked: ");
+ GC_printf("Blocked: ");
for (i = 1; i < NSIG; i++) {
- if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
+ if (sigismember(&blocked, i)) { GC_printf("%d ", i); }
}
- GC_printf0("\n");
+ GC_printf("\n");
}
#endif
@@ -43,14 +43,12 @@ void GC_print_sig_mask()
/* handler from a set. */
void GC_remove_allowed_signals(sigset_t *set)
{
-# ifdef NO_SIGNALS
- if (sigdelset(set, SIGINT) != 0
+ if (sigdelset(set, SIGINT) != 0
|| sigdelset(set, SIGQUIT) != 0
|| sigdelset(set, SIGABRT) != 0
|| sigdelset(set, SIGTERM) != 0) {
ABORT("sigdelset() failed");
- }
-# endif
+ }
# ifdef MPROTECT_VDB
/* Handlers write to the thread structure, which is in the heap, */
@@ -117,7 +115,7 @@ void GC_suspend_handler(int sig)
if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
#if DEBUG_THREADS
- GC_printf1("Suspending 0x%lx\n", my_thread);
+ GC_printf("Suspending 0x%x\n", (unsigned)my_thread);
#endif
me = GC_lookup_thread(my_thread);
@@ -164,7 +162,7 @@ void GC_suspend_handler(int sig)
/* continue prematurely in a future round. */
#if DEBUG_THREADS
- GC_printf1("Continuing 0x%lx\n", my_thread);
+ GC_printf("Continuing 0x%x\n", (unsigned)my_thread);
#endif
}
@@ -192,7 +190,7 @@ void GC_restart_handler(int sig)
*/
#if DEBUG_THREADS
- GC_printf1("In GC_restart_handler for 0x%lx\n", pthread_self());
+ GC_printf("In GC_restart_handler for 0x%x\n", (unsigned)pthread_self());
#endif
}
@@ -215,7 +213,7 @@ void GC_push_all_stacks()
if (!GC_thr_initialized) GC_thr_init();
#if DEBUG_THREADS
- GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
+ GC_printf("Pushing stacks from thread 0x%x\n", (unsigned) me);
#endif
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (p = GC_threads[i]; p != 0; p = p -> next) {
@@ -241,9 +239,8 @@ void GC_push_all_stacks()
IF_IA64(bs_lo = BACKING_STORE_BASE;)
}
#if DEBUG_THREADS
- GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
- (unsigned long) p -> id,
- (unsigned long) lo, (unsigned long) hi);
+ GC_printf("Stack for thread 0x%x = [%p,%p)\n",
+ (unsigned)(p -> id), lo, hi);
#endif
if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
# ifdef STACK_GROWS_UP
@@ -254,9 +251,8 @@ void GC_push_all_stacks()
# endif
# ifdef IA64
# if DEBUG_THREADS
- GC_printf3("Reg stack for thread 0x%lx = [%lx,%lx)\n",
- (unsigned long) p -> id,
- (unsigned long) bs_lo, (unsigned long) bs_hi);
+ GC_printf("Reg stack for thread 0x%x = [%lx,%lx)\n",
+ (unsigned)p -> id, bs_lo, bs_hi);
# endif
if (pthread_equal(p -> id, me)) {
GC_push_all_eager(bs_lo, bs_hi);
@@ -296,7 +292,8 @@ int GC_suspend_all()
if (p -> thread_blocked) /* Will wait */ continue;
n_live_threads++;
#if DEBUG_THREADS
- GC_printf1("Sending suspend signal to 0x%lx\n", p -> id);
+ GC_printf("Sending suspend signal to 0x%x\n",
+ (unsigned)(p -> id));
#endif
result = pthread_kill(p -> id, SIG_SUSPEND);
@@ -324,7 +321,7 @@ void GC_stop_world()
int code;
#if DEBUG_THREADS
- GC_printf1("Stopping the world from 0x%lx\n", pthread_self());
+ GC_printf("Stopping the world from 0x%x\n", (unsigned)pthread_self());
#endif
/* Make sure all free list construction has stopped before we start. */
@@ -351,12 +348,10 @@ void GC_stop_world()
if (wait_usecs > RETRY_INTERVAL) {
int newly_sent = GC_suspend_all();
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Resent %ld signals after timeout\n",
- newly_sent);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Resent %d signals after timeout\n",
+ newly_sent);
+ }
sem_getvalue(&GC_suspend_ack_sem, &ack_count);
if (newly_sent < n_live_threads - ack_count) {
WARN("Lost some threads during GC_stop_world?!\n",0);
@@ -369,8 +364,17 @@ void GC_stop_world()
}
}
for (i = 0; i < n_live_threads; i++) {
+ retry:
if (0 != (code = sem_wait(&GC_suspend_ack_sem))) {
- GC_err_printf1("Sem_wait returned %ld\n", (unsigned long)code);
+ GC_err_printf("Sem_wait returned %d (errno = %d)\n", code, errno);
+# ifdef LINUX
+ GC_err_printf("\tSem_wait is documented to never do this.\n");
+# endif
+ if (errno == EINTR) {
+ /* Seems to happen with some versions of gdb. */
+ GC_err_printf("\tRetrying anyway\n");
+ goto retry;
+ }
ABORT("sem_wait for handler failed");
}
}
@@ -378,7 +382,7 @@ void GC_stop_world()
GC_release_mark_lock();
# endif
#if DEBUG_THREADS
- GC_printf1("World stopped from 0x%lx\n", pthread_self());
+ GC_printf("World stopped from 0x%x\n", (unsigned)pthread_self());
#endif
GC_stopping_thread = 0; /* debugging only */
}
@@ -394,7 +398,7 @@ void GC_start_world()
register int result;
# if DEBUG_THREADS
- GC_printf0("World starting\n");
+ GC_printf("World starting\n");
# endif
for (i = 0; i < THREAD_TABLE_SZ; i++) {
@@ -404,10 +408,11 @@ void GC_start_world()
if (p -> thread_blocked) continue;
n_live_threads++;
#if DEBUG_THREADS
- GC_printf1("Sending restart signal to 0x%lx\n", p -> id);
+ GC_printf("Sending restart signal to 0x%x\n",
+ (unsigned)(p -> id));
#endif
- result = pthread_kill(p -> id, SIG_THR_RESTART);
+ result = pthread_kill(p -> id, SIG_THR_RESTART);
switch(result) {
case ESRCH:
/* Not really there anymore. Possible? */
@@ -422,7 +427,7 @@ void GC_start_world()
}
}
#if DEBUG_THREADS
- GC_printf0("World started\n");
+ GC_printf("World started\n");
#endif
}
@@ -462,11 +467,9 @@ void GC_stop_init() {
if (0 != GETENV("GC_NO_RETRY_SIGNALS")) {
GC_retry_signals = FALSE;
}
-# ifdef CONDPRINT
- if (GC_print_stats && GC_retry_signals) {
- GC_printf0("Will retry suspend signal if necessary.\n");
- }
-# endif
+ if (GC_print_stats && GC_retry_signals) {
+ GC_log_printf("Will retry suspend signal if necessary.\n");
+ }
}
#endif
diff --git a/pthread_support.c b/pthread_support.c
index e6b751b8..688297f8 100644
--- a/pthread_support.c
+++ b/pthread_support.c
@@ -2,7 +2,7 @@
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996 by Silicon Graphics. All rights reserved.
* Copyright (c) 1998 by Fergus Henderson. All rights reserved.
- * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 2000-2001 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -64,7 +64,7 @@
# endif
# if defined USE_HPUX_TLS
- --> Macro replaced by USE_COMPILER_TLS
+# error USE_HPUX_TLS macro was replaced by USE_COMPILER_TLS
# endif
# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
@@ -158,11 +158,13 @@
# endif
#endif
-void GC_thr_init();
+void GC_thr_init(void);
static GC_bool parallel_initialized = FALSE;
-void GC_init_parallel();
+GC_bool GC_need_to_lock = FALSE;
+
+void GC_init_parallel(void);
# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
@@ -179,25 +181,25 @@ static GC_bool keys_initialized;
/* Note that the indexing scheme differs, in that gfl has finer size */
/* resolution, even if not all entries are used. */
/* We hold the allocator lock. */
-static void return_freelists(ptr_t *fl, ptr_t *gfl)
+static void return_freelists(void **fl, void **gfl)
{
int i;
- ptr_t q, *qptr;
- size_t nwords;
+ void *q, **qptr;
- for (i = 1; i < NFREELISTS; ++i) {
- nwords = i * (GRANULARITY/sizeof(word));
+ for (i = 1; i < TINY_FREELISTS; ++i) {
qptr = fl + i;
q = *qptr;
if ((word)q >= HBLKSIZE) {
- if (gfl[nwords] == 0) {
- gfl[nwords] = q;
+ if (gfl[i] == 0) {
+ gfl[i] = q;
} else {
+ GC_ASSERT(GC_size(q) == GRANULES_TO_BYTES(i));
+ GC_ASSERT(GC_size(gfl[i]) == GRANULES_TO_BYTES(i));
/* Concatenate: */
for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
GC_ASSERT(0 == q);
- *qptr = gfl[nwords];
- gfl[nwords] = fl[i];
+ *qptr = gfl[i];
+ gfl[i] = fl[i];
}
}
/* Clear fl[i], since the thread structure may hang around. */
@@ -228,23 +230,23 @@ void GC_init_thread_local(GC_thread p)
if (0 != GC_setspecific(GC_thread_key, p)) {
ABORT("Failed to set thread specific allocation pointers");
}
- for (i = 1; i < NFREELISTS; ++i) {
- p -> ptrfree_freelists[i] = (ptr_t)1;
- p -> normal_freelists[i] = (ptr_t)1;
+ for (i = 1; i < TINY_FREELISTS; ++i) {
+ p -> ptrfree_freelists[i] = (void *)1;
+ p -> normal_freelists[i] = (void *)1;
# ifdef GC_GCJ_SUPPORT
- p -> gcj_freelists[i] = (ptr_t)1;
+ p -> gcj_freelists[i] = (void *)1;
# endif
}
/* Set up the size 0 free lists. */
- p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
- p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
+ p -> ptrfree_freelists[0] = (void *)(&size_zero_object);
+ p -> normal_freelists[0] = (void *)(&size_zero_object);
# ifdef GC_GCJ_SUPPORT
- p -> gcj_freelists[0] = (ptr_t)(-1);
+ p -> gcj_freelists[0] = (void *)(-1);
# endif
}
#ifdef GC_GCJ_SUPPORT
- extern ptr_t * GC_gcjobjfreelist;
+ extern void ** GC_gcjobjfreelist;
#endif
/* We hold the allocator lock. */
@@ -262,16 +264,14 @@ void GC_destroy_thread_local(GC_thread p)
# endif
}
-extern GC_PTR GC_generic_malloc_many();
-
-GC_PTR GC_local_malloc(size_t bytes)
+void * GC_local_malloc(size_t bytes)
{
if (EXPECT(!SMALL_ENOUGH(bytes),0)) {
return(GC_malloc(bytes));
} else {
- int index = INDEX_FROM_BYTES(bytes);
- ptr_t * my_fl;
- ptr_t my_entry;
+ int index = INDEX_FROM_REQUESTED_BYTES(bytes);
+ void ** my_fl;
+ void * my_entry;
# if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
GC_key_t k = GC_thread_key;
# endif
@@ -295,42 +295,45 @@ GC_PTR GC_local_malloc(size_t bytes)
my_fl = ((GC_thread)tsd) -> normal_freelists + index;
my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
- ptr_t next = obj_link(my_entry);
- GC_PTR result = (GC_PTR)my_entry;
+ void * next = obj_link(my_entry);
+ void * result = (void *)my_entry;
*my_fl = next;
obj_link(my_entry) = 0;
PREFETCH_FOR_WRITE(next);
+ GC_ASSERT(GC_size(result) >= bytes + EXTRA_BYTES);
+ GC_ASSERT(((word *)result)[1] == 0);
return result;
} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
*my_fl = my_entry + index + 1;
return GC_malloc(bytes);
} else {
- GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
+ GC_generic_malloc_many(RAW_BYTES_FROM_INDEX(index), NORMAL, my_fl);
if (*my_fl == 0) return GC_oom_fn(bytes);
return GC_local_malloc(bytes);
}
}
}
-GC_PTR GC_local_malloc_atomic(size_t bytes)
+void * GC_local_malloc_atomic(size_t bytes)
{
if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
return(GC_malloc_atomic(bytes));
} else {
- int index = INDEX_FROM_BYTES(bytes);
- ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
+ int index = INDEX_FROM_REQUESTED_BYTES(bytes);
+ void **my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
-> ptrfree_freelists + index;
- ptr_t my_entry = *my_fl;
+ void *my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
- GC_PTR result = (GC_PTR)my_entry;
+ void * result = my_entry;
*my_fl = obj_link(my_entry);
+ GC_ASSERT(GC_size(result) >= bytes + EXTRA_BYTES);
return result;
} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
*my_fl = my_entry + index + 1;
- return GC_malloc_atomic(bytes);
+ return GC_malloc_atomic(bytes);
} else {
- GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
+ GC_generic_malloc_many(RAW_BYTES_FROM_INDEX(index), PTRFREE, my_fl);
/* *my_fl is updated while the collector is excluded; */
/* the free list is always visible to the collector as */
/* such. */
@@ -350,19 +353,19 @@ GC_PTR GC_local_malloc_atomic(size_t bytes)
extern int GC_gcj_kind;
-GC_PTR GC_local_gcj_malloc(size_t bytes,
+void * GC_local_gcj_malloc(size_t bytes,
void * ptr_to_struct_containing_descr)
{
GC_ASSERT(GC_gcj_malloc_initialized);
if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
} else {
- int index = INDEX_FROM_BYTES(bytes);
- ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
+ int index = INDEX_FROM_REQUESTED_BYTES(bytes);
+ void **my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
-> gcj_freelists + index;
- ptr_t my_entry = *my_fl;
+ void *my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
- GC_PTR result = (GC_PTR)my_entry;
+ void * result = my_entry;
GC_ASSERT(!GC_incremental);
/* We assert that any concurrent marker will stop us. */
/* Thus it is impossible for a mark procedure to see the */
@@ -376,6 +379,7 @@ GC_PTR GC_local_gcj_malloc(size_t bytes,
/* A memory barrier is probably never needed, since the */
/* action of stopping this thread will cause prior writes */
/* to complete. */
+ GC_ASSERT(GC_size(result) >= bytes + EXTRA_BYTES);
GC_ASSERT(((void * volatile *)result)[1] == 0);
*(void * volatile *)result = ptr_to_struct_containing_descr;
return result;
@@ -385,7 +389,8 @@ GC_PTR GC_local_gcj_malloc(size_t bytes,
/* path. Thus we leave the counter alone. */
return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
} else {
- GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
+ GC_generic_malloc_many(RAW_BYTES_FROM_INDEX(index),
+ GC_gcj_kind, my_fl);
if (*my_fl == 0) return GC_oom_fn(bytes);
return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
}
@@ -444,7 +449,7 @@ void * GC_mark_thread(void * id)
my_mark_no = GC_mark_no;
}
# ifdef DEBUG_THREADS
- GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
+ GC_printf("Starting mark helper for mark number %lu\n", my_mark_no);
# endif
GC_help_marker(my_mark_no);
}
@@ -458,7 +463,7 @@ pthread_t GC_mark_threads[MAX_MARKERS];
#define PTHREAD_CREATE REAL_FUNC(pthread_create)
-static void start_mark_threads()
+static void start_mark_threads(void)
{
unsigned i;
pthread_attr_t attr;
@@ -489,11 +494,9 @@ static void start_mark_threads()
}
}
# endif /* HPUX || GC_DGUX386_THREADS */
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf("Starting %ld marker threads\n", GC_markers - 1);
+ }
for (i = 0; i < GC_markers - 1; ++i) {
if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
GC_mark_thread, (void *)(word)i)) {
@@ -504,7 +507,7 @@ static void start_mark_threads()
#else /* !PARALLEL_MARK */
-static __inline__ void start_mark_threads()
+static __inline__ void start_mark_threads(void)
{
}
@@ -514,7 +517,7 @@ GC_bool GC_thr_initialized = FALSE;
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
-void GC_push_thread_structures GC_PROTO((void))
+void GC_push_thread_structures(void)
{
GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
@@ -523,7 +526,7 @@ void GC_push_thread_structures GC_PROTO((void))
# endif
}
-#ifdef THREAD_LOCAL_ALLOC
+#if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
/* We must explicitly mark ptrfree and gcj free lists, since the free */
/* list links wouldn't otherwise be found. We also set them in the */
/* normal free lists, since that involves touching less memory than if */
@@ -536,7 +539,7 @@ void GC_mark_thread_local_free_lists(void)
for (i = 0; i < THREAD_TABLE_SZ; ++i) {
for (p = GC_threads[i]; 0 != p; p = p -> next) {
- for (j = 1; j < NFREELISTS; ++j) {
+ for (j = 1; j < TINY_FREELISTS; ++j) {
q = p -> ptrfree_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
q = p -> normal_freelists[j];
@@ -549,6 +552,36 @@ void GC_mark_thread_local_free_lists(void)
}
}
}
+
+#if defined(GC_ASSERTIONS)
+# if defined(USE_COMPILER_TLS) || defined(USE_PTHREAD_SPECIFIC)
+ void GC_check_tls(void) {};
+# else
+ void GC_check_tls(void) {
+ int i, j;
+ GC_thread p;
+ ptr_t q;
+
+ for (i = 0; i < THREAD_TABLE_SZ; ++i) {
+ for (p = GC_threads[i]; 0 != p; p = p -> next) {
+ for (j = 1; j < TINY_FREELISTS; ++j) {
+ q = p -> ptrfree_freelists[j];
+ if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
+ q = p -> normal_freelists[j];
+ if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
+# ifdef GC_GCJ_SUPPORT
+ q = p -> gcj_freelists[j];
+ if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
+# endif /* GC_GCJ_SUPPORT */
+ }
+ }
+ }
+ if (GC_thread_key != 0)
+ GC_check_tsd_marks(GC_thread_key);
+ }
+# endif
+#endif /* GC_ASSERTIONS */
+
#endif /* THREAD_LOCAL_ALLOC */
static struct GC_Thread_Rep first_thread;
@@ -561,12 +594,14 @@ GC_thread GC_new_thread(pthread_t id)
GC_thread result;
static GC_bool first_thread_used = FALSE;
+ GC_ASSERT(I_HOLD_LOCK());
if (!first_thread_used) {
result = &first_thread;
first_thread_used = TRUE;
} else {
result = (struct GC_Thread_Rep *)
GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
+ GC_ASSERT(result -> flags == 0);
}
if (result == 0) return(0);
result -> id = id;
@@ -694,7 +729,7 @@ int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
#ifdef GC_LINUX_THREADS
/* Return the number of processors, or i<= 0 if it can't be determined. */
-int GC_get_nprocs()
+int GC_get_nprocs(void)
{
/* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
/* appears to be buggy in many cases. */
@@ -733,7 +768,7 @@ int GC_get_nprocs()
/* If wait_for_all is true, then we exit with the GC lock held and no */
/* collection in progress; otherwise we just wait for the current GC */
/* to finish. */
-extern GC_bool GC_collection_in_progress();
+extern GC_bool GC_collection_in_progress(void);
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
if (GC_incremental && GC_collection_in_progress()) {
@@ -813,7 +848,7 @@ void GC_fork_child_proc(void)
#if defined(GC_DGUX386_THREADS)
/* Return the number of processors, or i<= 0 if it can't be determined. */
-int GC_get_nprocs()
+int GC_get_nprocs(void)
{
/* <takis@XFree86.Org> */
int numCpus;
@@ -830,18 +865,18 @@ int GC_get_nprocs()
numCpus = pm_sysinfo.idle_vp_count;
# ifdef DEBUG_THREADS
- GC_printf1("Number of active CPUs in this system: %d\n", numCpus);
+ GC_printf("Number of active CPUs in this system: %d\n", numCpus);
# endif
return(numCpus);
}
#endif /* GC_DGUX386_THREADS */
/* We hold the allocation lock. */
-void GC_thr_init()
+void GC_thr_init(void)
{
-# ifndef GC_DARWIN_THREADS
- int dummy;
-# endif
+# ifndef GC_DARWIN_THREADS
+ int dummy;
+# endif
GC_thread t;
if (GC_thr_initialized) return;
@@ -909,19 +944,16 @@ void GC_thr_init()
# endif
}
# ifdef PARALLEL_MARK
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf2("Number of processors = %ld, "
+ if (GC_print_stats) {
+ GC_log_printf("Number of processors = %ld, "
"number of marker threads = %ld\n", GC_nprocs, GC_markers);
- }
-# endif
+ }
if (GC_markers == 1) {
GC_parallel = FALSE;
-# ifdef CONDPRINT
- if (GC_print_stats) {
- GC_printf0("Single marker thread, turning off parallel marking\n");
- }
-# endif
+ if (GC_print_stats) {
+ GC_log_printf(
+ "Single marker thread, turning off parallel marking\n");
+ }
} else {
GC_parallel = TRUE;
/* Disable true incremental collection, but generational is OK. */
@@ -938,7 +970,7 @@ void GC_thr_init()
/* Called without allocation lock. */
/* Must be called before a second thread is created. */
/* Called without allocation lock. */
-void GC_init_parallel()
+void GC_init_parallel(void)
{
if (parallel_initialized) return;
parallel_initialized = TRUE;
@@ -1127,9 +1159,9 @@ void * GC_start_routine(void * arg)
my_pthread = pthread_self();
# ifdef DEBUG_THREADS
- GC_printf1("Starting thread 0x%lx\n", my_pthread);
- GC_printf1("pid = %ld\n", (long) getpid());
- GC_printf1("sp = 0x%lx\n", (long) &arg);
+ GC_printf("Starting thread 0x%x\n", (unsigned)my_pthread);
+ GC_printf("pid = %ld\n", (long) getpid());
+ GC_printf("sp = 0x%lx\n", (long) &arg);
# endif
LOCK();
GC_in_thread_creation = TRUE;
@@ -1168,7 +1200,7 @@ void * GC_start_routine(void * arg)
UNLOCK();
start = si -> start_routine;
# ifdef DEBUG_THREADS
- GC_printf1("start_routine = 0x%lx\n", start);
+ GC_printf("start_routine = %p\n", (void *)start);
# endif
start_arg = si -> arg;
sem_post(&(si -> registered)); /* Last action on si. */
@@ -1181,7 +1213,7 @@ void * GC_start_routine(void * arg)
# endif
result = (*start)(start_arg);
#if DEBUG_THREADS
- GC_printf1("Finishing thread 0x%x\n", pthread_self());
+ GC_printf("Finishing thread 0x%x\n", (unsigned)pthread_self());
#endif
me -> status = result;
pthread_cleanup_pop(1);
@@ -1243,14 +1275,15 @@ WRAP_FUNC(pthread_create)(pthread_t *new_thread,
si -> flags = my_flags;
UNLOCK();
# ifdef DEBUG_THREADS
- GC_printf1("About to start new thread from thread 0x%X\n",
- pthread_self());
+ GC_printf("About to start new thread from thread 0x%x\n",
+ (unsigned)pthread_self());
# endif
+ GC_need_to_lock = TRUE;
result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
# ifdef DEBUG_THREADS
- GC_printf1("Started thread 0x%X\n", *new_thread);
+ GC_printf("Started thread 0x%x\n", (unsigned)(*new_thread));
# endif
/* Wait until child has been added to the thread table. */
/* This also ensures that we hold onto si until the child is done */
@@ -1300,7 +1333,7 @@ WRAP_FUNC(pthread_create)(pthread_t *new_thread,
#endif /* GENERIC_COMPARE_AND_SWAP */
/* Spend a few cycles in a way that can't introduce contention with */
/* othre threads. */
-void GC_pause()
+void GC_pause(void)
{
int i;
# if !defined(__GNUC__) || defined(__INTEL_COMPILER)
@@ -1320,7 +1353,7 @@ void GC_pause()
#define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
/* give up. */
-VOLATILE GC_bool GC_collecting = 0;
+volatile GC_bool GC_collecting = 0;
/* A hint that we're in the collector and */
/* holding the allocation lock for an */
/* extended period. */
@@ -1392,10 +1425,10 @@ void GC_generic_lock(pthread_mutex_t * lock)
/* as STL alloc.h. This isn't really the right way to do this. */
/* but until the POSIX scheduling mess gets straightened out ... */
-volatile unsigned int GC_allocate_lock = 0;
+volatile AO_TS_t GC_allocate_lock = 0;
-void GC_lock()
+void GC_lock(void)
{
# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
@@ -1405,18 +1438,18 @@ void GC_lock()
unsigned my_last_spins;
int i;
- if (!GC_test_and_set(&GC_allocate_lock)) {
+ if (!AO_test_and_set_acquire(&GC_allocate_lock)) {
return;
}
my_spin_max = spin_max;
my_last_spins = last_spins;
for (i = 0; i < my_spin_max; i++) {
if (GC_collecting || GC_nprocs == 1) goto yield;
- if (i < my_last_spins/2 || GC_allocate_lock) {
+ if (i < my_last_spins/2) {
GC_pause();
continue;
}
- if (!GC_test_and_set(&GC_allocate_lock)) {
+ if (!AO_test_and_set_acquire(&GC_allocate_lock)) {
/*
* got it!
* Spinning worked. Thus we're probably not being scheduled
@@ -1432,7 +1465,7 @@ void GC_lock()
spin_max = low_spin_max;
yield:
for (i = 0;; ++i) {
- if (!GC_test_and_set(&GC_allocate_lock)) {
+ if (!AO_test_and_set_acquire(&GC_allocate_lock)) {
return;
}
# define SLEEP_THRESHOLD 12
@@ -1458,7 +1491,7 @@ yield:
}
#else /* !USE_SPINLOCK */
-void GC_lock()
+void GC_lock(void)
{
#ifndef NO_PTHREAD_TRYLOCK
if (1 == GC_nprocs || GC_collecting) {
@@ -1495,7 +1528,7 @@ void GC_lock()
static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
-void GC_acquire_mark_lock()
+void GC_acquire_mark_lock(void)
{
/*
if (pthread_mutex_lock(&mark_mutex) != 0) {
@@ -1508,7 +1541,7 @@ void GC_acquire_mark_lock()
# endif
}
-void GC_release_mark_lock()
+void GC_release_mark_lock(void)
{
GC_ASSERT(GC_mark_lock_holder == pthread_self());
# ifdef GC_ASSERTIONS
@@ -1524,7 +1557,7 @@ void GC_release_mark_lock()
/* 2) Partial free lists referenced only by locals may not be scanned */
/* correctly, e.g. if they contain "pointer-free" objects, since the */
/* free-list link may be ignored. */
-void GC_wait_builder()
+void GC_wait_builder(void)
{
GC_ASSERT(GC_mark_lock_holder == pthread_self());
# ifdef GC_ASSERTIONS
@@ -1539,7 +1572,7 @@ void GC_wait_builder()
# endif
}
-void GC_wait_for_reclaim()
+void GC_wait_for_reclaim(void)
{
GC_acquire_mark_lock();
while (GC_fl_builder_count > 0) {
@@ -1548,7 +1581,7 @@ void GC_wait_for_reclaim()
GC_release_mark_lock();
}
-void GC_notify_all_builder()
+void GC_notify_all_builder(void)
{
GC_ASSERT(GC_mark_lock_holder == pthread_self());
if (pthread_cond_broadcast(&builder_cv) != 0) {
@@ -1562,7 +1595,7 @@ void GC_notify_all_builder()
static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
-void GC_wait_marker()
+void GC_wait_marker(void)
{
GC_ASSERT(GC_mark_lock_holder == pthread_self());
# ifdef GC_ASSERTIONS
@@ -1577,7 +1610,7 @@ void GC_wait_marker()
# endif
}
-void GC_notify_all_marker()
+void GC_notify_all_marker(void)
{
if (pthread_cond_broadcast(&mark_cv) != 0) {
ABORT("pthread_cond_broadcast failed");
diff --git a/ptr_chck.c b/ptr_chck.c
index d83d730d..23e183c7 100644
--- a/ptr_chck.c
+++ b/ptr_chck.c
@@ -18,19 +18,13 @@
#include "private/gc_pmark.h"
-#ifdef __STDC__
-void GC_default_same_obj_print_proc(GC_PTR p, GC_PTR q)
-#else
-void GC_default_same_obj_print_proc (p, q)
-GC_PTR p, q;
-#endif
+void GC_default_same_obj_print_proc(void * p, void * q)
{
- GC_err_printf2("0x%lx and 0x%lx are not in the same object\n",
- (unsigned long)p, (unsigned long)q);
+ GC_err_printf("%p and %p are not in the same object\n", p, q);
ABORT("GC_same_obj test failed");
}
-void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR, GC_PTR))
+void (*GC_same_obj_print_proc) (void *, void *)
= GC_default_same_obj_print_proc;
/* Check that p and q point to the same object. Call */
@@ -42,17 +36,12 @@ void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR, GC_PTR))
/* We assume this is performance critical. (It shouldn't */
/* be called by production code, but this can easily make */
/* debugging intolerably slow.) */
-#ifdef __STDC__
- GC_PTR GC_same_obj(register void *p, register void *q)
-#else
- GC_PTR GC_same_obj(p, q)
- register char *p, *q;
-#endif
+void * GC_same_obj(void *p, void *q)
{
- register struct hblk *h;
- register hdr *hhdr;
- register ptr_t base, limit;
- register word sz;
+ struct hblk *h;
+ hdr *hhdr;
+ ptr_t base, limit;
+ word sz;
if (!GC_is_initialized) GC_init();
hhdr = HDR((word)p);
@@ -72,13 +61,13 @@ void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR, GC_PTR))
h = FORWARDED_ADDR(h, hhdr);
hhdr = HDR(h);
}
- limit = (ptr_t)((word *)h + hhdr -> hb_sz);
+ limit = (ptr_t)h + hhdr -> hb_sz;
if ((ptr_t)p >= limit || (ptr_t)q >= limit || (ptr_t)q < (ptr_t)h ) {
goto fail;
}
return(p);
}
- sz = WORDS_TO_BYTES(hhdr -> hb_sz);
+ sz = hhdr -> hb_sz;
if (sz > MAXOBJBYTES) {
base = (ptr_t)HBLKPTR(p);
limit = base + sz;
@@ -86,19 +75,15 @@ void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR, GC_PTR))
goto fail;
}
} else {
- register int map_entry;
- register int pdispl = HBLKDISPL(p);
+ int offset;
+ int pdispl = HBLKDISPL(p);
- map_entry = MAP_ENTRY((hhdr -> hb_map), pdispl);
- if (map_entry > CPP_MAX_OFFSET) {
- map_entry = BYTES_TO_WORDS(pdispl) % BYTES_TO_WORDS(sz);
- if (HBLKPTR(p) != HBLKPTR(q)) goto fail;
+ offset = pdispl % sz;
+ if (HBLKPTR(p) != HBLKPTR(q)) goto fail;
/* W/o this check, we might miss an error if */
/* q points to the first object on a page, and */
/* points just before the page. */
- }
- base = (char *)((word)p & ~(WORDS_TO_BYTES(1) - 1));
- base -= WORDS_TO_BYTES(map_entry);
+ base = (ptr_t)p - offset;
limit = base + sz;
}
/* [base, limit) delimits the object containing p, if any. */
@@ -114,19 +99,13 @@ fail:
return(p);
}
-#ifdef __STDC__
-void GC_default_is_valid_displacement_print_proc (GC_PTR p)
-#else
-void GC_default_is_valid_displacement_print_proc (p)
-GC_PTR p;
-#endif
+void GC_default_is_valid_displacement_print_proc (void *p)
{
- GC_err_printf1("0x%lx does not point to valid object displacement\n",
- (unsigned long)p);
+ GC_err_printf("%p does not point to valid object displacement\n", p);
ABORT("GC_is_valid_displacement test failed");
}
-void (*GC_is_valid_displacement_print_proc) GC_PROTO((GC_PTR)) =
+void (*GC_is_valid_displacement_print_proc)(void *) =
GC_default_is_valid_displacement_print_proc;
/* Check that if p is a pointer to a heap page, then it points to */
@@ -135,18 +114,13 @@ void (*GC_is_valid_displacement_print_proc) GC_PROTO((GC_PTR)) =
/* Always returns its argument. */
/* Note that we don't lock, since nothing relevant about the header */
/* should change while we have a valid object pointer to the block. */
-#ifdef __STDC__
- void * GC_is_valid_displacement(void *p)
-#else
- char *GC_is_valid_displacement(p)
- char *p;
-#endif
+void * GC_is_valid_displacement(void *p)
{
- register hdr *hhdr;
- register word pdispl;
- register struct hblk *h;
- register map_entry_type map_entry;
- register word sz;
+ hdr *hhdr;
+ word pdispl;
+ word offset;
+ struct hblk *h;
+ word sz;
if (!GC_is_initialized) GC_init();
hhdr = HDR((word)p);
@@ -161,11 +135,12 @@ void (*GC_is_valid_displacement_print_proc) GC_PROTO((GC_PTR)) =
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
goto fail;
}
- sz = WORDS_TO_BYTES(hhdr -> hb_sz);
+ sz = hhdr -> hb_sz;
pdispl = HBLKDISPL(p);
- map_entry = MAP_ENTRY((hhdr -> hb_map), pdispl);
- if (map_entry == OBJ_INVALID
- || sz > MAXOBJBYTES && (ptr_t)p >= (ptr_t)h + sz) {
+ offset = pdispl % sz;
+ if ((sz > MAXOBJBYTES && (ptr_t)p >= (ptr_t)h + sz)
+ || !GC_valid_offsets[offset]
+ || (ptr_t)p - offset + sz > (ptr_t)(h + 1)) {
goto fail;
}
return(p);
@@ -174,24 +149,16 @@ fail:
return(p);
}
-#ifdef __STDC__
-void GC_default_is_visible_print_proc(GC_PTR p)
-#else
-void GC_default_is_visible_print_proc(p)
-GC_PTR p;
-#endif
+void GC_default_is_visible_print_proc(void * p)
{
- GC_err_printf1("0x%lx is not a GC visible pointer location\n",
- (unsigned long)p);
+ GC_err_printf("%p is not a GC visible pointer location\n", p);
ABORT("GC_is_visible test failed");
}
-void (*GC_is_visible_print_proc) GC_PROTO((GC_PTR p)) =
- GC_default_is_visible_print_proc;
+void (*GC_is_visible_print_proc)(void * p) = GC_default_is_visible_print_proc;
/* Could p be a stack address? */
-GC_bool GC_on_stack(p)
-ptr_t p;
+GC_bool GC_on_stack(ptr_t p)
{
# ifdef THREADS
return(TRUE);
@@ -218,14 +185,9 @@ ptr_t p;
/* untyped allocations. The idea is that it should be possible, though */
/* slow, to add such a call to all indirect pointer stores.) */
/* Currently useless for multithreaded worlds. */
-#ifdef __STDC__
- void * GC_is_visible(void *p)
-#else
- char *GC_is_visible(p)
- char *p;
-#endif
+void * GC_is_visible(void *p)
{
- register hdr *hhdr;
+ hdr *hhdr;
if ((word)p & (ALIGNMENT - 1)) goto fail;
if (!GC_is_initialized) GC_init();
@@ -249,10 +211,8 @@ ptr_t p;
# if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || \
defined(MSWINCE) || defined(PCR)) \
&& !defined(SRC_M3)
- DISABLE_SIGNALS();
GC_register_dynamic_libraries();
result = GC_is_static_root(p);
- ENABLE_SIGNALS();
if (result) return(p);
# endif
goto fail;
@@ -300,12 +260,10 @@ fail:
}
-GC_PTR GC_pre_incr (p, how_much)
-GC_PTR *p;
-size_t how_much;
+void * GC_pre_incr (void **p, size_t how_much)
{
- GC_PTR initial = *p;
- GC_PTR result = GC_same_obj((GC_PTR)((word)initial + how_much), initial);
+ void * initial = *p;
+ void * result = GC_same_obj((void *)((word)initial + how_much), initial);
if (!GC_all_interior_pointers) {
(void) GC_is_valid_displacement(result);
@@ -313,12 +271,10 @@ size_t how_much;
return (*p = result);
}
-GC_PTR GC_post_incr (p, how_much)
-GC_PTR *p;
-size_t how_much;
+void * GC_post_incr (void **p, size_t how_much)
{
- GC_PTR initial = *p;
- GC_PTR result = GC_same_obj((GC_PTR)((word)initial + how_much), initial);
+ void * initial = *p;
+ void * result = GC_same_obj((void *)((word)initial + how_much), initial);
if (!GC_all_interior_pointers) {
(void) GC_is_valid_displacement(result);
diff --git a/real_malloc.c b/real_malloc.c
index dece9fdc..85befdcb 100644
--- a/real_malloc.c
+++ b/real_malloc.c
@@ -23,14 +23,9 @@
# define PCR_NO_RENAME
# include <stdlib.h>
-# ifdef __STDC__
- char * real_malloc(size_t size)
-# else
- char * real_malloc()
- int size;
-# endif
+void * real_malloc(size_t size)
{
- return((char *)malloc(size));
+ return(malloc(size));
}
#endif /* PCR */
diff --git a/reclaim.c b/reclaim.c
index 864c0cad..279b75b7 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -2,7 +2,7 @@
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -17,8 +17,8 @@
#include <stdio.h>
#include "private/gc_priv.h"
-signed_word GC_mem_found = 0;
- /* Number of words of memory reclaimed */
+signed_word GC_bytes_found = 0;
+ /* Number of bytes of memory reclaimed */
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
word GC_fl_builder_count = 0;
@@ -36,8 +36,7 @@ unsigned GC_n_leaked = 0;
GC_bool GC_have_errors = FALSE;
-void GC_add_leaked(leaked)
-ptr_t leaked;
+void GC_add_leaked(ptr_t leaked)
{
if (GC_n_leaked < MAX_LEAKED) {
GC_have_errors = TRUE;
@@ -65,12 +64,12 @@ void GC_print_all_errors ()
for (i = 0; i < GC_n_leaked; ++i) {
ptr_t p = GC_leaked[i];
if (HDR(p) -> hb_obj_kind == PTRFREE) {
- GC_err_printf0("Leaked atomic object at ");
+ GC_err_printf("Leaked atomic object at ");
} else {
- GC_err_printf0("Leaked composite object at ");
+ GC_err_printf("Leaked composite object at ");
}
GC_print_heap_obj(p);
- GC_err_printf0("\n");
+ GC_err_printf("\n");
GC_free(p);
GC_leaked[i] = 0;
}
@@ -79,11 +78,6 @@ void GC_print_all_errors ()
}
-# define FOUND_FREE(hblk, word_no) \
- { \
- GC_add_leaked((ptr_t)hblk + WORDS_TO_BYTES(word_no)); \
- }
-
/*
* reclaim phase
*
@@ -96,245 +90,49 @@ void GC_print_all_errors ()
* memory.
*/
-GC_bool GC_block_empty(hhdr)
-register hdr * hhdr;
-{
- /* We treat hb_marks as an array of words here, even if it is */
- /* actually an array of bytes. Since we only check for zero, there */
- /* are no endian-ness issues. */
- register word *p = (word *)(&(hhdr -> hb_marks[0]));
- register word * plim =
- (word *)(&(hhdr -> hb_marks[MARK_BITS_SZ]));
- while (p < plim) {
- if (*p++) return(FALSE);
- }
- return(TRUE);
-}
-
-/* The following functions sometimes return a DONT_KNOW value. */
-#define DONT_KNOW 2
-
-#ifdef SMALL_CONFIG
-# define GC_block_nearly_full1(hhdr, pat1) DONT_KNOW
-# define GC_block_nearly_full3(hhdr, pat1, pat2) DONT_KNOW
-# define GC_block_nearly_full(hhdr) DONT_KNOW
-#endif
-
-#if !defined(SMALL_CONFIG) && defined(USE_MARK_BYTES)
-
-# define GC_block_nearly_full1(hhdr, pat1) GC_block_nearly_full(hhdr)
-# define GC_block_nearly_full3(hhdr, pat1, pat2) GC_block_nearly_full(hhdr)
-
-
-GC_bool GC_block_nearly_full(hhdr)
-register hdr * hhdr;
-{
- /* We again treat hb_marks as an array of words, even though it */
- /* isn't. We first sum up all the words, resulting in a word */
- /* containing 4 or 8 separate partial sums. */
- /* We then sum the bytes in the word of partial sums. */
- /* This is still endian independant. This fails if the partial */
- /* sums can overflow. */
-# if (BYTES_TO_WORDS(MARK_BITS_SZ)) >= 256
- --> potential overflow; fix the code
-# endif
- register word *p = (word *)(&(hhdr -> hb_marks[0]));
- register word * plim =
- (word *)(&(hhdr -> hb_marks[MARK_BITS_SZ]));
- word sum_vector = 0;
- unsigned sum;
- while (p < plim) {
- sum_vector += *p;
- ++p;
- }
- sum = 0;
- while (sum_vector > 0) {
- sum += sum_vector & 0xff;
- sum_vector >>= 8;
- }
- return (sum > BYTES_TO_WORDS(7*HBLKSIZE/8)/(hhdr -> hb_sz));
-}
-#endif /* USE_MARK_BYTES */
-
-#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
-
-/*
- * Test whether nearly all of the mark words consist of the same
- * repeating pattern.
- */
-#define FULL_THRESHOLD (MARK_BITS_SZ/16)
-
-GC_bool GC_block_nearly_full1(hhdr, pat1)
-hdr *hhdr;
-word pat1;
+GC_bool GC_block_empty(hdr *hhdr)
{
- unsigned i;
- unsigned misses = 0;
- GC_ASSERT((MARK_BITS_SZ & 1) == 0);
- for (i = 0; i < MARK_BITS_SZ; ++i) {
- if ((hhdr -> hb_marks[i] | ~pat1) != ONES) {
- if (++misses > FULL_THRESHOLD) return FALSE;
- }
- }
- return TRUE;
+ return (hhdr -> hb_n_marks == 0);
}
-/*
- * Test whether the same repeating 3 word pattern occurs in nearly
- * all the mark bit slots.
- * This is used as a heuristic, so we're a bit sloppy and ignore
- * the last one or two words.
- */
-GC_bool GC_block_nearly_full3(hhdr, pat1, pat2, pat3)
-hdr *hhdr;
-word pat1, pat2, pat3;
+GC_bool GC_block_nearly_full(hdr *hhdr)
{
- unsigned i;
- unsigned misses = 0;
-
- if (MARK_BITS_SZ < 4) {
- return DONT_KNOW;
- }
- for (i = 0; i < MARK_BITS_SZ - 2; i += 3) {
- if ((hhdr -> hb_marks[i] | ~pat1) != ONES) {
- if (++misses > FULL_THRESHOLD) return FALSE;
- }
- if ((hhdr -> hb_marks[i+1] | ~pat2) != ONES) {
- if (++misses > FULL_THRESHOLD) return FALSE;
- }
- if ((hhdr -> hb_marks[i+2] | ~pat3) != ONES) {
- if (++misses > FULL_THRESHOLD) return FALSE;
- }
- }
- return TRUE;
+ return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
}
-/* Check whether a small object block is nearly full by looking at only */
-/* the mark bits. */
-/* We manually precomputed the mark bit patterns that need to be */
-/* checked for, and we give up on the ones that are unlikely to occur, */
-/* or have period > 3. */
-/* This would be a lot easier with a mark bit per object instead of per */
-/* word, but that would rewuire computing object numbers in the mark */
-/* loop, which would require different data structures ... */
-GC_bool GC_block_nearly_full(hhdr)
-hdr *hhdr;
-{
- int sz = hhdr -> hb_sz;
-
-# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
- return DONT_KNOW; /* Shouldn't be used in any standard config. */
-# endif
-# if CPP_WORDSZ == 32
- switch(sz) {
- case 1:
- return GC_block_nearly_full1(hhdr, 0xffffffffl);
- case 2:
- return GC_block_nearly_full1(hhdr, 0x55555555l);
- case 4:
- return GC_block_nearly_full1(hhdr, 0x11111111l);
- case 6:
- return GC_block_nearly_full3(hhdr, 0x41041041l,
- 0x10410410l,
- 0x04104104l);
- case 8:
- return GC_block_nearly_full1(hhdr, 0x01010101l);
- case 12:
- return GC_block_nearly_full3(hhdr, 0x01001001l,
- 0x10010010l,
- 0x00100100l);
- case 16:
- return GC_block_nearly_full1(hhdr, 0x00010001l);
- case 32:
- return GC_block_nearly_full1(hhdr, 0x00000001l);
- default:
- return DONT_KNOW;
- }
-# endif
-# if CPP_WORDSZ == 64
- switch(sz) {
- case 1:
- return GC_block_nearly_full1(hhdr, 0xffffffffffffffffl);
- case 2:
- return GC_block_nearly_full1(hhdr, 0x5555555555555555l);
- case 4:
- return GC_block_nearly_full1(hhdr, 0x1111111111111111l);
- case 6:
- return GC_block_nearly_full3(hhdr, 0x1041041041041041l,
- 0x4104104104104104l,
- 0x0410410410410410l);
- case 8:
- return GC_block_nearly_full1(hhdr, 0x0101010101010101l);
- case 12:
- return GC_block_nearly_full3(hhdr, 0x1001001001001001l,
- 0x0100100100100100l,
- 0x0010010010010010l);
- case 16:
- return GC_block_nearly_full1(hhdr, 0x0001000100010001l);
- case 32:
- return GC_block_nearly_full1(hhdr, 0x0000000100000001l);
- default:
- return DONT_KNOW;
- }
-# endif
-}
-#endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
+/* FIXME: This should perhaps again be specialized for USE_MARK_BYTES */
+/* and USE_MARK_BITS cases. */
-/* We keep track of reclaimed memory if we are either asked to, or */
-/* we are using the parallel marker. In the latter case, we assume */
-/* that most allocation goes through GC_malloc_many for scalability. */
-/* GC_malloc_many needs the count anyway. */
-# if defined(GATHERSTATS) || defined(PARALLEL_MARK)
-# define INCR_WORDS(sz) n_words_found += (sz)
-# define COUNT_PARAM , count
-# define COUNT_ARG , count
-# define COUNT_DECL signed_word * count;
-# define NWORDS_DECL signed_word n_words_found = 0;
-# define COUNT_UPDATE *count += n_words_found;
-# define MEM_FOUND_ADDR , &GC_mem_found
-# else
-# define INCR_WORDS(sz)
-# define COUNT_PARAM
-# define COUNT_ARG
-# define COUNT_DECL
-# define NWORDS_DECL
-# define COUNT_UPDATE
-# define MEM_FOUND_ADDR
-# endif
/*
* Restore unmarked small objects in h of size sz to the object
* free list. Returns the new list.
- * Clears unmarked objects.
+ * Clears unmarked objects. Sz is in bytes.
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_clear(hbp, hhdr, sz, list COUNT_PARAM)
-register struct hblk *hbp; /* ptr to current heap block */
-register hdr * hhdr;
-register ptr_t list;
-register word sz;
-COUNT_DECL
+ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
+ ptr_t list, signed_word *count)
{
- register int word_no;
- register word *p, *q, *plim;
- NWORDS_DECL
+ int bit_no = 0;
+ word *p, *q, *plim;
+ signed_word n_bytes_found = 0;
GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
+ GC_ASSERT(sz == hhdr -> hb_sz);
+ GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
p = (word *)(hbp->hb_body);
- word_no = 0;
- plim = (word *)((((word)hbp) + HBLKSIZE)
- - WORDS_TO_BYTES(sz));
+ plim = (word *)(hbp->hb_body + HBLKSIZE - sz);
/* go through all words in block */
while( p <= plim ) {
- if( mark_bit_from_hdr(hhdr, word_no) ) {
- p += sz;
+ if( mark_bit_from_hdr(hhdr, bit_no) ) {
+ p = (word *)((ptr_t)p + sz);
} else {
- INCR_WORDS(sz);
+ n_bytes_found += sz;
/* object is available - put on list */
obj_link(p) = list;
list = ((ptr_t)p);
/* Clear object, advance p to next object in the process */
- q = p + sz;
+ q = (word *)((ptr_t)p + sz);
# ifdef USE_MARK_BYTES
GC_ASSERT(!(sz & 1)
&& !((word)p & (2 * sizeof(word) - 1)));
@@ -351,362 +149,79 @@ COUNT_DECL
}
# endif
}
- word_no += sz;
- }
- COUNT_UPDATE
- return(list);
-}
-
-#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
-
-/*
- * A special case for 2 word composite objects (e.g. cons cells):
- */
-/*ARGSUSED*/
-ptr_t GC_reclaim_clear2(hbp, hhdr, list COUNT_PARAM)
-register struct hblk *hbp; /* ptr to current heap block */
-hdr * hhdr;
-register ptr_t list;
-COUNT_DECL
-{
- register word * mark_word_addr = &(hhdr->hb_marks[0]);
- register word *p, *plim;
- register word mark_word;
- register int i;
- NWORDS_DECL
-# define DO_OBJ(start_displ) \
- if (!(mark_word & ((word)1 << start_displ))) { \
- p[start_displ] = (word)list; \
- list = (ptr_t)(p+start_displ); \
- p[start_displ+1] = 0; \
- INCR_WORDS(2); \
+ bit_no += MARK_BIT_OFFSET(sz);
}
-
- p = (word *)(hbp->hb_body);
- plim = (word *)(((word)hbp) + HBLKSIZE);
-
- /* go through all words in block */
- while( p < plim ) {
- mark_word = *mark_word_addr++;
- for (i = 0; i < WORDSZ; i += 8) {
- DO_OBJ(0);
- DO_OBJ(2);
- DO_OBJ(4);
- DO_OBJ(6);
- p += 8;
- mark_word >>= 8;
- }
- }
- COUNT_UPDATE
+ *count += n_bytes_found;
return(list);
-# undef DO_OBJ
}
-/*
- * Another special case for 4 word composite objects:
- */
-/*ARGSUSED*/
-ptr_t GC_reclaim_clear4(hbp, hhdr, list COUNT_PARAM)
-register struct hblk *hbp; /* ptr to current heap block */
-hdr * hhdr;
-register ptr_t list;
-COUNT_DECL
-{
- register word * mark_word_addr = &(hhdr->hb_marks[0]);
- register word *p, *plim;
- register word mark_word;
- NWORDS_DECL
-# define DO_OBJ(start_displ) \
- if (!(mark_word & ((word)1 << start_displ))) { \
- p[start_displ] = (word)list; \
- list = (ptr_t)(p+start_displ); \
- p[start_displ+1] = 0; \
- CLEAR_DOUBLE(p + start_displ + 2); \
- INCR_WORDS(4); \
- }
-
- p = (word *)(hbp->hb_body);
- plim = (word *)(((word)hbp) + HBLKSIZE);
-
- /* go through all words in block */
- while( p < plim ) {
- mark_word = *mark_word_addr++;
- DO_OBJ(0);
- DO_OBJ(4);
- DO_OBJ(8);
- DO_OBJ(12);
- DO_OBJ(16);
- DO_OBJ(20);
- DO_OBJ(24);
- DO_OBJ(28);
-# if CPP_WORDSZ == 64
- DO_OBJ(32);
- DO_OBJ(36);
- DO_OBJ(40);
- DO_OBJ(44);
- DO_OBJ(48);
- DO_OBJ(52);
- DO_OBJ(56);
- DO_OBJ(60);
-# endif
- p += WORDSZ;
- }
- COUNT_UPDATE
- return(list);
-# undef DO_OBJ
-}
-
-#endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
-
/* The same thing, but don't clear objects: */
/*ARGSUSED*/
-ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list COUNT_PARAM)
-register struct hblk *hbp; /* ptr to current heap block */
-register hdr * hhdr;
-register ptr_t list;
-register word sz;
-COUNT_DECL
+ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
+ ptr_t list, signed_word *count)
{
- register int word_no = 0;
- register word *p, *plim;
- NWORDS_DECL
+ int bit_no = 0;
+ word *p, *plim;
+ signed_word n_bytes_found = 0;
+ GC_ASSERT(sz == hhdr -> hb_sz);
p = (word *)(hbp->hb_body);
- plim = (word *)((((word)hbp) + HBLKSIZE)
- - WORDS_TO_BYTES(sz));
+ plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);
/* go through all words in block */
while( p <= plim ) {
- if( !mark_bit_from_hdr(hhdr, word_no) ) {
- INCR_WORDS(sz);
+ if( !mark_bit_from_hdr(hhdr, bit_no) ) {
+ n_bytes_found += sz;
/* object is available - put on list */
obj_link(p) = list;
list = ((ptr_t)p);
}
- p += sz;
- word_no += sz;
+ p = (word *)((ptr_t)p + sz);
+ bit_no += MARK_BIT_OFFSET(sz);
}
- COUNT_UPDATE
+ *count += n_bytes_found;
return(list);
}
/* Don't really reclaim objects, just check for unmarked ones: */
/*ARGSUSED*/
-void GC_reclaim_check(hbp, hhdr, sz)
-register struct hblk *hbp; /* ptr to current heap block */
-register hdr * hhdr;
-register word sz;
+void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
{
- register int word_no = 0;
- register word *p, *plim;
-# ifdef GATHERSTATS
- register int n_words_found = 0;
-# endif
+ int bit_no = 0;
+ ptr_t p, plim;
- p = (word *)(hbp->hb_body);
- plim = (word *)((((word)hbp) + HBLKSIZE)
- - WORDS_TO_BYTES(sz));
+ GC_ASSERT(sz == hhdr -> hb_sz);
+ p = hbp->hb_body;
+ plim = p + HBLKSIZE - sz;
/* go through all words in block */
while( p <= plim ) {
- if( !mark_bit_from_hdr(hhdr, word_no) ) {
- FOUND_FREE(hbp, word_no);
+ if( !mark_bit_from_hdr(hhdr, bit_no) ) {
+ GC_add_leaked(p);
}
p += sz;
- word_no += sz;
+ bit_no += MARK_BIT_OFFSET(sz);
}
}
-#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
-/*
- * Another special case for 2 word atomic objects:
- */
-/*ARGSUSED*/
-ptr_t GC_reclaim_uninit2(hbp, hhdr, list COUNT_PARAM)
-register struct hblk *hbp; /* ptr to current heap block */
-hdr * hhdr;
-register ptr_t list;
-COUNT_DECL
-{
- register word * mark_word_addr = &(hhdr->hb_marks[0]);
- register word *p, *plim;
- register word mark_word;
- register int i;
- NWORDS_DECL
-# define DO_OBJ(start_displ) \
- if (!(mark_word & ((word)1 << start_displ))) { \
- p[start_displ] = (word)list; \
- list = (ptr_t)(p+start_displ); \
- INCR_WORDS(2); \
- }
-
- p = (word *)(hbp->hb_body);
- plim = (word *)(((word)hbp) + HBLKSIZE);
-
- /* go through all words in block */
- while( p < plim ) {
- mark_word = *mark_word_addr++;
- for (i = 0; i < WORDSZ; i += 8) {
- DO_OBJ(0);
- DO_OBJ(2);
- DO_OBJ(4);
- DO_OBJ(6);
- p += 8;
- mark_word >>= 8;
- }
- }
- COUNT_UPDATE
- return(list);
-# undef DO_OBJ
-}
-
-/*
- * Another special case for 4 word atomic objects:
- */
-/*ARGSUSED*/
-ptr_t GC_reclaim_uninit4(hbp, hhdr, list COUNT_PARAM)
-register struct hblk *hbp; /* ptr to current heap block */
-hdr * hhdr;
-register ptr_t list;
-COUNT_DECL
-{
- register word * mark_word_addr = &(hhdr->hb_marks[0]);
- register word *p, *plim;
- register word mark_word;
- NWORDS_DECL
-# define DO_OBJ(start_displ) \
- if (!(mark_word & ((word)1 << start_displ))) { \
- p[start_displ] = (word)list; \
- list = (ptr_t)(p+start_displ); \
- INCR_WORDS(4); \
- }
-
- p = (word *)(hbp->hb_body);
- plim = (word *)(((word)hbp) + HBLKSIZE);
-
- /* go through all words in block */
- while( p < plim ) {
- mark_word = *mark_word_addr++;
- DO_OBJ(0);
- DO_OBJ(4);
- DO_OBJ(8);
- DO_OBJ(12);
- DO_OBJ(16);
- DO_OBJ(20);
- DO_OBJ(24);
- DO_OBJ(28);
-# if CPP_WORDSZ == 64
- DO_OBJ(32);
- DO_OBJ(36);
- DO_OBJ(40);
- DO_OBJ(44);
- DO_OBJ(48);
- DO_OBJ(52);
- DO_OBJ(56);
- DO_OBJ(60);
-# endif
- p += WORDSZ;
- }
- COUNT_UPDATE
- return(list);
-# undef DO_OBJ
-}
-
-/* Finally the one word case, which never requires any clearing: */
-/*ARGSUSED*/
-ptr_t GC_reclaim1(hbp, hhdr, list COUNT_PARAM)
-register struct hblk *hbp; /* ptr to current heap block */
-hdr * hhdr;
-register ptr_t list;
-COUNT_DECL
-{
- register word * mark_word_addr = &(hhdr->hb_marks[0]);
- register word *p, *plim;
- register word mark_word;
- register int i;
- NWORDS_DECL
-# define DO_OBJ(start_displ) \
- if (!(mark_word & ((word)1 << start_displ))) { \
- p[start_displ] = (word)list; \
- list = (ptr_t)(p+start_displ); \
- INCR_WORDS(1); \
- }
-
- p = (word *)(hbp->hb_body);
- plim = (word *)(((word)hbp) + HBLKSIZE);
-
- /* go through all words in block */
- while( p < plim ) {
- mark_word = *mark_word_addr++;
- for (i = 0; i < WORDSZ; i += 4) {
- DO_OBJ(0);
- DO_OBJ(1);
- DO_OBJ(2);
- DO_OBJ(3);
- p += 4;
- mark_word >>= 4;
- }
- }
- COUNT_UPDATE
- return(list);
-# undef DO_OBJ
-}
-
-#endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
/*
* Generic procedure to rebuild a free list in hbp.
* Also called directly from GC_malloc_many.
+ * Sz is now in bytes.
*/
-ptr_t GC_reclaim_generic(hbp, hhdr, sz, init, list COUNT_PARAM)
-struct hblk *hbp; /* ptr to current heap block */
-hdr * hhdr;
-GC_bool init;
-ptr_t list;
-word sz;
-COUNT_DECL
+ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
+ GC_bool init, ptr_t list, signed_word *count)
{
ptr_t result = list;
GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
if (init) {
- switch(sz) {
-# if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
- case 1:
- /* We now issue the hint even if GC_nearly_full returned */
- /* DONT_KNOW. */
- result = GC_reclaim1(hbp, hhdr, list COUNT_ARG);
- break;
- case 2:
- result = GC_reclaim_clear2(hbp, hhdr, list COUNT_ARG);
- break;
- case 4:
- result = GC_reclaim_clear4(hbp, hhdr, list COUNT_ARG);
- break;
-# endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
- default:
- result = GC_reclaim_clear(hbp, hhdr, sz, list COUNT_ARG);
- break;
- }
+ result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
} else {
GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
- switch(sz) {
-# if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
- case 1:
- result = GC_reclaim1(hbp, hhdr, list COUNT_ARG);
- break;
- case 2:
- result = GC_reclaim_uninit2(hbp, hhdr, list COUNT_ARG);
- break;
- case 4:
- result = GC_reclaim_uninit4(hbp, hhdr, list COUNT_ARG);
- break;
-# endif /* !SMALL_CONFIG && !USE_MARK_BYTES */
- default:
- result = GC_reclaim_uninit(hbp, hhdr, sz, list COUNT_ARG);
- break;
- }
+ result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
}
if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
return result;
@@ -718,16 +233,14 @@ COUNT_DECL
* If entirely empty blocks are to be completely deallocated, then
* caller should perform that check.
*/
-void GC_reclaim_small_nonempty_block(hbp, report_if_found COUNT_PARAM)
-register struct hblk *hbp; /* ptr to current heap block */
-int report_if_found; /* Abort if a reclaimable object is found */
-COUNT_DECL
+void GC_reclaim_small_nonempty_block(struct hblk *hbp,
+ int report_if_found, signed_word *count)
{
hdr *hhdr = HDR(hbp);
- word sz = hhdr -> hb_sz;
+ size_t sz = hhdr -> hb_sz;
int kind = hhdr -> hb_obj_kind;
struct obj_kind * ok = &GC_obj_kinds[kind];
- ptr_t * flh = &(ok -> ok_freelist[sz]);
+ void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
@@ -736,7 +249,7 @@ COUNT_DECL
} else {
*flh = GC_reclaim_generic(hbp, hhdr, sz,
(ok -> ok_init || GC_debugging_started),
- *flh MEM_FOUND_ADDR);
+ *flh, &GC_bytes_found);
}
}
@@ -748,51 +261,49 @@ COUNT_DECL
* If report_if_found is TRUE, then process any block immediately, and
* simply report free objects; do not actually reclaim them.
*/
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_reclaim_block(register struct hblk *hbp, word report_if_found)
-# else
- void GC_reclaim_block(hbp, report_if_found)
- register struct hblk *hbp; /* ptr to current heap block */
- word report_if_found; /* Abort if a reclaimable object is found */
-# endif
+void GC_reclaim_block(struct hblk *hbp, word report_if_found)
{
- register hdr * hhdr;
- register word sz; /* size of objects in current block */
- register struct obj_kind * ok;
+ hdr * hhdr = HDR(hbp);
+ size_t sz = hhdr -> hb_sz; /* size of objects in current block */
+ struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
struct hblk ** rlh;
- hhdr = HDR(hbp);
- sz = hhdr -> hb_sz;
- ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
-
- if( sz > MAXOBJSZ ) { /* 1 big object */
+ if( sz > MAXOBJBYTES ) { /* 1 big object */
if( !mark_bit_from_hdr(hhdr, 0) ) {
if (report_if_found) {
- FOUND_FREE(hbp, 0);
+ GC_add_leaked((ptr_t)hbp);
} else {
- word blocks = OBJ_SZ_TO_BLOCKS(sz);
+ size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
if (blocks > 1) {
GC_large_allocd_bytes -= blocks * HBLKSIZE;
}
-# ifdef GATHERSTATS
- GC_mem_found += sz;
-# endif
+ GC_bytes_found += sz;
GC_freehblk(hbp);
}
+ } else {
+ if (hhdr -> hb_descr != 0) {
+ GC_composite_in_use += sz;
+ } else {
+ GC_atomic_in_use += sz;
+ }
}
} else {
GC_bool empty = GC_block_empty(hhdr);
+ GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
+ if (hhdr -> hb_descr != 0) {
+ GC_composite_in_use += sz * hhdr -> hb_n_marks;
+ } else {
+ GC_atomic_in_use += sz * hhdr -> hb_n_marks;
+ }
if (report_if_found) {
- GC_reclaim_small_nonempty_block(hbp, (int)report_if_found
- MEM_FOUND_ADDR);
+ GC_reclaim_small_nonempty_block(hbp, (int)report_if_found,
+ &GC_bytes_found);
} else if (empty) {
-# ifdef GATHERSTATS
- GC_mem_found += BYTES_TO_WORDS(HBLKSIZE);
-# endif
+ GC_bytes_found += HBLKSIZE;
GC_freehblk(hbp);
} else if (TRUE != GC_block_nearly_full(hhdr)){
/* group of smaller objects, enqueue the real work */
- rlh = &(ok -> ok_reclaim_list[sz]);
+ rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
hhdr -> hb_next = *rlh;
*rlh = hbp;
} /* else not worth salvaging. */
@@ -817,26 +328,27 @@ struct Print_stats
#ifdef USE_MARK_BYTES
/* Return the number of set mark bits in the given header */
-int GC_n_set_marks(hhdr)
-hdr * hhdr;
+int GC_n_set_marks(hdr *hhdr)
{
- register int result = 0;
- register int i;
+ int result = 0;
+ int i;
+ int n_objs = HBLK_OBJS(hhdr -> hb_sz);
- for (i = 0; i < MARK_BITS_SZ; i++) {
+ if (0 == n_objs) n_objs = 1;
+ for (i = 0; i < n_objs; i++) {
result += hhdr -> hb_marks[i];
}
+ GC_ASSERT(hhdr -> hb_marks[n_objs]);
return(result);
}
#else
/* Number of set bits in a word. Not performance critical. */
-static int set_bits(n)
-word n;
+static int set_bits(word n)
{
- register word m = n;
- register int result = 0;
+ word m = n;
+ int result = 0;
while (m > 0) {
if (m & 1) result++;
@@ -846,36 +358,41 @@ word n;
}
/* Return the number of set mark bits in the given header */
-int GC_n_set_marks(hhdr)
-hdr * hhdr;
+int GC_n_set_marks(hdr *hhdr)
{
- register int result = 0;
- register int i;
+ int result = 0;
+ int i;
+ int n_objs = HBLK_OBJS(hhdr -> hb_sz);
+ int n_mark_words;
- for (i = 0; i < MARK_BITS_SZ; i++) {
+ if (0 == n_objs) n_objs = 1;
+ n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
+ for (i = 0; i < n_mark_words - 1; i++) {
result += set_bits(hhdr -> hb_marks[i]);
}
- return(result);
+ result += set_bits((hhdr -> hb_marks[n_mark_words])
+ << (n_mark_words * WORDSZ - n_objs));
+ return(result - 1);
}
#endif /* !USE_MARK_BYTES */
/*ARGSUSED*/
-# if defined(__STDC__) || defined(__cplusplus)
- void GC_print_block_descr(struct hblk *h, word dummy)
-# else
- void GC_print_block_descr(h, dummy)
- struct hblk *h;
- word dummy;
-# endif
+void GC_print_block_descr(struct hblk *h, word dummy)
{
- register hdr * hhdr = HDR(h);
- register size_t bytes = WORDS_TO_BYTES(hhdr -> hb_sz);
+ hdr * hhdr = HDR(h);
+ unsigned bytes = hhdr -> hb_sz;
struct Print_stats *ps;
+ unsigned n_marks = GC_n_set_marks(hhdr);
- GC_printf3("(%lu:%lu,%lu)", (unsigned long)(hhdr -> hb_obj_kind),
- (unsigned long)bytes,
- (unsigned long)(GC_n_set_marks(hhdr)));
+ if (hhdr -> hb_n_marks != n_marks) {
+ GC_printf("(%u:%u,%u!=%u)", hhdr -> hb_obj_kind,
+ bytes,
+ hhdr -> hb_n_marks, n_marks);
+ } else {
+ GC_printf("(%u:%u,%u)", hhdr -> hb_obj_kind,
+ bytes, n_marks);
+ }
bytes += HBLKSIZE-1;
bytes &= ~(HBLKSIZE-1);
@@ -888,13 +405,13 @@ void GC_print_block_list()
{
struct Print_stats pstats;
- GC_printf1("(kind(0=ptrfree,1=normal,2=unc.,%lu=stubborn):size_in_bytes, #_marks_set)\n", STUBBORN);
+ GC_printf("(kind(0=ptrfree,1=normal,2=unc.,3=stubborn):size_in_bytes, #_marks_set)\n");
pstats.number_of_blocks = 0;
pstats.total_bytes = 0;
GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
- GC_printf2("\nblocks = %lu, bytes = %lu\n",
- (unsigned long)pstats.number_of_blocks,
- (unsigned long)pstats.total_bytes);
+ GC_printf("\nblocks = %lu, bytes = %lu\n",
+ (unsigned long)pstats.number_of_blocks,
+ (unsigned long)pstats.total_bytes);
}
#endif /* NO_DEBUGGING */
@@ -906,10 +423,9 @@ void GC_print_block_list()
* since may otherwise end up with dangling "descriptor" pointers.
* It may help for other pointer-containing objects.
*/
-void GC_clear_fl_links(flp)
-ptr_t *flp;
+void GC_clear_fl_links(void **flp)
{
- ptr_t next = *flp;
+ void *next = *flp;
while (0 != next) {
*flp = 0;
@@ -922,18 +438,20 @@ ptr_t *flp;
* Perform GC_reclaim_block on the entire heap, after first clearing
* small object free lists (if we are not just looking for leaks).
*/
-void GC_start_reclaim(report_if_found)
-int report_if_found; /* Abort if a GC_reclaimable object is found */
+void GC_start_reclaim(GC_bool report_if_found)
{
int kind;
# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
GC_ASSERT(0 == GC_fl_builder_count);
# endif
+ /* Reset in use counters. GC_reclaim_block recomputes them. */
+ GC_composite_in_use = 0;
+ GC_atomic_in_use = 0;
/* Clear reclaim- and free-lists */
for (kind = 0; kind < GC_n_kinds; kind++) {
- ptr_t *fop;
- ptr_t *lim;
+ void **fop;
+ void **lim;
struct hblk ** rlp;
struct hblk ** rlim;
struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
@@ -941,7 +459,7 @@ int report_if_found; /* Abort if a GC_reclaimable object is found */
if (rlist == 0) continue; /* This kind not used. */
if (!report_if_found) {
- lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJSZ+1]);
+ lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
if (*fop != 0) {
if (should_clobber) {
@@ -953,16 +471,12 @@ int report_if_found; /* Abort if a GC_reclaimable object is found */
}
} /* otherwise free list objects are marked, */
/* and its safe to leave them */
- rlim = rlist + MAXOBJSZ+1;
+ rlim = rlist + MAXOBJGRANULES+1;
for( rlp = rlist; rlp < rlim; rlp++ ) {
*rlp = 0;
}
}
-# ifdef PRINTBLOCKS
- GC_printf0("GC_reclaim: current block sizes:\n");
- GC_print_block_list();
-# endif
/* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
/* or enqueue the block for later processing. */
@@ -984,22 +498,20 @@ int report_if_found; /* Abort if a GC_reclaimable object is found */
* appropriate free list is nonempty, or there are no more blocks to
* sweep.
*/
-void GC_continue_reclaim(sz, kind)
-word sz; /* words */
-int kind;
+void GC_continue_reclaim(size_t sz /* granules */, int kind)
{
- register hdr * hhdr;
- register struct hblk * hbp;
- register struct obj_kind * ok = &(GC_obj_kinds[kind]);
+ hdr * hhdr;
+ struct hblk * hbp;
+ struct obj_kind * ok = &(GC_obj_kinds[kind]);
struct hblk ** rlh = ok -> ok_reclaim_list;
- ptr_t *flh = &(ok -> ok_freelist[sz]);
+ void **flh = &(ok -> ok_freelist[sz]);
if (rlh == 0) return; /* No blocks of this kind. */
rlh += sz;
while ((hbp = *rlh) != 0) {
hhdr = HDR(hbp);
*rlh = hhdr -> hb_next;
- GC_reclaim_small_nonempty_block(hbp, FALSE MEM_FOUND_ADDR);
+ GC_reclaim_small_nonempty_block(hbp, FALSE, &GC_bytes_found);
if (*flh != 0) break;
}
}
@@ -1013,29 +525,26 @@ int kind;
* recently reclaimed, and discard the rest.
* Stop_func may be 0.
*/
-GC_bool GC_reclaim_all(stop_func, ignore_old)
-GC_stop_func stop_func;
-GC_bool ignore_old;
+GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
- register word sz;
- register int kind;
- register hdr * hhdr;
- register struct hblk * hbp;
- register struct obj_kind * ok;
+ word sz;
+ int kind;
+ hdr * hhdr;
+ struct hblk * hbp;
+ struct obj_kind * ok;
struct hblk ** rlp;
struct hblk ** rlh;
-# ifdef PRINTTIMES
- CLOCK_TYPE start_time;
- CLOCK_TYPE done_time;
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE done_time;
+ if (GC_print_stats == VERBOSE)
GET_TIME(start_time);
-# endif
for (kind = 0; kind < GC_n_kinds; kind++) {
ok = &(GC_obj_kinds[kind]);
rlp = ok -> ok_reclaim_list;
if (rlp == 0) continue;
- for (sz = 1; sz <= MAXOBJSZ; sz++) {
+ for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
rlh = rlp + sz;
while ((hbp = *rlh) != 0) {
if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
@@ -1047,15 +556,15 @@ GC_bool ignore_old;
/* It's likely we'll need it this time, too */
/* It's been touched recently, so this */
/* shouldn't trigger paging. */
- GC_reclaim_small_nonempty_block(hbp, FALSE MEM_FOUND_ADDR);
+ GC_reclaim_small_nonempty_block(hbp, FALSE, &GC_bytes_found);
}
}
}
}
-# ifdef PRINTTIMES
+ if (GC_print_stats == VERBOSE) {
GET_TIME(done_time);
- GC_printf1("Disposing of reclaim lists took %lu msecs\n",
- MS_TIME_DIFF(done_time,start_time));
-# endif
+ GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
+ MS_TIME_DIFF(done_time,start_time));
+ }
return(TRUE);
}
diff --git a/setjmp_t.c b/setjmp_t.c
index 07686ef0..9f984fcc 100644
--- a/setjmp_t.c
+++ b/setjmp_t.c
@@ -57,7 +57,7 @@ int * nested_sp()
return(&dummy);
}
-main()
+int main()
{
int dummy;
long ps = GETPAGESIZE();
diff --git a/solaris_pthreads.c b/solaris_pthreads.c
deleted file mode 100644
index d604b6ec..00000000
--- a/solaris_pthreads.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-/*
- * Support code for Solaris threads. Provides functionality we wish Sun
- * had provided. Relies on some information we probably shouldn't rely on.
- * Modified by Peter C. for Solaris Posix Threads.
- */
-
-# include "private/gc_priv.h"
-
-# if defined(GC_SOLARIS_PTHREADS)
-# include <pthread.h>
-# include <thread.h>
-# include <signal.h>
-# include <fcntl.h>
-# include <sys/types.h>
-# include <sys/mman.h>
-# include <sys/time.h>
-# include <sys/resource.h>
-# include <sys/stat.h>
-# include <sys/syscall.h>
-# include <sys/procfs.h>
-# include <sys/lwp.h>
-# include <sys/reg.h>
-# define _CLASSIC_XOPEN_TYPES
-# include <unistd.h>
-# include <errno.h>
-# include "private/solaris_threads.h"
-# include <stdio.h>
-
-#undef pthread_join
-#undef pthread_create
-
-pthread_cond_t GC_prom_join_cv; /* Broadcast when any thread terminates */
-pthread_cond_t GC_create_cv; /* Signalled when a new undetached */
- /* thread starts. */
-
-extern GC_bool GC_multithreaded;
-
-/* We use the allocation lock to protect thread-related data structures. */
-
-/* We stop the world using /proc primitives. This makes some */
-/* minimal assumptions about the threads implementation. */
-/* We don't play by the rules, since the rules make this */
-/* impossible (as of Solaris 2.3). Also note that as of */
-/* Solaris 2.3 the various thread and lwp suspension */
-/* primitives failed to stop threads by the time the request */
-/* is completed. */
-
-
-
-int GC_pthread_join(pthread_t wait_for, void **status)
-{
- return GC_thr_join((thread_t)wait_for, NULL, status);
-}
-
-
-int
-GC_pthread_create(pthread_t *new_thread,
- const pthread_attr_t *attr_in,
- void * (*thread_execp)(void *), void *arg)
-{
- int result;
- GC_thread t;
- pthread_t my_new_thread;
- pthread_attr_t attr;
- word my_flags = 0;
- int flag;
- void * stack = 0;
- size_t stack_size = 0;
- int n;
- struct sched_param schedparam;
-
- (void)pthread_attr_init(&attr);
- if (attr_in != 0) {
- (void)pthread_attr_getstacksize(attr_in, &stack_size);
- (void)pthread_attr_getstackaddr(attr_in, &stack);
- }
-
- LOCK();
- if (!GC_is_initialized) {
- GC_init_inner();
- }
- GC_multithreaded++;
-
- if (stack == 0) {
- if (stack_size == 0)
- stack_size = 1048576;
- /* ^-- 1 MB (this was GC_min_stack_sz, but that
- * violates the pthread_create documentation which
- * says the default value if none is supplied is
- * 1MB) */
- else
- stack_size += thr_min_stack();
-
- stack = (void *)GC_stack_alloc(&stack_size);
- if (stack == 0) {
- GC_multithreaded--;
- UNLOCK();
- errno = ENOMEM;
- return -1;
- }
- } else {
- my_flags |= CLIENT_OWNS_STACK;
- }
- (void)pthread_attr_setstacksize(&attr, stack_size);
- (void)pthread_attr_setstackaddr(&attr, stack);
- if (attr_in != 0) {
- (void)pthread_attr_getscope(attr_in, &n);
- (void)pthread_attr_setscope(&attr, n);
- (void)pthread_attr_getschedparam(attr_in, &schedparam);
- (void)pthread_attr_setschedparam(&attr, &schedparam);
- (void)pthread_attr_getschedpolicy(attr_in, &n);
- (void)pthread_attr_setschedpolicy(&attr, n);
- (void)pthread_attr_getinheritsched(attr_in, &n);
- (void)pthread_attr_setinheritsched(&attr, n);
-
- (void)pthread_attr_getdetachstate(attr_in, &flag);
- if (flag == PTHREAD_CREATE_DETACHED) {
- my_flags |= DETACHED;
- }
- (void)pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
- }
- /*
- * thr_create can call malloc(), which if redirected will
- * attempt to acquire the allocation lock.
- * Unlock here to prevent deadlock.
- */
-
-
-#if 0
-#ifdef I386
- UNLOCK();
-#endif
-#endif
- result =
- pthread_create(&my_new_thread, &attr, thread_execp, arg);
-#if 0
-#ifdef I386
- LOCK();
-#endif
-#endif
- if (result == 0) {
- t = GC_new_thread(my_new_thread);
- t -> flags = my_flags;
- if (!(my_flags & DETACHED)) cond_init(&(t->join_cv), USYNC_THREAD, 0);
- t -> stack = stack;
- t -> stack_size = stack_size;
- if (new_thread != 0) *new_thread = my_new_thread;
- pthread_cond_signal(&GC_create_cv);
- } else {
- if (!(my_flags & CLIENT_OWNS_STACK)) {
- GC_stack_free(stack, stack_size);
- }
- GC_multithreaded--;
- }
- UNLOCK();
- pthread_attr_destroy(&attr);
- return(result);
-}
-
-# else
-
-#ifndef LINT
- int GC_no_sunOS_pthreads;
-#endif
-
-# endif /* GC_SOLARIS_PTHREADS */
-
diff --git a/solaris_threads.c b/solaris_threads.c
deleted file mode 100644
index 8c9866d8..00000000
--- a/solaris_threads.c
+++ /dev/null
@@ -1,959 +0,0 @@
-/*
- * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-/*
- * Support code for Solaris threads. Provides functionality we wish Sun
- * had provided. Relies on some information we probably shouldn't rely on.
- */
-/* Boehm, September 14, 1994 4:44 pm PDT */
-
-# include "private/gc_priv.h"
-
-# if defined(GC_SOLARIS_THREADS) || defined(GC_SOLARIS_PTHREADS)
-# include "private/solaris_threads.h"
-# include <thread.h>
-# include <synch.h>
-# include <signal.h>
-# include <fcntl.h>
-# include <sys/types.h>
-# include <sys/mman.h>
-# include <sys/time.h>
-# include <sys/resource.h>
-# include <sys/stat.h>
-# include <sys/syscall.h>
-# include <sys/procfs.h>
-# include <sys/lwp.h>
-# include <sys/reg.h>
-# define _CLASSIC_XOPEN_TYPES
-# include <unistd.h>
-# include <errno.h>
-
-#ifdef HANDLE_FORK
- --> Not yet supported. Try porting the code from linux_threads.c.
-#endif
-
-/*
- * This is the default size of the LWP arrays. If there are more LWPs
- * than this when a stop-the-world GC happens, set_max_lwps will be
- * called to cope.
- * This must be higher than the number of LWPs at startup time.
- * The threads library creates a thread early on, so the min. is 3
- */
-# define DEFAULT_MAX_LWPS 4
-
-#undef thr_join
-#undef thr_create
-#undef thr_suspend
-#undef thr_continue
-
-cond_t GC_prom_join_cv; /* Broadcast when any thread terminates */
-cond_t GC_create_cv; /* Signalled when a new undetached */
- /* thread starts. */
-
-
-#ifdef MMAP_STACKS
-static int GC_zfd;
-#endif /* MMAP_STACKS */
-
-/* We use the allocation lock to protect thread-related data structures. */
-
-/* We stop the world using /proc primitives. This makes some */
-/* minimal assumptions about the threads implementation. */
-/* We don't play by the rules, since the rules make this */
-/* impossible (as of Solaris 2.3). Also note that as of */
-/* Solaris 2.3 the various thread and lwp suspension */
-/* primitives failed to stop threads by the time the request */
-/* is completed. */
-
-
-static sigset_t old_mask;
-
-/* Sleep for n milliseconds, n < 1000 */
-void GC_msec_sleep(int n)
-{
- struct timespec ts;
-
- ts.tv_sec = 0;
- ts.tv_nsec = 1000000*n;
- if (syscall(SYS_nanosleep, &ts, 0) < 0) {
- ABORT("nanosleep failed");
- }
-}
-/* Turn off preemption; gross but effective. */
-/* Caller has allocation lock. */
-/* Actually this is not needed under Solaris 2.3 and */
-/* 2.4, but hopefully that'll change. */
-void preempt_off()
-{
- sigset_t set;
-
- (void)sigfillset(&set);
- sigdelset(&set, SIGABRT);
- syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
-}
-
-void preempt_on()
-{
- syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
-}
-
-int GC_main_proc_fd = -1;
-
-
-struct lwp_cache_entry {
- lwpid_t lc_id;
- int lc_descr; /* /proc file descriptor. */
-} GC_lwp_cache_default[DEFAULT_MAX_LWPS];
-
-static int max_lwps = DEFAULT_MAX_LWPS;
-static struct lwp_cache_entry *GC_lwp_cache = GC_lwp_cache_default;
-
-static prgregset_t GC_lwp_registers_default[DEFAULT_MAX_LWPS];
-static prgregset_t *GC_lwp_registers = GC_lwp_registers_default;
-
-/* Return a file descriptor for the /proc entry corresponding */
-/* to the given lwp. The file descriptor may be stale if the */
-/* lwp exited and a new one was forked. */
-static int open_lwp(lwpid_t id)
-{
- int result;
- static int next_victim = 0;
- register int i;
-
- for (i = 0; i < max_lwps; i++) {
- if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
- }
- result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
- /*
- * If PIOCOPENLWP fails, try closing fds in the cache until it succeeds.
- */
- if (result < 0 && errno == EMFILE) {
- for (i = 0; i < max_lwps; i++) {
- if (GC_lwp_cache[i].lc_id != 0) {
- (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
- result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
- if (result >= 0 || (result < 0 && errno != EMFILE))
- break;
- }
- }
- }
- if (result < 0) {
- if (errno == EMFILE) {
- ABORT("Too many open files");
- }
- return(-1) /* exited? */;
- }
- if (GC_lwp_cache[next_victim].lc_id != 0)
- (void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
- GC_lwp_cache[next_victim].lc_id = id;
- GC_lwp_cache[next_victim].lc_descr = result;
- if (++next_victim >= max_lwps)
- next_victim = 0;
- return(result);
-}
-
-static void uncache_lwp(lwpid_t id)
-{
- register int i;
-
- for (i = 0; i < max_lwps; i++) {
- if (GC_lwp_cache[i].lc_id == id) {
- (void)syscall(SYS_close, GC_lwp_cache[id].lc_descr);
- GC_lwp_cache[i].lc_id = 0;
- break;
- }
- }
-}
- /* Sequence of current lwp ids */
-static lwpid_t GC_current_ids_default[DEFAULT_MAX_LWPS + 1];
-static lwpid_t *GC_current_ids = GC_current_ids_default;
-
- /* Temporary used below (can be big if large number of LWPs) */
-static lwpid_t last_ids_default[DEFAULT_MAX_LWPS + 1];
-static lwpid_t *last_ids = last_ids_default;
-
-
-#define ROUNDUP(n) WORDS_TO_BYTES(ROUNDED_UP_WORDS(n))
-
-static void set_max_lwps(GC_word n)
-{
- char *mem;
- char *oldmem;
- int required_bytes = ROUNDUP(n * sizeof(struct lwp_cache_entry))
- + ROUNDUP(n * sizeof(prgregset_t))
- + ROUNDUP((n + 1) * sizeof(lwpid_t))
- + ROUNDUP((n + 1) * sizeof(lwpid_t));
-
- GC_expand_hp_inner(divHBLKSZ((word)required_bytes));
- oldmem = mem = GC_scratch_alloc(required_bytes);
- if (0 == mem) ABORT("No space for lwp data structures");
-
- /*
- * We can either flush the old lwp cache or copy it over. Do the latter.
- */
- memcpy(mem, GC_lwp_cache, max_lwps * sizeof(struct lwp_cache_entry));
- GC_lwp_cache = (struct lwp_cache_entry*)mem;
- mem += ROUNDUP(n * sizeof(struct lwp_cache_entry));
-
- BZERO(GC_lwp_registers, max_lwps * sizeof(GC_lwp_registers[0]));
- GC_lwp_registers = (prgregset_t *)mem;
- mem += ROUNDUP(n * sizeof(prgregset_t));
-
-
- GC_current_ids = (lwpid_t *)mem;
- mem += ROUNDUP((n + 1) * sizeof(lwpid_t));
-
- last_ids = (lwpid_t *)mem;
- mem += ROUNDUP((n + 1)* sizeof(lwpid_t));
-
- if (mem > oldmem + required_bytes)
- ABORT("set_max_lwps buffer overflow");
-
- max_lwps = n;
-}
-
-
-/* Stop all lwps in process. Assumes preemption is off. */
-/* Caller has allocation lock (and any other locks he may */
-/* need). */
-static void stop_all_lwps()
-{
- int lwp_fd;
- char buf[30];
- prstatus_t status;
- register int i;
- GC_bool changed;
- lwpid_t me = _lwp_self();
-
- if (GC_main_proc_fd == -1) {
- sprintf(buf, "/proc/%d", getpid());
- GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
- if (GC_main_proc_fd < 0) {
- if (errno == EMFILE)
- ABORT("/proc open failed: too many open files");
- GC_printf1("/proc open failed: errno %d", errno);
- abort();
- }
- }
- BZERO(GC_lwp_registers, sizeof (prgregset_t) * max_lwps);
- for (i = 0; i < max_lwps; i++)
- last_ids[i] = 0;
- for (;;) {
- if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
- ABORT("Main PIOCSTATUS failed");
- if (status.pr_nlwp < 1)
- ABORT("Invalid number of lwps returned by PIOCSTATUS");
- if (status.pr_nlwp >= max_lwps) {
- set_max_lwps(status.pr_nlwp*2 + 10);
- /*
- * The data in the old GC_current_ids and
- * GC_lwp_registers has been trashed. Cleaning out last_ids
- * will make sure every LWP gets re-examined.
- */
- for (i = 0; i < max_lwps; i++)
- last_ids[i] = 0;
- continue;
- }
- if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0)
- ABORT("PIOCLWPIDS failed");
- changed = FALSE;
- for (i = 0; GC_current_ids[i] != 0 && i < max_lwps; i++) {
- if (GC_current_ids[i] != last_ids[i]) {
- changed = TRUE;
- if (GC_current_ids[i] != me) {
- /* PIOCSTOP doesn't work without a writable */
- /* descriptor. And that makes the process */
- /* undebuggable. */
- if (_lwp_suspend(GC_current_ids[i]) < 0) {
- /* Could happen if the lwp exited */
- uncache_lwp(GC_current_ids[i]);
- GC_current_ids[i] = me; /* ignore */
- }
- }
- }
- }
- /*
- * In the unlikely event something does a fork between the
- * PIOCSTATUS and the PIOCLWPIDS.
- */
- if (i >= max_lwps)
- continue;
- /* All lwps in GC_current_ids != me have been suspended. Note */
- /* that _lwp_suspend is idempotent. */
- for (i = 0; GC_current_ids[i] != 0; i++) {
- if (GC_current_ids[i] != last_ids[i]) {
- if (GC_current_ids[i] != me) {
- lwp_fd = open_lwp(GC_current_ids[i]);
- if (lwp_fd == -1)
- {
- GC_current_ids[i] = me;
- continue;
- }
- /* LWP should be stopped. Empirically it sometimes */
- /* isn't, and more frequently the PR_STOPPED flag */
- /* is not set. Wait for PR_STOPPED. */
- if (syscall(SYS_ioctl, lwp_fd,
- PIOCSTATUS, &status) < 0) {
- /* Possible if the descriptor was stale, or */
- /* we encountered the 2.3 _lwp_suspend bug. */
- uncache_lwp(GC_current_ids[i]);
- GC_current_ids[i] = me; /* handle next time. */
- } else {
- while (!(status.pr_flags & PR_STOPPED)) {
- GC_msec_sleep(1);
- if (syscall(SYS_ioctl, lwp_fd,
- PIOCSTATUS, &status) < 0) {
- ABORT("Repeated PIOCSTATUS failed");
- }
- if (status.pr_flags & PR_STOPPED) break;
-
- GC_msec_sleep(20);
- if (syscall(SYS_ioctl, lwp_fd,
- PIOCSTATUS, &status) < 0) {
- ABORT("Repeated PIOCSTATUS failed");
- }
- }
- if (status.pr_who != GC_current_ids[i]) {
- /* can happen if thread was on death row */
- uncache_lwp(GC_current_ids[i]);
- GC_current_ids[i] = me; /* handle next time. */
- continue;
- }
- /* Save registers where collector can */
- /* find them. */
- BCOPY(status.pr_reg, GC_lwp_registers[i],
- sizeof (prgregset_t));
- }
- }
- }
- }
- if (!changed) break;
- for (i = 0; i < max_lwps; i++) last_ids[i] = GC_current_ids[i];
- }
-}
-
-/* Restart all lwps in process. Assumes preemption is off. */
-static void restart_all_lwps()
-{
- int lwp_fd;
- register int i;
- GC_bool changed;
- lwpid_t me = _lwp_self();
-# define PARANOID
-
- for (i = 0; GC_current_ids[i] != 0; i++) {
-# ifdef PARANOID
- if (GC_current_ids[i] != me) {
- int lwp_fd = open_lwp(GC_current_ids[i]);
- prstatus_t status;
-
- if (lwp_fd < 0) ABORT("open_lwp failed");
- if (syscall(SYS_ioctl, lwp_fd,
- PIOCSTATUS, &status) < 0) {
- ABORT("PIOCSTATUS failed in restart_all_lwps");
- }
- if (memcmp(status.pr_reg, GC_lwp_registers[i],
- sizeof (prgregset_t)) != 0) {
- int j;
-
- for(j = 0; j < NPRGREG; j++)
- {
- GC_printf3("%i: %x -> %x\n", j,
- GC_lwp_registers[i][j],
- status.pr_reg[j]);
- }
- ABORT("Register contents changed");
- }
- if (!status.pr_flags & PR_STOPPED) {
- ABORT("lwp no longer stopped");
- }
-#ifdef SPARC
- {
- gwindows_t windows;
- if (syscall(SYS_ioctl, lwp_fd,
- PIOCGWIN, &windows) < 0) {
- ABORT("PIOCSTATUS failed in restart_all_lwps");
- }
- if (windows.wbcnt > 0) ABORT("unsaved register windows");
- }
-#endif
- }
-# endif /* PARANOID */
- if (GC_current_ids[i] == me) continue;
- if (_lwp_continue(GC_current_ids[i]) < 0) {
- ABORT("Failed to restart lwp");
- }
- }
- if (i >= max_lwps) ABORT("Too many lwps");
-}
-
-GC_bool GC_multithreaded = 0;
-
-void GC_stop_world()
-{
- preempt_off();
- if (GC_multithreaded)
- stop_all_lwps();
-}
-
-void GC_start_world()
-{
- if (GC_multithreaded)
- restart_all_lwps();
- preempt_on();
-}
-
-void GC_thr_init(void);
-
-GC_bool GC_thr_initialized = FALSE;
-
-size_t GC_min_stack_sz;
-
-
-/*
- * stack_head is stored at the top of free stacks
- */
-struct stack_head {
- struct stack_head *next;
- ptr_t base;
- thread_t owner;
-};
-
-# define N_FREE_LISTS 25
-struct stack_head *GC_stack_free_lists[N_FREE_LISTS] = { 0 };
- /* GC_stack_free_lists[i] is free list for stacks of */
- /* size GC_min_stack_sz*2**i. */
- /* Free lists are linked through stack_head stored */ /* at top of stack. */
-
-/* Return a stack of size at least *stack_size. *stack_size is */
-/* replaced by the actual stack size. */
-/* Caller holds allocation lock. */
-ptr_t GC_stack_alloc(size_t * stack_size)
-{
- register size_t requested_sz = *stack_size;
- register size_t search_sz = GC_min_stack_sz;
- register int index = 0; /* = log2(search_sz/GC_min_stack_sz) */
- register ptr_t base;
- register struct stack_head *result;
-
- while (search_sz < requested_sz) {
- search_sz *= 2;
- index++;
- }
- if ((result = GC_stack_free_lists[index]) == 0
- && (result = GC_stack_free_lists[index+1]) != 0) {
- /* Try next size up. */
- search_sz *= 2; index++;
- }
- if (result != 0) {
- base = GC_stack_free_lists[index]->base;
- GC_stack_free_lists[index] = GC_stack_free_lists[index]->next;
- } else {
-#ifdef MMAP_STACKS
- base = (ptr_t)mmap(0, search_sz + GC_page_size,
- PROT_READ|PROT_WRITE, MAP_PRIVATE |MAP_NORESERVE,
- GC_zfd, 0);
- if (base == (ptr_t)-1)
- {
- *stack_size = 0;
- return NULL;
- }
-
- mprotect(base, GC_page_size, PROT_NONE);
- /* Should this use divHBLKSZ(search_sz + GC_page_size) ? -- cf */
- GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
- base += GC_page_size;
-
-#else
- base = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_size);
- if (base == NULL)
- {
- *stack_size = 0;
- return NULL;
- }
-
- base = (ptr_t)(((word)base + GC_page_size) & ~(GC_page_size - 1));
- /* Protect hottest page to detect overflow. */
-# ifdef SOLARIS23_MPROTECT_BUG_FIXED
- mprotect(base, GC_page_size, PROT_NONE);
-# endif
- GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
-
- base += GC_page_size;
-#endif
- }
- *stack_size = search_sz;
- return(base);
-}
-
-/* Caller holds allocationlock. */
-void GC_stack_free(ptr_t stack, size_t size)
-{
- register int index = 0;
- register size_t search_sz = GC_min_stack_sz;
- register struct stack_head *head;
-
-#ifdef MMAP_STACKS
- /* Zero pointers */
- mmap(stack, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED,
- GC_zfd, 0);
-#endif
- while (search_sz < size) {
- search_sz *= 2;
- index++;
- }
- if (search_sz != size) ABORT("Bad stack size");
-
- head = (struct stack_head *)(stack + search_sz - sizeof(struct stack_head));
- head->next = GC_stack_free_lists[index];
- head->base = stack;
- GC_stack_free_lists[index] = head;
-}
-
-void GC_my_stack_limits();
-
-/* Notify virtual dirty bit implementation that known empty parts of */
-/* stacks do not contain useful data. */
-/* Caller holds allocation lock. */
-void GC_old_stacks_are_fresh()
-{
-/* No point in doing this for MMAP stacks - and pointers are zero'd out */
-/* by the mmap in GC_stack_free */
-#ifndef MMAP_STACKS
- register int i;
- register struct stack_head *s;
- register ptr_t p;
- register size_t sz;
- register struct hblk * h;
- int dummy;
-
- for (i = 0, sz= GC_min_stack_sz; i < N_FREE_LISTS;
- i++, sz *= 2) {
- for (s = GC_stack_free_lists[i]; s != 0; s = s->next) {
- p = s->base;
- h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
- if ((ptr_t)h == p) {
- GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
- } else {
- GC_is_fresh((struct hblk *)p, divHBLKSZ(sz) - 1);
- BZERO(p, (ptr_t)h - p);
- }
- }
- }
-#endif /* MMAP_STACKS */
- GC_my_stack_limits();
-}
-
-/* The set of all known threads. We intercept thread creation and */
-/* joins. We never actually create detached threads. We allocate all */
-/* new thread stacks ourselves. These allow us to maintain this */
-/* data structure. */
-
-# define THREAD_TABLE_SZ 128 /* Must be power of 2 */
-volatile GC_thread GC_threads[THREAD_TABLE_SZ];
-
-void GC_push_thread_structures GC_PROTO((void))
-{
- GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
-}
-
-/* Add a thread to GC_threads. We assume it wasn't already there. */
-/* Caller holds allocation lock. */
-GC_thread GC_new_thread(thread_t id)
-{
- int hv = ((word)id) % THREAD_TABLE_SZ;
- GC_thread result;
- static struct GC_Thread_Rep first_thread;
- static GC_bool first_thread_used = FALSE;
-
- if (!first_thread_used) {
- result = &first_thread;
- first_thread_used = TRUE;
- /* Dont acquire allocation lock, since we may already hold it. */
- } else {
- result = (struct GC_Thread_Rep *)
- GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
- }
- if (result == 0) return(0);
- result -> id = id;
- result -> next = GC_threads[hv];
- GC_threads[hv] = result;
- /* result -> finished = 0; */
- (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
- return(result);
-}
-
-/* Delete a thread from GC_threads. We assume it is there. */
-/* (The code intentionally traps if it wasn't.) */
-/* Caller holds allocation lock. */
-void GC_delete_thread(thread_t id)
-{
- int hv = ((word)id) % THREAD_TABLE_SZ;
- register GC_thread p = GC_threads[hv];
- register GC_thread prev = 0;
-
- while (p -> id != id) {
- prev = p;
- p = p -> next;
- }
- if (prev == 0) {
- GC_threads[hv] = p -> next;
- } else {
- prev -> next = p -> next;
- }
-}
-
-/* Return the GC_thread correpsonding to a given thread_t. */
-/* Returns 0 if it's not there. */
-/* Caller holds allocation lock. */
-GC_thread GC_lookup_thread(thread_t id)
-{
- int hv = ((word)id) % THREAD_TABLE_SZ;
- register GC_thread p = GC_threads[hv];
-
- while (p != 0 && p -> id != id) p = p -> next;
- return(p);
-}
-
-/* Solaris 2/Intel uses an initial stack size limit slightly bigger than the
- SPARC default of 8 MB. Account for this to warn only if the user has
- raised the limit beyond the default.
-
- This is identical to DFLSSIZ defined in <sys/vm_machparam.h>. This file
- is installed in /usr/platform/`uname -m`/include, which is not in the
- default include directory list, so copy the definition here. */
-#ifdef I386
-# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024 + ((USRSTACK) & 0x3FFFFF))
-#else
-# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
-#endif
-
-word GC_get_orig_stack_size() {
- struct rlimit rl;
- static int warned = 0;
- int result;
-
- if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
- result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
- if (result > MAX_ORIG_STACK_SIZE) {
- if (!warned) {
- WARN("Large stack limit(%ld): only scanning 8 MB\n", result);
- warned = 1;
- }
- result = MAX_ORIG_STACK_SIZE;
- }
- return result;
-}
-
-/* Notify dirty bit implementation of unused parts of my stack. */
-/* Caller holds allocation lock. */
-void GC_my_stack_limits()
-{
- int dummy;
- register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
- register GC_thread me = GC_lookup_thread(thr_self());
- register size_t stack_size = me -> stack_size;
- register ptr_t stack;
-
- if (stack_size == 0) {
- /* original thread */
- /* Empirically, what should be the stack page with lowest */
- /* address is actually inaccessible. */
- stack_size = GC_get_orig_stack_size() - GC_page_size;
- stack = GC_stackbottom - stack_size + GC_page_size;
- } else {
- stack = me -> stack;
- }
- if (stack > hottest || stack + stack_size < hottest) {
- ABORT("sp out of bounds");
- }
- GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
-}
-
-
-/* We hold allocation lock. Should do exactly the right thing if the */
-/* world is stopped. Should not fail if it isn't. */
-void GC_push_all_stacks()
-{
- register int i;
- register GC_thread p;
- register ptr_t sp = GC_approx_sp();
- register ptr_t bottom, top;
- struct rlimit rl;
-
-# define PUSH(bottom,top) \
- if (GC_dirty_maintained) { \
- GC_push_selected((bottom), (top), GC_page_was_ever_dirty, \
- GC_push_all_stack); \
- } else { \
- GC_push_all_stack((bottom), (top)); \
- }
- GC_push_all_stack((ptr_t)GC_lwp_registers,
- (ptr_t)GC_lwp_registers
- + max_lwps * sizeof(GC_lwp_registers[0]));
- for (i = 0; i < THREAD_TABLE_SZ; i++) {
- for (p = GC_threads[i]; p != 0; p = p -> next) {
- if (p -> stack_size != 0) {
- bottom = p -> stack;
- top = p -> stack + p -> stack_size;
- } else {
- /* The original stack. */
- bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_size;
- top = GC_stackbottom;
- }
- if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
- PUSH(bottom, top);
- }
- }
-}
-
-
-int GC_is_thread_stack(ptr_t addr)
-{
- register int i;
- register GC_thread p;
- register ptr_t bottom, top;
-
- for (i = 0; i < THREAD_TABLE_SZ; i++) {
- for (p = GC_threads[i]; p != 0; p = p -> next) {
- if (p -> stack_size != 0) {
- if (p -> stack <= addr &&
- addr < p -> stack + p -> stack_size)
- return 1;
- }
- }
- }
- return 0;
-}
-
-/* The only thread that ever really performs a thr_join. */
-void * GC_thr_daemon(void * dummy)
-{
- void *status;
- thread_t departed;
- register GC_thread t;
- register int i;
- register int result;
-
- for(;;) {
- start:
- result = thr_join((thread_t)0, &departed, &status);
- LOCK();
- if (result != 0) {
- /* No more threads; wait for create. */
- for (i = 0; i < THREAD_TABLE_SZ; i++) {
- for (t = GC_threads[i]; t != 0; t = t -> next) {
- if (!(t -> flags & (DETACHED | FINISHED))) {
- UNLOCK();
- goto start; /* Thread started just before we */
- /* acquired the lock. */
- }
- }
- }
- cond_wait(&GC_create_cv, &GC_allocate_ml);
- UNLOCK();
- } else {
- t = GC_lookup_thread(departed);
- GC_multithreaded--;
- if (!(t -> flags & CLIENT_OWNS_STACK)) {
- GC_stack_free(t -> stack, t -> stack_size);
- }
- if (t -> flags & DETACHED) {
- GC_delete_thread(departed);
- } else {
- t -> status = status;
- t -> flags |= FINISHED;
- cond_signal(&(t -> join_cv));
- cond_broadcast(&GC_prom_join_cv);
- }
- UNLOCK();
- }
- }
-}
-
-/* We hold the allocation lock, or caller ensures that 2 instances */
-/* cannot be invoked concurrently. */
-void GC_thr_init(void)
-{
- GC_thread t;
- thread_t tid;
- int ret;
-
- if (GC_thr_initialized)
- return;
- GC_thr_initialized = TRUE;
- GC_min_stack_sz = ((thr_min_stack() + 32*1024 + HBLKSIZE-1)
- & ~(HBLKSIZE - 1));
-#ifdef MMAP_STACKS
- GC_zfd = open("/dev/zero", O_RDONLY);
- if (GC_zfd == -1)
- ABORT("Can't open /dev/zero");
-#endif /* MMAP_STACKS */
- cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
- cond_init(&GC_create_cv, USYNC_THREAD, 0);
- /* Add the initial thread, so we can stop it. */
- t = GC_new_thread(thr_self());
- t -> stack_size = 0;
- t -> flags = DETACHED | CLIENT_OWNS_STACK;
- ret = thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
- 0 /* arg */, THR_DETACHED | THR_DAEMON,
- &tid /* thread_id */);
- if (ret != 0) {
- GC_err_printf1("Thr_create returned %ld\n", ret);
- ABORT("Cant fork daemon");
- }
- thr_setprio(tid, 126);
-}
-
-/* We acquire the allocation lock to prevent races with */
-/* stopping/starting world. */
-/* This is no more correct than the underlying Solaris 2.X */
-/* implementation. Under 2.3 THIS IS BROKEN. */
-int GC_thr_suspend(thread_t target_thread)
-{
- GC_thread t;
- int result;
-
- LOCK();
- result = thr_suspend(target_thread);
- if (result == 0) {
- t = GC_lookup_thread(target_thread);
- if (t == 0) ABORT("thread unknown to GC");
- t -> flags |= SUSPNDED;
- }
- UNLOCK();
- return(result);
-}
-
-int GC_thr_continue(thread_t target_thread)
-{
- GC_thread t;
- int result;
-
- LOCK();
- result = thr_continue(target_thread);
- if (result == 0) {
- t = GC_lookup_thread(target_thread);
- if (t == 0) ABORT("thread unknown to GC");
- t -> flags &= ~SUSPNDED;
- }
- UNLOCK();
- return(result);
-}
-
-int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
-{
- register GC_thread t;
- int result = 0;
-
- LOCK();
- if (wait_for == 0) {
- register int i;
- register GC_bool thread_exists;
-
- for (;;) {
- thread_exists = FALSE;
- for (i = 0; i < THREAD_TABLE_SZ; i++) {
- for (t = GC_threads[i]; t != 0; t = t -> next) {
- if (!(t -> flags & DETACHED)) {
- if (t -> flags & FINISHED) {
- goto found;
- }
- thread_exists = TRUE;
- }
- }
- }
- if (!thread_exists) {
- result = ESRCH;
- goto out;
- }
- cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
- }
- } else {
- t = GC_lookup_thread(wait_for);
- if (t == 0 || t -> flags & DETACHED) {
- result = ESRCH;
- goto out;
- }
- if (wait_for == thr_self()) {
- result = EDEADLK;
- goto out;
- }
- while (!(t -> flags & FINISHED)) {
- cond_wait(&(t -> join_cv), &GC_allocate_ml);
- }
-
- }
- found:
- if (status) *status = t -> status;
- if (departed) *departed = t -> id;
- cond_destroy(&(t -> join_cv));
- GC_delete_thread(t -> id);
- out:
- UNLOCK();
- return(result);
-}
-
-
-int
-GC_thr_create(void *stack_base, size_t stack_size,
- void *(*start_routine)(void *), void *arg, long flags,
- thread_t *new_thread)
-{
- int result;
- GC_thread t;
- thread_t my_new_thread;
- word my_flags = 0;
- void * stack = stack_base;
-
- LOCK();
- if (!GC_is_initialized) GC_init_inner();
- GC_multithreaded++;
- if (stack == 0) {
- if (stack_size == 0) stack_size = 1024*1024;
- stack = (void *)GC_stack_alloc(&stack_size);
- if (stack == 0) {
- GC_multithreaded--;
- UNLOCK();
- return(ENOMEM);
- }
- } else {
- my_flags |= CLIENT_OWNS_STACK;
- }
- if (flags & THR_DETACHED) my_flags |= DETACHED;
- if (flags & THR_SUSPENDED) my_flags |= SUSPNDED;
- result = thr_create(stack, stack_size, start_routine,
- arg, flags & ~THR_DETACHED, &my_new_thread);
- if (result == 0) {
- t = GC_new_thread(my_new_thread);
- t -> flags = my_flags;
- if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
- t -> stack = stack;
- t -> stack_size = stack_size;
- if (new_thread != 0) *new_thread = my_new_thread;
- cond_signal(&GC_create_cv);
- } else {
- GC_multithreaded--;
- if (!(my_flags & CLIENT_OWNS_STACK)) {
- GC_stack_free(stack, stack_size);
- }
- }
- UNLOCK();
- return(result);
-}
-
-# else /* !GC_SOLARIS_THREADS */
-
-#ifndef LINT
- int GC_no_sunOS_threads;
-#endif
-#endif
diff --git a/specific.c b/specific.c
index 7d5d8894..67addec8 100644
--- a/specific.c
+++ b/specific.c
@@ -11,16 +11,17 @@
* modified is included with the above copyright notice.
*/
-#include "private/gc_priv.h" /* For GC_compare_and_exchange, GC_memory_barrier */
+#include "private/gc_priv.h" /* For configuration, pthreads.h. */
+#include "atomic_ops.h"
#if defined(GC_LINUX_THREADS)
#include "private/specific.h"
static tse invalid_tse = {INVALID_QTID, 0, 0, INVALID_THREADID};
- /* A thread-specific data entry which will never */
- /* appear valid to a reader. Used to fill in empty */
- /* cache entries to avoid a check for 0. */
+ /* A thread-specific data entry which will never */
+ /* appear valid to a reader. Used to fill in empty */
+ /* cache entries to avoid a check for 0. */
int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *)) {
int i;
@@ -57,7 +58,7 @@ int PREFIXED(setspecific) (tsd * key, void * value) {
GC_ASSERT(entry -> qtid == INVALID_QTID);
/* There can only be one writer at a time, but this needs to be */
/* atomic with respect to concurrent readers. */
- *(volatile tse **)(key -> hash + hash_val) = entry;
+ AO_store_release((volatile AO_t *)(key -> hash + hash_val), (AO_t)entry);
pthread_mutex_unlock(&(key -> lock));
return 0;
}
@@ -125,4 +126,37 @@ void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
return entry -> value;
}
+#ifdef GC_ASSERTIONS
+
+/* Check that that all elements of the data structure associated */
+/* with key are marked. */
+void PREFIXED(check_tsd_marks) (tsd *key)
+{
+ int i;
+ tse *p;
+
+ if (!GC_is_marked(GC_base(key))) {
+ ABORT("Unmarked thread-specific-data table");
+ }
+ for (i = 0; i < TS_HASH_SIZE; ++i) {
+ for (p = key -> hash[i]; p != 0; p = p -> next) {
+ if (!GC_is_marked(GC_base(p))) {
+ GC_err_printf(
+ "Thread-specific-data entry at %p not marked\n",p);
+ ABORT("Unmarked tse");
+ }
+ }
+ }
+ for (i = 0; i < TS_CACHE_SIZE; ++i) {
+ p = key -> cache[i];
+ if (p != &invalid_tse && !GC_is_marked(GC_base(p))) {
+ GC_err_printf(
+ "Cached thread-specific-data entry at %p not marked\n",p);
+ ABORT("Unmarked cached tse");
+ }
+ }
+}
+
+#endif
+
#endif /* GC_LINUX_THREADS */
diff --git a/stubborn.c b/stubborn.c
index bb137616..f4e09583 100644
--- a/stubborn.c
+++ b/stubborn.c
@@ -16,311 +16,43 @@
#include "private/gc_priv.h"
-# ifdef STUBBORN_ALLOC
+#if defined(MANUAL_VDB)
/* Stubborn object (hard to change, nearly immutable) allocation. */
-
-extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
-
-#define GENERAL_MALLOC(lb,k) \
- (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
-
-/* Data structure representing immutable objects that */
-/* are still being initialized. */
-/* This is a bit baroque in order to avoid acquiring */
-/* the lock twice for a typical allocation. */
-
-GC_PTR * GC_changing_list_start;
-
-void GC_push_stubborn_structures GC_PROTO((void))
-{
- GC_push_all((ptr_t)(&GC_changing_list_start),
- (ptr_t)(&GC_changing_list_start) + sizeof(GC_PTR *));
-}
-
-# ifdef THREADS
- VOLATILE GC_PTR * VOLATILE GC_changing_list_current;
-# else
- GC_PTR * GC_changing_list_current;
-# endif
- /* Points at last added element. Also (ab)used for */
- /* synchronization. Updates and reads are assumed atomic. */
-
-GC_PTR * GC_changing_list_limit;
- /* Points at the last word of the buffer, which is always 0 */
- /* All entries in (GC_changing_list_current, */
- /* GC_changing_list_limit] are 0 */
-
-
-void GC_stubborn_init()
-{
-# define INIT_SIZE 10
-
- GC_changing_list_start = (GC_PTR *)
- GC_INTERNAL_MALLOC(
- (word)(INIT_SIZE * sizeof(GC_PTR)),
- PTRFREE);
- BZERO(GC_changing_list_start,
- INIT_SIZE * sizeof(GC_PTR));
- if (GC_changing_list_start == 0) {
- GC_err_printf0("Insufficient space to start up\n");
- ABORT("GC_stubborn_init: put of space");
- }
- GC_changing_list_current = GC_changing_list_start;
- GC_changing_list_limit = GC_changing_list_start + INIT_SIZE - 1;
- * GC_changing_list_limit = 0;
-}
-
-/* Compact and possibly grow GC_uninit_list. The old copy is */
-/* left alone. Lock must be held. */
-/* When called GC_changing_list_current == GC_changing_list_limit */
-/* which is one past the current element. */
-/* When we finish GC_changing_list_current again points one past last */
-/* element. */
-/* Invariant while this is running: GC_changing_list_current */
-/* points at a word containing 0. */
-/* Returns FALSE on failure. */
-GC_bool GC_compact_changing_list()
-{
- register GC_PTR *p, *q;
- register word count = 0;
- word old_size = (char **)GC_changing_list_limit
- - (char **)GC_changing_list_start+1;
- /* The casts are needed as a workaround for an Amiga bug */
- register word new_size = old_size;
- GC_PTR * new_list;
-
- for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
- if (*p != 0) count++;
- }
- if (2 * count > old_size) new_size = 2 * count;
- new_list = (GC_PTR *)
- GC_INTERNAL_MALLOC(
- new_size * sizeof(GC_PTR), PTRFREE);
- /* PTRFREE is a lie. But we don't want the collector to */
- /* consider these. We do want the list itself to be */
- /* collectable. */
- if (new_list == 0) return(FALSE);
- BZERO(new_list, new_size * sizeof(GC_PTR));
- q = new_list;
- for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
- if (*p != 0) *q++ = *p;
- }
- GC_changing_list_start = new_list;
- GC_changing_list_limit = new_list + new_size - 1;
- GC_changing_list_current = q;
- return(TRUE);
-}
-
-/* Add p to changing list. Clear p on failure. */
-# define ADD_CHANGING(p) \
- { \
- register struct hblk * h = HBLKPTR(p); \
- register word index = PHT_HASH(h); \
- \
- set_pht_entry_from_index(GC_changed_pages, index); \
- } \
- if (*GC_changing_list_current != 0 \
- && ++GC_changing_list_current == GC_changing_list_limit) { \
- if (!GC_compact_changing_list()) (p) = 0; \
- } \
- *GC_changing_list_current = p;
-
-void GC_change_stubborn(p)
-GC_PTR p;
-{
- DCL_LOCK_STATE;
-
- DISABLE_SIGNALS();
- LOCK();
- ADD_CHANGING(p);
- UNLOCK();
- ENABLE_SIGNALS();
-}
-
-void GC_end_stubborn_change(p)
-GC_PTR p;
-{
-# ifdef THREADS
- register VOLATILE GC_PTR * my_current = GC_changing_list_current;
-# else
- register GC_PTR * my_current = GC_changing_list_current;
-# endif
- register GC_bool tried_quick;
- DCL_LOCK_STATE;
-
- if (*my_current == p) {
- /* Hopefully the normal case. */
- /* Compaction could not have been running when we started. */
- *my_current = 0;
-# ifdef THREADS
- if (my_current == GC_changing_list_current) {
- /* Compaction can't have run in the interim. */
- /* We got away with the quick and dirty approach. */
- return;
- }
- tried_quick = TRUE;
-# else
- return;
-# endif
- } else {
- tried_quick = FALSE;
- }
- DISABLE_SIGNALS();
- LOCK();
- my_current = GC_changing_list_current;
- for (; my_current >= GC_changing_list_start; my_current--) {
- if (*my_current == p) {
- *my_current = 0;
- UNLOCK();
- ENABLE_SIGNALS();
- return;
- }
- }
- if (!tried_quick) {
- GC_err_printf1("Bad arg to GC_end_stubborn_change: 0x%lx\n",
- (unsigned long)p);
- ABORT("Bad arg to GC_end_stubborn_change");
- }
- UNLOCK();
- ENABLE_SIGNALS();
-}
-
-/* Allocate lb bytes of composite (pointerful) data */
-/* No pointer fields may be changed after a call to */
-/* GC_end_stubborn_change(p) where p is the value */
-/* returned by GC_malloc_stubborn. */
-# ifdef __STDC__
- GC_PTR GC_malloc_stubborn(size_t lb)
-# else
- GC_PTR GC_malloc_stubborn(lb)
- size_t lb;
-# endif
-{
-register ptr_t op;
-register ptr_t *opp;
-register word lw;
-ptr_t result;
-DCL_LOCK_STATE;
-
- if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_sobjfreelist[lw]);
- FASTLOCK();
- if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
- FASTUNLOCK();
- result = GC_generic_malloc((word)lb, STUBBORN);
- goto record;
- }
- *opp = obj_link(op);
- obj_link(op) = 0;
- GC_words_allocd += lw;
- result = (GC_PTR) op;
- ADD_CHANGING(result);
- FASTUNLOCK();
- return((GC_PTR)result);
- } else {
- result = (GC_PTR)
- GC_generic_malloc((word)lb, STUBBORN);
- }
-record:
- DISABLE_SIGNALS();
- LOCK();
- ADD_CHANGING(result);
- UNLOCK();
- ENABLE_SIGNALS();
- return((GC_PTR)GC_clear_stack(result));
-}
-
-
-/* Functions analogous to GC_read_dirty and GC_page_was_dirty. */
-/* Report pages on which stubborn objects were changed. */
-void GC_read_changed()
+/* This interface is deprecated. We mostly emulate it using */
+/* MANUAL_VDB. But that imposes the additional constraint that */
+/* written, but not yet GC_dirty()ed objects must be referenced */
+/* by a stack. */
+void * GC_malloc_stubborn(size_t lb)
{
- register GC_PTR * p = GC_changing_list_start;
- register GC_PTR q;
- register struct hblk * h;
- register word index;
-
- if (p == 0) /* initializing */ return;
- BCOPY(GC_changed_pages, GC_prev_changed_pages,
- (sizeof GC_changed_pages));
- BZERO(GC_changed_pages, (sizeof GC_changed_pages));
- for (; p <= GC_changing_list_current; p++) {
- if ((q = *p) != 0) {
- h = HBLKPTR(q);
- index = PHT_HASH(h);
- set_pht_entry_from_index(GC_changed_pages, index);
- }
- }
+ return(GC_malloc(lb));
}
-GC_bool GC_page_was_changed(h)
-struct hblk * h;
+/*ARGSUSED*/
+void GC_end_stubborn_change(void *p)
{
- register word index = PHT_HASH(h);
-
- return(get_pht_entry_from_index(GC_prev_changed_pages, index));
+ GC_dirty(p);
}
-/* Remove unreachable entries from changed list. Should only be */
-/* called with mark bits consistent and lock held. */
-void GC_clean_changing_list()
+/*ARGSUSED*/
+void GC_change_stubborn(void *p)
{
- register GC_PTR * p = GC_changing_list_start;
- register GC_PTR q;
- register ptr_t r;
- register unsigned long count = 0;
- register unsigned long dropped_count = 0;
-
- if (p == 0) /* initializing */ return;
- for (; p <= GC_changing_list_current; p++) {
- if ((q = *p) != 0) {
- count++;
- r = (ptr_t)GC_base(q);
- if (r == 0 || !GC_is_marked(r)) {
- *p = 0;
- dropped_count++;
- }
- }
- }
-# ifdef PRINTSTATS
- if (count > 0) {
- GC_printf2("%lu entries in changing list: reclaimed %lu\n",
- (unsigned long)count, (unsigned long)dropped_count);
- }
-# endif
}
-#else /* !STUBBORN_ALLOC */
+#else /* !MANUAL_VDB */
-# ifdef __STDC__
- GC_PTR GC_malloc_stubborn(size_t lb)
-# else
- GC_PTR GC_malloc_stubborn(lb)
- size_t lb;
-# endif
+void * GC_malloc_stubborn(size_t lb)
{
return(GC_malloc(lb));
}
/*ARGSUSED*/
-void GC_end_stubborn_change(p)
-GC_PTR p;
+void GC_end_stubborn_change(void *p)
{
}
/*ARGSUSED*/
-void GC_change_stubborn(p)
-GC_PTR p;
-{
-}
-
-void GC_push_stubborn_structures GC_PROTO((void))
+void GC_change_stubborn(void *p)
{
}
-#endif
+#endif /* !MANUAL_VDB */
diff --git a/tests/test.c b/tests/test.c
index e1676aad..14989bab 100644
--- a/tests/test.c
+++ b/tests/test.c
@@ -53,10 +53,7 @@
# ifdef PCR
# include "th/PCR_ThCrSec.h"
# include "th/PCR_Th.h"
-# undef GC_printf0
-# define GC_printf0 printf
-# undef GC_printf1
-# define GC_printf1 printf
+# define GC_printf printf
# endif
# if defined(GC_SOLARIS_THREADS) && !defined(GC_SOLARIS_PTHREADS)
@@ -97,8 +94,8 @@ int realloc_count = 0;
ret=GC_malloc_explicitly_typed(lb,d);
}
if(ret==NULL){
- GC_printf0("Out of memory, (typed allocations are not directly "
- "supported with the GC_AMIGA_FASTALLOC option.)\n");
+ GC_printf("Out of memory, (typed allocations are not directly "
+ "supported with the GC_AMIGA_FASTALLOC option.)\n");
FAIL;
}
}
@@ -112,8 +109,8 @@ int realloc_count = 0;
ret=GC_calloc_explicitly_typed(a,lb,d);
}
if(ret==NULL){
- GC_printf0("Out of memory, (typed allocations are not directly "
- "supported with the GC_AMIGA_FASTALLOC option.)\n");
+ GC_printf("Out of memory, (typed allocations are not directly "
+ "supported with the GC_AMIGA_FASTALLOC option.)\n");
FAIL;
}
}
@@ -177,17 +174,16 @@ sexpr y;
stubborn_count++;
r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
if (r == 0) {
- (void)GC_printf0("Out of memory\n");
+ (void)GC_printf("Out of memory\n");
exit(1);
}
for (p = (int *)r;
((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
if (*p) {
- (void)GC_printf1("Found nonzero at 0x%lx - allocator is broken\n",
- (unsigned long)p);
+ (void)GC_printf("Found nonzero at %p - allocator is broken\n", p);
FAIL;
}
- *p = 13;
+ *p = (13 << 12) + ((p - (int *)r) & 0xfff);
}
# ifdef AT_END
r = (sexpr)((char *)r + (my_extra & ~7));
@@ -236,11 +232,11 @@ struct GC_ms_entry * fake_gcj_mark_proc(word * addr,
}
x = (sexpr)(addr + 1); /* Skip the vtable pointer. */
mark_stack_ptr = GC_MARK_AND_PUSH(
- (GC_PTR)(x -> sexpr_cdr), mark_stack_ptr,
- mark_stack_limit, (GC_PTR *)&(x -> sexpr_cdr));
+ (void *)(x -> sexpr_cdr), mark_stack_ptr,
+ mark_stack_limit, (void * *)&(x -> sexpr_cdr));
mark_stack_ptr = GC_MARK_AND_PUSH(
- (GC_PTR)(x -> sexpr_car), mark_stack_ptr,
- mark_stack_limit, (GC_PTR *)&(x -> sexpr_car));
+ (void *)(x -> sexpr_car), mark_stack_ptr,
+ mark_stack_limit, (void * *)&(x -> sexpr_car));
return(mark_stack_ptr);
}
@@ -270,22 +266,22 @@ sexpr y;
}
# endif
if (r == 0) {
- (void)GC_printf0("Out of memory\n");
+ (void)GC_printf("Out of memory\n");
exit(1);
}
for (p = (int *)r;
((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
if (*p) {
- (void)GC_printf1("Found nonzero at 0x%lx (local) - allocator is broken\n",
- (unsigned long)p);
+ (void)GC_printf(
+ "Found nonzero at %p (local) - allocator is broken\n", p);
FAIL;
}
- *p = 13;
+ *p = (7 << 12) + ((p - (int *)r) & 0xfff);
}
r -> sexpr_car = x;
r -> sexpr_cdr = y;
my_extra++;
- if ( my_extra >= 5000 || my_extra == 200 && ++my_random % 37 != 0) {
+ if ( my_extra >= 5000 || (my_extra == 200 && ++my_random % 37 != 0)) {
extra_count = 0;
} else {
extra_count = my_extra;
@@ -303,7 +299,7 @@ sexpr y;
collectable_count++;
r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
if (r == 0) {
- (void)GC_printf0("Out of memory\n");
+ (void)GC_printf("Out of memory\n");
exit(1);
}
r -> sexpr_car = x;
@@ -320,7 +316,7 @@ sexpr y;
uncollectable_count++;
r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
if (r == 0) {
- (void)GC_printf0("Out of memory\n");
+ (void)GC_printf("Out of memory\n");
exit(1);
}
r -> sexpr_car = x;
@@ -351,7 +347,7 @@ sexpr y;
&gcj_class_struct2);
}
if (r == 0) {
- (void)GC_printf0("Out of memory\n");
+ (void)GC_printf("Out of memory\n");
exit(1);
}
result = (sexpr)(r + 1);
@@ -467,13 +463,13 @@ sexpr list;
int low, up;
{
if ((int)(GC_word)(car(car(list))) != low) {
- (void)GC_printf0(
+ (void)GC_printf(
"List reversal produced incorrect list - collector is broken\n");
FAIL;
}
if (low == up) {
if (cdr(list) != nil) {
- (void)GC_printf0("List too long - collector is broken\n");
+ (void)GC_printf("List too long - collector is broken\n");
FAIL;
}
} else {
@@ -488,13 +484,13 @@ sexpr list;
int low, up;
{
if ((int)(GC_word)(car(car(list))) != low) {
- (void)GC_printf0(
+ (void)GC_printf(
"Uncollectable list corrupted - collector is broken\n");
FAIL;
}
if (low == up) {
if (UNCOLLECTABLE_CDR(list) != nil) {
- (void)GC_printf0("Uncollectable list too long - collector is broken\n");
+ (void)GC_printf("Uncollectable list too long - collector is broken\n");
FAIL;
}
} else {
@@ -503,18 +499,38 @@ int low, up;
}
/* Not used, but useful for debugging: */
-void print_int_list(x)
-sexpr x;
+void print_int_list(sexpr x)
{
if (is_nil(x)) {
- (void)GC_printf0("NIL\n");
+ (void)GC_printf("NIL\n");
} else {
- (void)GC_printf1("(%ld)", (long)(car(car(x))));
+ (void)GC_printf("(%ld)", (long)(car(car(x))));
if (!is_nil(cdr(x))) {
- (void)GC_printf0(", ");
+ (void)GC_printf(", ");
(void)print_int_list(cdr(x));
} else {
- (void)GC_printf0("\n");
+ (void)GC_printf("\n");
+ }
+ }
+}
+
+/* ditto: */
+void check_marks_int_list(sexpr x)
+{
+ if (!GC_is_marked((ptr_t)x))
+ GC_printf("[unm:%p]", x);
+ else
+ GC_printf("[mkd:%p]", x);
+ if (is_nil(x)) {
+ (void)GC_printf("NIL\n");
+ } else {
+ if (!GC_is_marked((ptr_t)car(x))) GC_printf("[unm car:%p]", car(x));
+ (void)GC_printf("(%ld)", (long)(car(car(x))));
+ if (!is_nil(cdr(x))) {
+ (void)GC_printf(", ");
+ (void)check_marks_int_list(cdr(x));
+ } else {
+ (void)GC_printf("\n");
}
}
}
@@ -546,13 +562,11 @@ sexpr x;
pthread_t t;
int code;
if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
- (void)GC_printf1("Small thread creation failed %lu\n",
- (unsigned long)code);
+ (void)GC_printf("Small thread creation failed %d\n", code);
FAIL;
}
if ((code = pthread_join(t, 0)) != 0) {
- (void)GC_printf1("Small thread join failed %lu\n",
- (unsigned long)code);
+ (void)GC_printf("Small thread join failed %d\n", code);
FAIL;
}
}
@@ -564,13 +578,13 @@ sexpr x;
HANDLE h;
h = GC_CreateThread(NULL, 0, tiny_reverse_test, 0, 0, &thread_id);
if (h == (HANDLE)NULL) {
- (void)GC_printf1("Small thread creation failed %lu\n",
- (unsigned long)GetLastError());
+ (void)GC_printf("Small thread creation failed %d\n",
+ GetLastError());
FAIL;
}
if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
- (void)GC_printf1("Small thread wait failed %lu\n",
- (unsigned long)GetLastError());
+ (void)GC_printf("Small thread wait failed %d\n",
+ GetLastError());
FAIL;
}
}
@@ -640,17 +654,17 @@ void reverse_test()
collectable_count++;
f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
realloc_count++;
- f = (sexpr *)GC_REALLOC((GC_PTR)f, 6 * sizeof(sexpr));
+ f = (sexpr *)GC_REALLOC((void *)f, 6 * sizeof(sexpr));
f[5] = ints(1,17);
collectable_count++;
g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
realloc_count++;
- g = (sexpr *)GC_REALLOC((GC_PTR)g, 800 * sizeof(sexpr));
+ g = (sexpr *)GC_REALLOC((void *)g, 800 * sizeof(sexpr));
g[799] = ints(1,18);
collectable_count++;
h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
realloc_count++;
- h = (sexpr *)GC_REALLOC((GC_PTR)h, 2000 * sizeof(sexpr));
+ h = (sexpr *)GC_REALLOC((void *)h, 2000 * sizeof(sexpr));
# ifdef GC_GCJ_SUPPORT
h[1999] = gcj_ints(1,200);
for (i = 0; i < 51; ++i)
@@ -692,9 +706,9 @@ void reverse_test()
# if !defined(AT_END) && !defined(THREADS)
/* This is not thread safe, since realloc explicitly deallocates */
if (i & 1) {
- a = (sexpr)GC_REALLOC((GC_PTR)a, 500);
+ a = (sexpr)GC_REALLOC((void *)a, 500);
} else {
- a = (sexpr)GC_REALLOC((GC_PTR)a, 8200);
+ a = (sexpr)GC_REALLOC((void *)a, 8200);
}
# endif
}
@@ -730,7 +744,7 @@ typedef struct treenode {
int finalizable_count = 0;
int finalized_count = 0;
-VOLATILE int dropped_something = 0;
+volatile int dropped_something = 0;
# ifdef __STDC__
void finalizer(void * obj, void * client_data)
@@ -758,7 +772,7 @@ VOLATILE int dropped_something = 0;
# endif
# endif
if ((int)(GC_word)client_data != t -> level) {
- (void)GC_printf0("Wrong finalization data - collector is broken\n");
+ (void)GC_printf("Wrong finalization data - collector is broken\n");
FAIL;
}
finalized_count++;
@@ -814,13 +828,13 @@ int n;
live_indicators =
(GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
if (!live_indicators) {
- (void)GC_printf0("Out of memory\n");
+ (void)GC_printf("Out of memory\n");
exit(1);
}
# endif
if (n == 0) return(0);
if (result == 0) {
- (void)GC_printf0("Out of memory\n");
+ (void)GC_printf("Out of memory\n");
exit(1);
}
result -> level = n;
@@ -869,29 +883,29 @@ int n;
# endif
}
- GC_REGISTER_FINALIZER((GC_PTR)result, finalizer, (GC_PTR)(GC_word)n,
- (GC_finalization_proc *)0, (GC_PTR *)0);
+ GC_REGISTER_FINALIZER((void *)result, finalizer, (void *)(GC_word)n,
+ (GC_finalization_proc *)0, (void * *)0);
if (my_index >= MAX_FINALIZED) {
- GC_printf0("live_indicators overflowed\n");
+ GC_printf("live_indicators overflowed\n");
FAIL;
}
live_indicators[my_index] = 13;
if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
- (GC_PTR *)(&(live_indicators[my_index])),
- (GC_PTR)result) != 0) {
- GC_printf0("GC_general_register_disappearing_link failed\n");
+ (void * *)(&(live_indicators[my_index])),
+ (void *)result) != 0) {
+ GC_printf("GC_general_register_disappearing_link failed\n");
FAIL;
}
if (GC_unregister_disappearing_link(
- (GC_PTR *)
+ (void * *)
(&(live_indicators[my_index]))) == 0) {
- GC_printf0("GC_unregister_disappearing_link failed\n");
+ GC_printf("GC_unregister_disappearing_link failed\n");
FAIL;
}
if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
- (GC_PTR *)(&(live_indicators[my_index])),
- (GC_PTR)result) != 0) {
- GC_printf0("GC_general_register_disappearing_link failed 2\n");
+ (void * *)(&(live_indicators[my_index])),
+ (void *)result) != 0) {
+ GC_printf("GC_general_register_disappearing_link failed 2\n");
FAIL;
}
}
@@ -903,13 +917,12 @@ tn *t;
int n;
{
if (n == 0 && t != 0) {
- (void)GC_printf0("Clobbered a leaf - collector is broken\n");
+ (void)GC_printf("Clobbered a leaf - collector is broken\n");
FAIL;
}
if (n == 0) return;
if (t -> level != n) {
- (void)GC_printf1("Lost a node at level %lu - collector is broken\n",
- (unsigned long)n);
+ (void)GC_printf("Lost a node at level %d - collector is broken\n", n);
FAIL;
}
if (counter++ % 373 == 0) {
@@ -937,14 +950,14 @@ void * alloc8bytes()
void * my_free_list;
if (thr_getspecific(fl_key, (void **)(&my_free_list_ptr)) != 0) {
- (void)GC_printf0("thr_getspecific failed\n");
+ (void)GC_printf("thr_getspecific failed\n");
FAIL;
}
if (my_free_list_ptr == 0) {
uncollectable_count++;
my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
if (thr_setspecific(fl_key, my_free_list_ptr) != 0) {
- (void)GC_printf0("thr_setspecific failed\n");
+ (void)GC_printf("thr_setspecific failed\n");
FAIL;
}
}
@@ -953,7 +966,7 @@ void * alloc8bytes()
collectable_count++;
my_free_list = GC_malloc_many(8);
if (my_free_list == 0) {
- (void)GC_printf0("alloc8bytes out of memory\n");
+ (void)GC_printf("alloc8bytes out of memory\n");
FAIL;
}
}
@@ -982,7 +995,7 @@ void * alloc8bytes()
uncollectable_count++;
my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
- (void)GC_printf0("pthread_setspecific failed\n");
+ (void)GC_printf("pthread_setspecific failed\n");
FAIL;
}
}
@@ -990,7 +1003,7 @@ void * alloc8bytes()
if (my_free_list == 0) {
my_free_list = GC_malloc_many(8);
if (my_free_list == 0) {
- (void)GC_printf0("alloc8bytes out of memory\n");
+ (void)GC_printf("alloc8bytes out of memory\n");
FAIL;
}
}
@@ -1014,7 +1027,7 @@ int n;
for (i = 0; i < n; i += 8) {
atomic_count++;
if (alloc8bytes() == 0) {
- (void)GC_printf0("Out of memory\n");
+ (void)GC_printf("Out of memory\n");
FAIL;
}
}
@@ -1044,7 +1057,7 @@ void tree_test()
# endif
chktree(root, TREE_HEIGHT);
if (finalized_count && ! dropped_something) {
- (void)GC_printf0("Premature finalization - collector is broken\n");
+ (void)GC_printf("Premature finalization - collector is broken\n");
FAIL;
}
dropped_something = 1;
@@ -1085,21 +1098,21 @@ void typed_test()
GC_word bm_large = 0xf7ff7fff;
GC_descr d1 = GC_make_descriptor(&bm3, 2);
GC_descr d2 = GC_make_descriptor(&bm2, 2);
-# ifndef LINT
- GC_descr dummy = GC_make_descriptor(&bm_large, 32);
-# endif
GC_descr d3 = GC_make_descriptor(&bm_large, 32);
GC_descr d4 = GC_make_descriptor(bm_huge, 320);
GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
- register int i;
+ int i;
+# ifndef LINT
+ (void)GC_make_descriptor(&bm_large, 32);
+# endif
collectable_count++;
old = 0;
for (i = 0; i < 4000; i++) {
collectable_count++;
new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
if (0 != new[0] || 0 != new[1]) {
- GC_printf0("Bad initialization by GC_malloc_explicitly_typed\n");
+ GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
FAIL;
}
new[0] = 17;
@@ -1130,7 +1143,7 @@ void typed_test()
3 * sizeof(GC_word),
d2);
if (0 != new[0] || 0 != new[1]) {
- GC_printf0("Bad initialization by GC_malloc_explicitly_typed\n");
+ GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
FAIL;
}
}
@@ -1140,8 +1153,8 @@ void typed_test()
}
for (i = 0; i < 20000; i++) {
if (new[0] != 17) {
- (void)GC_printf1("typed alloc failed at %lu\n",
- (unsigned long)i);
+ (void)GC_printf("typed alloc failed at %lu\n",
+ (unsigned long)i);
FAIL;
}
new[0] = 0;
@@ -1157,7 +1170,7 @@ int fail_count = 0;
#ifndef __STDC__
/*ARGSUSED*/
void fail_proc1(x)
-GC_PTR x;
+void * x;
{
fail_count++;
}
@@ -1165,7 +1178,7 @@ GC_PTR x;
#else
/*ARGSUSED*/
-void fail_proc1(GC_PTR x)
+void fail_proc1(void * x)
{
fail_count++;
}
@@ -1181,7 +1194,7 @@ static void uniq(void *p, ...) {
for (i=0; i<n; i++)
for (j=0; j<i; j++)
if (q[i] == q[j]) {
- GC_printf0(
+ GC_printf(
"Apparently failed to mark form some function arguments.\n"
"Perhaps GC_push_regs was configured incorrectly?\n"
);
@@ -1208,28 +1221,28 @@ void run_one_test()
DCL_LOCK_STATE;
# ifdef FIND_LEAK
- (void)GC_printf0(
+ (void)GC_printf(
"This test program is not designed for leak detection mode\n");
- (void)GC_printf0("Expect lots of problems.\n");
+ (void)GC_printf("Expect lots of problems.\n");
# endif
GC_FREE(0);
# ifndef DBG_HDRS_ALL
collectable_count += 3;
- if (GC_size(GC_malloc(7)) != 8 &&
- GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word)
+ if ((GC_size(GC_malloc(7)) != 8 &&
+ GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
|| GC_size(GC_malloc(15)) != 16) {
- (void)GC_printf0("GC_size produced unexpected results\n");
+ (void)GC_printf("GC_size produced unexpected results\n");
FAIL;
}
collectable_count += 1;
if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
- (void)GC_printf1("GC_malloc(0) failed: GC_size returns %ld\n",
- GC_size(GC_malloc(0)));
+ (void)GC_printf("GC_malloc(0) failed: GC_size returns %ld\n",
+ (unsigned long)GC_size(GC_malloc(0)));
FAIL;
}
collectable_count += 1;
if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
- (void)GC_printf0("GC_malloc_uncollectable(0) failed\n");
+ (void)GC_printf("GC_malloc_uncollectable(0) failed\n");
FAIL;
}
GC_is_valid_displacement_print_proc = fail_proc1;
@@ -1237,21 +1250,21 @@ void run_one_test()
collectable_count += 1;
x = GC_malloc(16);
if (GC_base(x + 13) != x) {
- (void)GC_printf0("GC_base(heap ptr) produced incorrect result\n");
+ (void)GC_printf("GC_base(heap ptr) produced incorrect result\n");
FAIL;
}
# ifndef PCR
if (GC_base(y) != 0) {
- (void)GC_printf0("GC_base(fn_ptr) produced incorrect result\n");
+ (void)GC_printf("GC_base(fn_ptr) produced incorrect result\n");
FAIL;
}
# endif
if (GC_same_obj(x+5, x) != x + 5) {
- (void)GC_printf0("GC_same_obj produced incorrect result\n");
+ (void)GC_printf("GC_same_obj produced incorrect result\n");
FAIL;
}
if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
- (void)GC_printf0("GC_is_visible produced incorrect result\n");
+ (void)GC_printf("GC_is_visible produced incorrect result\n");
FAIL;
}
if (!TEST_FAIL_COUNT(1)) {
@@ -1260,14 +1273,14 @@ void run_one_test()
/* data segment, so there should have been no failures. */
/* The same applies to IA64. Something similar seems to */
/* be going on with NetBSD/M68K. */
- (void)GC_printf0("GC_is_visible produced wrong failure indication\n");
+ (void)GC_printf("GC_is_visible produced wrong failure indication\n");
FAIL;
# endif
}
if (GC_is_valid_displacement(y) != y
|| GC_is_valid_displacement(x) != x
|| GC_is_valid_displacement(x + 3) != x + 3) {
- (void)GC_printf0(
+ (void)GC_printf(
"GC_is_valid_displacement produced incorrect result\n");
FAIL;
}
@@ -1291,7 +1304,7 @@ void run_one_test()
if (GC_all_interior_pointers && !TEST_FAIL_COUNT(1)
|| !GC_all_interior_pointers && !TEST_FAIL_COUNT(2)) {
# endif
- (void)GC_printf0("GC_is_valid_displacement produced wrong failure indication\n");
+ (void)GC_printf("GC_is_valid_displacement produced wrong failure indication\n");
FAIL;
}
# endif
@@ -1322,12 +1335,12 @@ void run_one_test()
/* Repeated list reversal test. */
reverse_test();
# ifdef PRINTSTATS
- GC_printf0("-------------Finished reverse_test\n");
+ GC_printf("-------------Finished reverse_test\n");
# endif
# ifndef DBG_HDRS_ALL
typed_test();
# ifdef PRINTSTATS
- GC_printf0("-------------Finished typed_test\n");
+ GC_printf("-------------Finished typed_test\n");
# endif
# endif /* DBG_HDRS_ALL */
tree_test();
@@ -1339,17 +1352,17 @@ void run_one_test()
GC_gcollect();
tiny_reverse_test(0);
GC_gcollect();
- GC_printf0("Finished a child process\n");
+ GC_printf("Finished a child process\n");
exit(0);
}
# endif
- /* GC_printf1("Finished %x\n", pthread_self()); */
+ /* GC_printf("Finished %x\n", pthread_self()); */
}
void check_heap_stats()
{
unsigned long max_heap_sz;
- register int i;
+ int i;
int still_live;
int late_finalize_count = 0;
@@ -1383,26 +1396,26 @@ void check_heap_stats()
GC_gcollect();
late_finalize_count += GC_invoke_finalizers();
}
- (void)GC_printf1("Completed %lu tests\n", (unsigned long)n_tests);
- (void)GC_printf1("Allocated %lu collectable objects\n", (unsigned long)collectable_count);
- (void)GC_printf1("Allocated %lu uncollectable objects\n", (unsigned long)uncollectable_count);
- (void)GC_printf1("Allocated %lu atomic objects\n", (unsigned long)atomic_count);
- (void)GC_printf1("Allocated %lu stubborn objects\n", (unsigned long)stubborn_count);
- (void)GC_printf2("Finalized %lu/%lu objects - ",
- (unsigned long)finalized_count,
- (unsigned long)finalizable_count);
+ (void)GC_printf("Completed %u tests\n", n_tests);
+ (void)GC_printf("Allocated %d collectable objects\n", collectable_count);
+ (void)GC_printf("Allocated %d uncollectable objects\n",
+ uncollectable_count);
+ (void)GC_printf("Allocated %d atomic objects\n", atomic_count);
+ (void)GC_printf("Allocated %d stubborn objects\n", stubborn_count);
+ (void)GC_printf("Finalized %d/%d objects - ",
+ finalized_count, finalizable_count);
# ifdef FINALIZE_ON_DEMAND
if (finalized_count != late_finalize_count) {
- (void)GC_printf0("Demand finalization error\n");
+ (void)GC_printf("Demand finalization error\n");
FAIL;
}
# endif
if (finalized_count > finalizable_count
|| finalized_count < finalizable_count/2) {
- (void)GC_printf0("finalization is probably broken\n");
+ (void)GC_printf("finalization is probably broken\n");
FAIL;
} else {
- (void)GC_printf0("finalization is probably ok\n");
+ (void)GC_printf("finalization is probably ok\n");
}
still_live = 0;
for (i = 0; i < MAX_FINALIZED; i++) {
@@ -1412,34 +1425,33 @@ void check_heap_stats()
}
i = finalizable_count - finalized_count - still_live;
if (0 != i) {
- (void)GC_printf2
- ("%lu disappearing links remain and %ld more objects were not finalized\n",
- (unsigned long) still_live, (long)i);
+ GC_printf("%d disappearing links remain and %d more objects "
+ "were not finalized\n", still_live, i);
if (i > 10) {
- GC_printf0("\tVery suspicious!\n");
+ GC_printf("\tVery suspicious!\n");
} else {
- GC_printf0("\tSlightly suspicious, but probably OK.\n");
+ GC_printf("\tSlightly suspicious, but probably OK.\n");
}
}
- (void)GC_printf1("Total number of bytes allocated is %lu\n",
+ (void)GC_printf("Total number of bytes allocated is %lu\n",
(unsigned long)
- WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
- (void)GC_printf1("Final heap size is %lu bytes\n",
- (unsigned long)GC_get_heap_size());
- if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
+ (GC_bytes_allocd + GC_bytes_allocd_before_gc));
+ (void)GC_printf("Final heap size is %lu bytes\n",
+ (unsigned long)GC_get_heap_size());
+ if (GC_bytes_allocd + GC_bytes_allocd_before_gc
# ifdef VERY_SMALL_CONFIG
< 2700000*n_tests) {
# else
< 33500000*n_tests) {
# endif
- (void)GC_printf0("Incorrect execution - missed some allocations\n");
+ (void)GC_printf("Incorrect execution - missed some allocations\n");
FAIL;
}
if (GC_get_heap_size() > max_heap_sz*n_tests) {
- (void)GC_printf0("Unexpected heap growth - collector may be broken\n");
+ (void)GC_printf("Unexpected heap growth - collector may be broken\n");
FAIL;
}
- (void)GC_printf0("Collector appears to work\n");
+ (void)GC_printf("Collector appears to work\n");
}
#if defined(MACOS)
@@ -1468,7 +1480,7 @@ void SetMinimumStack(long minSize)
GC_word p;
#endif
{
- GC_printf1(msg, (unsigned long)p);
+ GC_printf(msg, (unsigned long)p);
/*FAIL;*/
}
@@ -1490,7 +1502,7 @@ void SetMinimumStack(long minSize)
# if defined(DJGPP)
/* No good way to determine stack base from library; do it */
/* manually on this platform. */
- GC_stackbottom = (GC_PTR)(&dummy);
+ GC_stackbottom = (void *)(&dummy);
# endif
# if defined(MACOS)
/* Make sure we have lots and lots of stack space. */
@@ -1503,14 +1515,14 @@ void SetMinimumStack(long minSize)
# if (defined(MPROTECT_VDB) || defined(PROC_VDB)) \
&& !defined(MAKE_BACK_GRAPH)
GC_enable_incremental();
- (void) GC_printf0("Switched to incremental mode\n");
+ (void) GC_printf("Switched to incremental mode\n");
# if defined(MPROTECT_VDB)
- (void)GC_printf0("Emulating dirty bits with mprotect/signals\n");
+ (void)GC_printf("Emulating dirty bits with mprotect/signals\n");
# else
# ifdef PROC_VDB
- (void)GC_printf0("Reading dirty bits from /proc\n");
+ (void)GC_printf("Reading dirty bits from /proc\n");
# else
- (void)GC_printf0("Using DEFAULT_VDB dirty bit implementation\n");
+ (void)GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
# endif
# endif
# endif
@@ -1560,11 +1572,11 @@ LRESULT CALLBACK window_proc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
LRESULT ret = 0;
switch (uMsg) {
case WM_HIBERNATE:
- GC_printf0("Received WM_HIBERNATE, calling GC_gcollect\n");
+ GC_printf("Received WM_HIBERNATE, calling GC_gcollect\n");
GC_gcollect();
break;
case WM_CLOSE:
- GC_printf0("Received WM_CLOSE, closing window\n");
+ GC_printf("Received WM_CLOSE, closing window\n");
DestroyWindow(hwnd);
break;
case WM_DESTROY:
@@ -1640,21 +1652,19 @@ int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
HANDLE win_thr_h;
# endif
DWORD thread_id;
-# if 0
- GC_enable_incremental();
-# endif
+ GC_enable_incremental();
GC_init();
InitializeCriticalSection(&incr_cs);
(void) GC_set_warn_proc(warn_proc);
# ifdef MSWINCE
win_created_h = CreateEvent(NULL, FALSE, FALSE, NULL);
if (win_created_h == (HANDLE)NULL) {
- (void)GC_printf1("Event creation failed %lu\n", (unsigned long)GetLastError());
+ (void)GC_printf("Event creation failed %\n", GetLastError());
FAIL;
}
win_thr_h = GC_CreateThread(NULL, 0, thr_window, 0, 0, &thread_id);
if (win_thr_h == (HANDLE)NULL) {
- (void)GC_printf1("Thread creation failed %lu\n", (unsigned long)GetLastError());
+ (void)GC_printf("Thread creation failed %d\n", GetLastError());
FAIL;
}
if (WaitForSingleObject(win_created_h, INFINITE) != WAIT_OBJECT_0)
@@ -1665,7 +1675,7 @@ int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
for (i = 0; i < NTEST; i++) {
h[i] = GC_CreateThread(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
if (h[i] == (HANDLE)NULL) {
- (void)GC_printf1("Thread creation failed %lu\n", (unsigned long)GetLastError());
+ (void)GC_printf("Thread creation failed %d\n", GetLastError());
FAIL;
}
}
@@ -1674,7 +1684,7 @@ int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
# if NTEST > 0
for (i = 0; i < NTEST; i++) {
if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
- (void)GC_printf1("Thread wait failed %lu\n", (unsigned long)GetLastError());
+ (void)GC_printf("Thread wait failed %d\n", GetLastError());
FAIL;
}
}
@@ -1706,11 +1716,11 @@ test()
run_one_test();
if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
!= PCR_ERes_okay || code != 0) {
- (void)GC_printf0("Thread 1 failed\n");
+ (void)GC_printf("Thread 1 failed\n");
}
if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
!= PCR_ERes_okay || code != 0) {
- (void)GC_printf0("Thread 2 failed\n");
+ (void)GC_printf("Thread 2 failed\n");
}
check_heap_stats();
return(0);
@@ -1742,24 +1752,24 @@ main()
# endif
(void) GC_set_warn_proc(warn_proc);
if (thr_keycreate(&fl_key, GC_free) != 0) {
- (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Key creation failed %d\n", code);
FAIL;
}
if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, 0, &th1)) != 0) {
- (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Thread 1 creation failed %d\n", code);
FAIL;
}
if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, THR_NEW_LWP, &th2)) != 0) {
- (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Thread 2 creation failed %d\n", code);
FAIL;
}
run_one_test();
if ((code = thr_join(th1, 0, 0)) != 0) {
- (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Thread 1 failed %d\n", code);
FAIL;
}
if (thr_join(th2, 0, 0) != 0) {
- (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Thread 2 failed %d\n", code);
FAIL;
}
check_heap_stats();
@@ -1772,7 +1782,7 @@ main()
--> bad news
#endif
-main()
+int main()
{
pthread_t th1;
pthread_t th2;
@@ -1788,7 +1798,7 @@ main()
/* Default stack size is too small, especially with the 64 bit ABI */
/* Increase it. */
if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
- (void)GC_printf0("pthread_default_stacksize_np failed.\n");
+ (void)GC_printf("pthread_default_stacksize_np failed.\n");
}
# endif /* GC_HPUX_THREADS */
GC_INIT();
@@ -1803,43 +1813,43 @@ main()
&& !defined(PARALLEL_MARK) &&!defined(REDIRECT_MALLOC) \
&& !defined(MAKE_BACK_GRAPH)
GC_enable_incremental();
- (void) GC_printf0("Switched to incremental mode\n");
+ (void) GC_printf("Switched to incremental mode\n");
# if defined(MPROTECT_VDB)
- (void)GC_printf0("Emulating dirty bits with mprotect/signals\n");
+ (void)GC_printf("Emulating dirty bits with mprotect/signals\n");
# else
# ifdef PROC_VDB
- (void)GC_printf0("Reading dirty bits from /proc\n");
+ (void)GC_printf("Reading dirty bits from /proc\n");
# else
- (void)GC_printf0("Using DEFAULT_VDB dirty bit implementation\n");
+ (void)GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
# endif
# endif
# endif
(void) GC_set_warn_proc(warn_proc);
if ((code = pthread_key_create(&fl_key, 0)) != 0) {
- (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Key creation failed %d\n", code);
FAIL;
}
if ((code = pthread_create(&th1, &attr, thr_run_one_test, 0)) != 0) {
- (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Thread 1 creation failed %d\n", code);
FAIL;
}
if ((code = pthread_create(&th2, &attr, thr_run_one_test, 0)) != 0) {
- (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Thread 2 creation failed %d\n", code);
FAIL;
}
run_one_test();
if ((code = pthread_join(th1, 0)) != 0) {
- (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Thread 1 failed %d\n", code);
FAIL;
}
if (pthread_join(th2, 0) != 0) {
- (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
+ (void)GC_printf("Thread 2 failed %d\n", code);
FAIL;
}
check_heap_stats();
(void)fflush(stdout);
pthread_attr_destroy(&attr);
- GC_printf1("Completed %d collections\n", GC_gc_no);
+ GC_printf("Completed %d collections\n", GC_gc_no);
return(0);
}
#endif /* GC_PTHREADS */
diff --git a/threadlibs.c b/threadlibs.c
index 9078c8d8..264b7240 100644
--- a/threadlibs.c
+++ b/threadlibs.c
@@ -11,17 +11,10 @@ int main()
"-Wl,--wrap -Wl,pthread_sigmask -Wl,--wrap -Wl,sleep\n");
# endif
# if defined(GC_LINUX_THREADS) || defined(GC_IRIX_THREADS) \
- || defined(GC_SOLARIS_PTHREADS) \
+ || defined(GC_FREEBSD_THREADS) || defined(GC_SOLARIS_PTHREADS) \
|| defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)
printf("-lpthread\n");
# endif
-# if defined(GC_FREEBSD_THREADS)
-# if (__FREEBSD_version >= 500000)
- printf("-lpthread\n");
-# else
- printf("-pthread\n");
-# endif
-# endif
# if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
printf("-lpthread -lrt\n");
# endif
diff --git a/typd_mlc.c b/typd_mlc.c
index 373257cd..cdedf465 100644
--- a/typd_mlc.c
+++ b/typd_mlc.c
@@ -65,16 +65,16 @@ typedef union ComplexDescriptor {
struct LeafDescriptor { /* Describes simple array */
word ld_tag;
# define LEAF_TAG 1
- word ld_size; /* bytes per element */
+ size_t ld_size; /* bytes per element */
/* multiple of ALIGNMENT */
- word ld_nelements; /* Number of elements. */
+ size_t ld_nelements; /* Number of elements. */
GC_descr ld_descriptor; /* A simple length, bitmap, */
/* or procedure descriptor. */
} ld;
struct ComplexArrayDescriptor {
word ad_tag;
# define ARRAY_TAG 2
- word ad_nelements;
+ size_t ad_nelements;
union ComplexDescriptor * ad_element_descr;
} ad;
struct SequenceDescriptor {
@@ -89,10 +89,10 @@ typedef union ComplexDescriptor {
ext_descr * GC_ext_descriptors; /* Points to array of extended */
/* descriptors. */
-word GC_ed_size = 0; /* Current size of above arrays. */
+size_t GC_ed_size = 0; /* Current size of above arrays. */
# define ED_INITIAL_SIZE 100;
-word GC_avail_descr = 0; /* Next available slot. */
+size_t GC_avail_descr = 0; /* Next available slot. */
int GC_typed_mark_proc_index; /* Indices of my mark */
int GC_array_mark_proc_index; /* procedures. */
@@ -101,18 +101,15 @@ int GC_array_mark_proc_index; /* procedures. */
/* starting index. */
/* Returns -1 on failure. */
/* Caller does not hold allocation lock. */
-signed_word GC_add_ext_descriptor(bm, nbits)
-GC_bitmap bm;
-word nbits;
+signed_word GC_add_ext_descriptor(GC_bitmap bm, word nbits)
{
- register size_t nwords = divWORDSZ(nbits + WORDSZ-1);
- register signed_word result;
- register word i;
- register word last_part;
- register int extra_bits;
+ size_t nwords = divWORDSZ(nbits + WORDSZ-1);
+ signed_word result;
+ size_t i;
+ word last_part;
+ int extra_bits;
DCL_LOCK_STATE;
- DISABLE_SIGNALS();
LOCK();
while (GC_avail_descr + nwords >= GC_ed_size) {
ext_descr * new;
@@ -120,7 +117,6 @@ word nbits;
word ed_size = GC_ed_size;
UNLOCK();
- ENABLE_SIGNALS();
if (ed_size == 0) {
new_size = ED_INITIAL_SIZE;
} else {
@@ -129,7 +125,6 @@ word nbits;
}
new = (ext_descr *) GC_malloc_atomic(new_size * sizeof(ext_descr));
if (new == 0) return(-1);
- DISABLE_SIGNALS();
LOCK();
if (ed_size == GC_ed_size) {
if (GC_avail_descr != 0) {
@@ -154,7 +149,6 @@ word nbits;
GC_ext_descriptors[result + i].ed_continued = FALSE;
GC_avail_descr += nwords;
UNLOCK();
- ENABLE_SIGNALS();
return(result);
}
@@ -166,9 +160,7 @@ GC_descr GC_bm_table[WORDSZ/2];
/* The result is known to be short enough to fit into a bitmap */
/* descriptor. */
/* Descriptor is a GC_DS_LENGTH or GC_DS_BITMAP descriptor. */
-GC_descr GC_double_descr(descriptor, nwords)
-register GC_descr descriptor;
-register word nwords;
+GC_descr GC_double_descr(GC_descr descriptor, word nwords)
{
if ((descriptor & GC_DS_TAGS) == GC_DS_LENGTH) {
descriptor = GC_bm_table[BYTES_TO_WORDS((word)descriptor)];
@@ -198,21 +190,17 @@ complex_descriptor * GC_make_sequence_descriptor();
# define LEAF 1
# define SIMPLE 0
# define NO_MEM (-1)
-int GC_make_array_descriptor(nelements, size, descriptor,
- simple_d, complex_d, leaf)
-word size;
-word nelements;
-GC_descr descriptor;
-GC_descr *simple_d;
-complex_descriptor **complex_d;
-struct LeafDescriptor * leaf;
+int GC_make_array_descriptor(size_t nelements, size_t size, GC_descr descriptor,
+ GC_descr *simple_d,
+ complex_descriptor **complex_d,
+ struct LeafDescriptor * leaf)
{
# define OPT_THRESHOLD 50
/* For larger arrays, we try to combine descriptors of adjacent */
/* descriptors to speed up marking, and to reduce the amount */
/* of space needed on the mark stack. */
if ((descriptor & GC_DS_TAGS) == GC_DS_LENGTH) {
- if ((word)descriptor == size) {
+ if (descriptor == (GC_descr)size) {
*simple_d = nelements * descriptor;
return(SIMPLE);
} else if ((word)descriptor == 0) {
@@ -298,9 +286,8 @@ struct LeafDescriptor * leaf;
}
}
-complex_descriptor * GC_make_sequence_descriptor(first, second)
-complex_descriptor * first;
-complex_descriptor * second;
+complex_descriptor * GC_make_sequence_descriptor(complex_descriptor *first,
+ complex_descriptor *second)
{
struct SequenceDescriptor * result =
(struct SequenceDescriptor *)
@@ -317,9 +304,8 @@ complex_descriptor * second;
}
#ifdef UNDEFINED
-complex_descriptor * GC_make_complex_array_descriptor(nelements, descr)
-word nelements;
-complex_descriptor * descr;
+complex_descriptor * GC_make_complex_array_descriptor(word nelements,
+ complex_descriptor *descr)
{
struct ComplexArrayDescriptor * result =
(struct ComplexArrayDescriptor *)
@@ -338,32 +324,24 @@ ptr_t * GC_eobjfreelist;
ptr_t * GC_arobjfreelist;
-mse * GC_typed_mark_proc GC_PROTO((register word * addr,
- register mse * mark_stack_ptr,
- mse * mark_stack_limit,
- word env));
+mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
+ mse * mark_stack_limit, word env);
-mse * GC_array_mark_proc GC_PROTO((register word * addr,
- register mse * mark_stack_ptr,
- mse * mark_stack_limit,
- word env));
+mse * GC_array_mark_proc(word * addr, mse * mark_stack_ptr,
+ mse * mark_stack_limit, word env);
/* Caller does not hold allocation lock. */
-void GC_init_explicit_typing()
+void GC_init_explicit_typing(void)
{
register int i;
DCL_LOCK_STATE;
-# ifdef PRINTSTATS
- if (sizeof(struct LeafDescriptor) % sizeof(word) != 0)
- ABORT("Bad leaf descriptor size");
-# endif
- DISABLE_SIGNALS();
+ /* Ignore gcc "no effect" warning. */
+ GC_STATIC_ASSERT(sizeof(struct LeafDescriptor) % sizeof(word) == 0);
LOCK();
if (GC_explicit_typing_initialized) {
UNLOCK();
- ENABLE_SIGNALS();
return;
}
GC_explicit_typing_initialized = TRUE;
@@ -388,28 +366,19 @@ void GC_init_explicit_typing()
GC_bm_table[i] = d;
}
UNLOCK();
- ENABLE_SIGNALS();
}
-# if defined(__STDC__) || defined(__cplusplus)
- mse * GC_typed_mark_proc(register word * addr,
- register mse * mark_stack_ptr,
- mse * mark_stack_limit,
- word env)
-# else
- mse * GC_typed_mark_proc(addr, mark_stack_ptr, mark_stack_limit, env)
- register word * addr;
- register mse * mark_stack_ptr;
- mse * mark_stack_limit;
- word env;
-# endif
+mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
+ mse * mark_stack_limit, word env)
{
- register word bm = GC_ext_descriptors[env].ed_bitmap;
- register word * current_p = addr;
- register word current;
- register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
- register ptr_t least_ha = GC_least_plausible_heap_addr;
-
+ word bm = GC_ext_descriptors[env].ed_bitmap;
+ word * current_p = addr;
+ word current;
+ ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ ptr_t least_ha = GC_least_plausible_heap_addr;
+ DECLARE_HDR_CACHE;
+
+ INIT_HDR_CACHE;
for (; bm != 0; bm >>= 1, current_p++) {
if (bm & 1) {
current = *current_p;
@@ -429,7 +398,7 @@ void GC_init_explicit_typing()
if (mark_stack_ptr >= mark_stack_limit) {
mark_stack_ptr = GC_signal_mark_stack_overflow(mark_stack_ptr);
}
- mark_stack_ptr -> mse_start = addr + WORDSZ;
+ mark_stack_ptr -> mse_start = (ptr_t)(addr + WORDSZ);
mark_stack_ptr -> mse_descr =
GC_MAKE_PROC(GC_typed_mark_proc_index, env+1);
}
@@ -439,8 +408,7 @@ void GC_init_explicit_typing()
/* Return the size of the object described by d. It would be faster to */
/* store this directly, or to compute it as part of */
/* GC_push_complex_descriptor, but hopefully it doesn't matter. */
-word GC_descr_obj_size(d)
-register complex_descriptor *d;
+word GC_descr_obj_size(complex_descriptor *d)
{
switch(d -> TAG) {
case LEAF_TAG:
@@ -459,11 +427,8 @@ register complex_descriptor *d;
/* Push descriptors for the object at addr with complex descriptor d */
/* onto the mark stack. Return 0 if the mark stack overflowed. */
-mse * GC_push_complex_descriptor(addr, d, msp, msl)
-word * addr;
-register complex_descriptor *d;
-register mse * msp;
-mse * msl;
+mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
+ mse *msp, mse *msl)
{
register ptr_t current = (ptr_t) addr;
register word nelements;
@@ -480,7 +445,7 @@ mse * msl;
sz = d -> ld.ld_size;
for (i = 0; i < nelements; i++) {
msp++;
- msp -> mse_start = (word *)current;
+ msp -> mse_start = current;
msp -> mse_descr = descr;
current += sz;
}
@@ -518,22 +483,13 @@ mse * msl;
}
/*ARGSUSED*/
-# if defined(__STDC__) || defined(__cplusplus)
- mse * GC_array_mark_proc(register word * addr,
- register mse * mark_stack_ptr,
- mse * mark_stack_limit,
- word env)
-# else
- mse * GC_array_mark_proc(addr, mark_stack_ptr, mark_stack_limit, env)
- register word * addr;
- register mse * mark_stack_ptr;
- mse * mark_stack_limit;
- word env;
-# endif
+mse * GC_array_mark_proc(word * addr, mse * mark_stack_ptr,
+ mse * mark_stack_limit, word env)
{
- register hdr * hhdr = HDR(addr);
- register word sz = hhdr -> hb_sz;
- register complex_descriptor * descr = (complex_descriptor *)(addr[sz-1]);
+ hdr * hhdr = HDR(addr);
+ size_t sz = hhdr -> hb_sz;
+ size_t nwords = BYTES_TO_WORDS(sz);
+ complex_descriptor * descr = (complex_descriptor *)(addr[nwords-1]);
mse * orig_mark_stack_ptr = mark_stack_ptr;
mse * new_mark_stack_ptr;
@@ -554,28 +510,22 @@ mse * msl;
/* the original array entry. */
GC_mark_stack_too_small = TRUE;
new_mark_stack_ptr = orig_mark_stack_ptr + 1;
- new_mark_stack_ptr -> mse_start = addr;
- new_mark_stack_ptr -> mse_descr = WORDS_TO_BYTES(sz) | GC_DS_LENGTH;
+ new_mark_stack_ptr -> mse_start = (ptr_t)addr;
+ new_mark_stack_ptr -> mse_descr = sz | GC_DS_LENGTH;
} else {
/* Push descriptor itself */
new_mark_stack_ptr++;
- new_mark_stack_ptr -> mse_start = addr + sz - 1;
+ new_mark_stack_ptr -> mse_start = (ptr_t)(addr + nwords - 1);
new_mark_stack_ptr -> mse_descr = sizeof(word) | GC_DS_LENGTH;
}
- return(new_mark_stack_ptr);
+ return new_mark_stack_ptr;
}
-#if defined(__STDC__) || defined(__cplusplus)
- GC_descr GC_make_descriptor(GC_bitmap bm, size_t len)
-#else
- GC_descr GC_make_descriptor(bm, len)
- GC_bitmap bm;
- size_t len;
-#endif
+GC_descr GC_make_descriptor(GC_bitmap bm, size_t len)
{
- register signed_word last_set_bit = len - 1;
- register word result;
- register int i;
+ signed_word last_set_bit = len - 1;
+ GC_descr result;
+ int i;
# define HIGH_BIT (((word)1) << (WORDSZ - 1))
if (!GC_explicit_typing_initialized) GC_init_explicit_typing();
@@ -592,7 +542,7 @@ mse * msl;
}
if (all_bits_set) {
/* An initial section contains all pointers. Use length descriptor. */
- return(WORDS_TO_BYTES(last_set_bit+1) | GC_DS_LENGTH);
+ return (WORDS_TO_BYTES(last_set_bit+1) | GC_DS_LENGTH);
}
}
# endif
@@ -614,121 +564,88 @@ mse * msl;
/* Out of memory: use conservative */
/* approximation. */
result = GC_MAKE_PROC(GC_typed_mark_proc_index, (word)index);
- return(result);
+ return result;
}
}
ptr_t GC_clear_stack();
#define GENERAL_MALLOC(lb,k) \
- (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
+ (void *)GC_clear_stack(GC_generic_malloc((word)lb, k))
#define GENERAL_MALLOC_IOP(lb,k) \
- (GC_PTR)GC_clear_stack(GC_generic_malloc_ignore_off_page(lb, k))
-
-#if defined(__STDC__) || defined(__cplusplus)
- void * GC_malloc_explicitly_typed(size_t lb, GC_descr d)
-#else
- char * GC_malloc_explicitly_typed(lb, d)
- size_t lb;
- GC_descr d;
-#endif
+ (void *)GC_clear_stack(GC_generic_malloc_ignore_off_page(lb, k))
+
+void * GC_malloc_explicitly_typed(size_t lb, GC_descr d)
{
-register ptr_t op;
-register ptr_t * opp;
-register word lw;
-DCL_LOCK_STATE;
+ ptr_t op;
+ ptr_t * opp;
+ size_t lg;
+ DCL_LOCK_STATE;
lb += TYPD_EXTRA_BYTES;
- if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_eobjfreelist[lw]);
+ if(SMALL_OBJ(lb)) {
+ lg = GC_size_map[lb];
+ opp = &(GC_eobjfreelist[lg]);
FASTLOCK();
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
FASTUNLOCK();
op = (ptr_t)GENERAL_MALLOC((word)lb, GC_explicit_kind);
if (0 == op) return 0;
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb]; /* May have been uninitialized. */
-# endif
+ lg = GC_size_map[lb]; /* May have been uninitialized. */
} else {
*opp = obj_link(op);
obj_link(op) = 0;
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
FASTUNLOCK();
}
} else {
op = (ptr_t)GENERAL_MALLOC((word)lb, GC_explicit_kind);
if (op != NULL)
- lw = BYTES_TO_WORDS(GC_size(op));
+ lg = BYTES_TO_GRANULES(GC_size(op));
}
if (op != NULL)
- ((word *)op)[lw - 1] = d;
- return((GC_PTR) op);
+ ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ return((void *) op);
}
-#if defined(__STDC__) || defined(__cplusplus)
- void * GC_malloc_explicitly_typed_ignore_off_page(size_t lb, GC_descr d)
-#else
- char * GC_malloc_explicitly_typed_ignore_off_page(lb, d)
- size_t lb;
- GC_descr d;
-#endif
+void * GC_malloc_explicitly_typed_ignore_off_page(size_t lb, GC_descr d)
{
-register ptr_t op;
-register ptr_t * opp;
-register word lw;
+ptr_t op;
+ptr_t * opp;
+size_t lg;
DCL_LOCK_STATE;
lb += TYPD_EXTRA_BYTES;
if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_eobjfreelist[lw]);
+ lg = GC_size_map[lb];
+ opp = &(GC_eobjfreelist[lg]);
FASTLOCK();
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
FASTUNLOCK();
op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb]; /* May have been uninitialized. */
-# endif
+ lg = GC_size_map[lb]; /* May have been uninitialized. */
} else {
*opp = obj_link(op);
obj_link(op) = 0;
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
FASTUNLOCK();
}
} else {
op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
if (op != NULL)
- lw = BYTES_TO_WORDS(GC_size(op));
+ lg = BYTES_TO_WORDS(GC_size(op));
}
if (op != NULL)
- ((word *)op)[lw - 1] = d;
- return((GC_PTR) op);
+ ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ return((void *) op);
}
-#if defined(__STDC__) || defined(__cplusplus)
- void * GC_calloc_explicitly_typed(size_t n,
- size_t lb,
- GC_descr d)
-#else
- char * GC_calloc_explicitly_typed(n, lb, d)
- size_t n;
- size_t lb;
- GC_descr d;
-#endif
+void * GC_calloc_explicitly_typed(size_t n, size_t lb, GC_descr d)
{
-register ptr_t op;
-register ptr_t * opp;
-register word lw;
+ptr_t op;
+ptr_t * opp;
+size_t lg;
GC_descr simple_descr;
complex_descriptor *complex_descr;
register int descr_type;
@@ -750,54 +667,50 @@ DCL_LOCK_STATE;
break;
}
if( SMALL_OBJ(lb) ) {
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb];
-# else
- lw = ALIGNED_WORDS(lb);
-# endif
- opp = &(GC_arobjfreelist[lw]);
+ lg = GC_size_map[lb];
+ opp = &(GC_arobjfreelist[lg]);
FASTLOCK();
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
FASTUNLOCK();
op = (ptr_t)GENERAL_MALLOC((word)lb, GC_array_kind);
if (0 == op) return(0);
-# ifdef MERGE_SIZES
- lw = GC_size_map[lb]; /* May have been uninitialized. */
-# endif
+ lg = GC_size_map[lb]; /* May have been uninitialized. */
} else {
*opp = obj_link(op);
obj_link(op) = 0;
- GC_words_allocd += lw;
+ GC_bytes_allocd += GRANULES_TO_BYTES(lg);
FASTUNLOCK();
}
} else {
op = (ptr_t)GENERAL_MALLOC((word)lb, GC_array_kind);
if (0 == op) return(0);
- lw = BYTES_TO_WORDS(GC_size(op));
+ lg = BYTES_TO_GRANULES(GC_size(op));
}
if (descr_type == LEAF) {
/* Set up the descriptor inside the object itself. */
- VOLATILE struct LeafDescriptor * lp =
+ volatile struct LeafDescriptor * lp =
(struct LeafDescriptor *)
((word *)op
- + lw - (BYTES_TO_WORDS(sizeof(struct LeafDescriptor)) + 1));
+ + GRANULES_TO_WORDS(lg)
+ - (BYTES_TO_WORDS(sizeof(struct LeafDescriptor)) + 1));
lp -> ld_tag = LEAF_TAG;
lp -> ld_size = leaf.ld_size;
lp -> ld_nelements = leaf.ld_nelements;
lp -> ld_descriptor = leaf.ld_descriptor;
- ((VOLATILE word *)op)[lw - 1] = (word)lp;
+ ((volatile word *)op)[GRANULES_TO_WORDS(lg) - 1] = (word)lp;
} else {
extern unsigned GC_finalization_failures;
unsigned ff = GC_finalization_failures;
+ size_t lw = GRANULES_TO_WORDS(lg);
((word *)op)[lw - 1] = (word)complex_descr;
/* Make sure the descriptor is cleared once there is any danger */
/* it may have been collected. */
(void)
- GC_general_register_disappearing_link((GC_PTR *)
+ GC_general_register_disappearing_link((void * *)
((word *)op+lw-1),
- (GC_PTR) op);
+ (void *) op);
if (ff != GC_finalization_failures) {
/* Couldn't register it due to lack of memory. Punt. */
/* This will probably fail too, but gives the recovery code */
@@ -805,5 +718,5 @@ DCL_LOCK_STATE;
return(GC_malloc(n*lb));
}
}
- return((GC_PTR) op);
+ return((void *) op);
}
diff --git a/version.h b/version.h
index b06f6dfe..43a2d593 100644
--- a/version.h
+++ b/version.h
@@ -1,9 +1,9 @@
/* The version here should match that in configure/configure.in */
/* Eventually this one may become unnecessary. For now we need */
/* it to keep the old-style build process working. */
-#define GC_TMP_VERSION_MAJOR 6
-#define GC_TMP_VERSION_MINOR 4
-#define GC_TMP_ALPHA_VERSION GC_NOT_ALPHA
+#define GC_TMP_VERSION_MAJOR 7
+#define GC_TMP_VERSION_MINOR 0
+#define GC_TMP_ALPHA_VERSION 1
#ifndef GC_NOT_ALPHA
# define GC_NOT_ALPHA 0xff
diff --git a/win32_threads.c b/win32_threads.c
index 5604290d..51ed87e0 100755
--- a/win32_threads.c
+++ b/win32_threads.c
@@ -18,6 +18,8 @@
void * GC_start_routine(void * arg);
void GC_thread_exit_proc(void *arg);
+# include <pthread.h>
+
#endif
/* The type of the first argument to InterlockedExchange. */
@@ -34,6 +36,13 @@ typedef LONG * IE_t;
GC_bool GC_thr_initialized = FALSE;
+#ifdef GC_DLL
+ GC_bool GC_need_to_lock = TRUE;
+ /* Cannot intercept thread creation. */
+#else
+ GC_bool GC_need_to_lock = FALSE;
+#endif
+
DWORD GC_main_thread = 0;
struct GC_thread_Rep {
@@ -127,7 +136,7 @@ static GC_thread GC_new_thread(void) {
0,
DUPLICATE_SAME_ACCESS)) {
DWORD last_error = GetLastError();
- GC_printf1("Last error code: %lx\n", last_error);
+ GC_err_printf("Last error code: %d\n", last_error);
ABORT("DuplicateHandle failed");
}
thread_table[i].stack_base = GC_get_stack_base();
@@ -215,7 +224,7 @@ static GC_thread GC_lookup_thread(pthread_t id)
#endif /* CYGWIN32 */
-void GC_push_thread_structures GC_PROTO((void))
+void GC_push_thread_structures(void)
{
/* Unlike the other threads implementations, the thread table here */
/* contains no pointers to the collectable heap. Thus we have */
@@ -232,7 +241,7 @@ void GC_push_thread_structures GC_PROTO((void))
# endif
}
-void GC_stop_world()
+void GC_stop_world(void)
{
DWORD thread_id = GetCurrentThreadId();
int i;
@@ -272,7 +281,7 @@ void GC_stop_world()
}
}
-void GC_start_world()
+void GC_start_world(void)
{
DWORD thread_id = GetCurrentThreadId();
int i;
@@ -291,7 +300,7 @@ void GC_start_world()
# ifdef _MSC_VER
# pragma warning(disable:4715)
# endif
-ptr_t GC_current_stackbottom()
+ptr_t GC_current_stackbottom(void)
{
DWORD thread_id = GetCurrentThreadId();
int i;
@@ -327,7 +336,7 @@ ptr_t GC_current_stackbottom()
}
# endif
-void GC_push_all_stacks()
+void GC_push_all_stacks(void)
{
DWORD thread_id = GetCurrentThreadId();
GC_bool found_me = FALSE;
@@ -475,6 +484,7 @@ GC_API HANDLE WINAPI GC_CreateThread(
args -> start = lpStartAddress;
args -> param = lpParameter;
+ GC_need_to_lock = TRUE;
thread_h = CreateThread(lpThreadAttributes,
dwStackSize, thread_start,
args, dwCreationFlags,
@@ -565,7 +575,7 @@ DWORD WINAPI main_thread_start(LPVOID arg)
# else /* !MSWINCE */
/* Called by GC_init() - we hold the allocation lock. */
-void GC_thr_init() {
+void GC_thr_init(void) {
if (GC_thr_initialized) return;
GC_main_thread = GetCurrentThreadId();
GC_thr_initialized = TRUE;
@@ -588,8 +598,8 @@ int GC_pthread_join(pthread_t pthread_id, void **retval) {
GC_thread me;
# if DEBUG_CYGWIN_THREADS
- GC_printf3("thread 0x%x(0x%x) is joining thread 0x%x.\n",
- (int)pthread_self(), GetCurrentThreadId(), (int)pthread_id);
+ GC_printf("thread 0x%x(0x%x) is joining thread 0x%x.\n",
+ (int)pthread_self(), GetCurrentThreadId(), (int)pthread_id);
# endif
/* Thread being joined might not have registered itself yet. */
@@ -604,7 +614,7 @@ int GC_pthread_join(pthread_t pthread_id, void **retval) {
GC_delete_gc_thread(me);
# if DEBUG_CYGWIN_THREADS
- GC_printf3("thread 0x%x(0x%x) completed join with thread 0x%x.\n",
+ GC_printf("thread 0x%x(0x%x) completed join with thread 0x%x.\n",
(int)pthread_self(), GetCurrentThreadId(), (int)pthread_id);
# endif
@@ -639,9 +649,10 @@ GC_pthread_create(pthread_t *new_thread,
}
# if DEBUG_CYGWIN_THREADS
- GC_printf2("About to create a thread from 0x%x(0x%x)\n",
- (int)pthread_self(), GetCurrentThreadId);
+ GC_printf("About to create a thread from 0x%x(0x%x)\n",
+ (int)pthread_self(), GetCurrentThreadId);
# endif
+ GC_need_to_lock = TRUE;
result = pthread_create(new_thread, attr, GC_start_routine, si);
if (result) { /* failure */
@@ -663,8 +674,8 @@ void * GC_start_routine(void * arg)
int i;
# if DEBUG_CYGWIN_THREADS
- GC_printf2("thread 0x%x(0x%x) starting...\n",(int)pthread_self(),
- GetCurrentThreadId());
+ GC_printf("thread 0x%x(0x%x) starting...\n",(int)pthread_self(),
+ GetCurrentThreadId());
# endif
/* If a GC occurs before the thread is registered, that GC will */
@@ -690,8 +701,8 @@ void * GC_start_routine(void * arg)
pthread_cleanup_pop(0);
# if DEBUG_CYGWIN_THREADS
- GC_printf2("thread 0x%x(0x%x) returned from start routine.\n",
- (int)pthread_self(),GetCurrentThreadId());
+ GC_printf("thread 0x%x(0x%x) returned from start routine.\n",
+ (int)pthread_self(),GetCurrentThreadId());
# endif
return(result);
@@ -703,8 +714,8 @@ void GC_thread_exit_proc(void *arg)
int i;
# if DEBUG_CYGWIN_THREADS
- GC_printf2("thread 0x%x(0x%x) called pthread_exit().\n",
- (int)pthread_self(),GetCurrentThreadId());
+ GC_printf("thread 0x%x(0x%x) called pthread_exit().\n",
+ (int)pthread_self(),GetCurrentThreadId());
# endif
LOCK();