-rw-r--r--  Makefile               76
-rw-r--r--  OS2_MAKEFILE           25
-rw-r--r--  PCR-Makefile           41
-rw-r--r--  README                523
-rw-r--r--  alloc.c              1014
-rw-r--r--  allochblk.c           429
-rw-r--r--  black_list.c          184
-rw-r--r--  cons.c                 29
-rw-r--r--  cons.h                 30
-rw-r--r--  correct-output          8
-rw-r--r--  debug_malloc.c        308
-rw-r--r--  dynamic_load.c         92
-rw-r--r--  finalize.c            308
-rw-r--r--  gc.h                  937
-rw-r--r--  gc.man                 58
-rw-r--r--  gc_headers.h           35
-rw-r--r--  gc_inline.h            91
-rw-r--r--  gc_private.h         1164
-rw-r--r--  headers.c             211
-rw-r--r--  interface.c           121
-rw-r--r--  mach_dep.c            407
-rw-r--r--  mark.c                361
-rw-r--r--  mark_roots.c          225
-rw-r--r--  mips_mach_dep.s       146
-rw-r--r--  misc.c                807
-rw-r--r--  new_hblk.c            230
-rw-r--r--  obj_map.c             127
-rw-r--r--  os_dep.c              295
-rw-r--r--  pcr_interface.c       109
-rw-r--r--  real_malloc.c          45
-rw-r--r--  reclaim.c             662
-rw-r--r--  rs6000_mach_dep.s      64
-rw-r--r--  rt_allocobj.s         106
-rw-r--r--  setjmp_test.c          73
-rw-r--r--  test.c                340
35 files changed, 6712 insertions, 2969 deletions
diff --git a/Makefile b/Makefile
index 23d66249..0f0d9cf8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,60 +1,72 @@
-OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o mark_roots.o
-# add rt_allocobj.o for RT version
+OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o
-SRCS= reclaim.c allochblk.c misc.c alloc.c mach_dep.c rt_allocobj.s mips_mach_dep.s mark_roots.c
+CSRCS= reclaim.c allochblk.c misc.c alloc.c mach_dep.c os_dep.c mark_roots.c headers.c mark.c obj_map.c pcr_interface.c black_list.c finalize.c new_hblk.c real_malloc.c dynamic_load.c debug_malloc.c
-CFLAGS= -O
+SRCS= $(CSRCS) mips_mach_dep.s rs6000_mach_dep.s interface.c gc.h gc_headers.h gc_private.h gc_inline.h gc.man
-# Set SPECIALCFLAGS to -q nodirect_code on Encore.
-# On Sun systems under 4.0, it's probably safer to link with -Bstatic.
-# I'm not sure that all static data will otherwise be found.
-# It also makes sense to replace -O with -O4, though it doesn't appear
-# to make much difference.
+CC= cc
+CFLAGS= -O
+# Setjmp_test may yield overly optimistic results when compiled
+# without optimization.
SPECIALCFLAGS =
+# Alternative flags to the C compiler for mach_dep.c.
+# Mach_dep.c often doesn't like optimization, and it's
+# not time-critical anyway.
+# Set SPECIALCFLAGS to -q nodirect_code on Encore.
all: gc.a gctest
-$(OBJS): gc.h
+pcr: PCR-Makefile gc_private.h gc_headers.h gc.h $(SRCS)
+ make -f PCR-Makefile
+$(OBJS) test.o: gc_private.h gc_headers.h gc.h Makefile
+
+# On some machines, the ranlib command may have to be removed.
+# On an SGI for example, ranlib doesn't exist, and is not needed.
+# Ditto for Solaris 2.X.
gc.a: $(OBJS)
ar ru gc.a $(OBJS)
ranlib gc.a
-# mach_dep.c doesn't like optimization
-# On a MIPS machine, move mips_mach_dep.s to mach_dep.s and remove
-# mach_dep.c as well as the following two lines from this Makefile
-# On an IBM RS6000, do the same thing with rs6000_mach_dep.s. Notice
-# that the assembly language interface to the allocator is not completely
-# implemented on an RS6000.
+# On a MIPS-based machine, replace the rule for mach_dep.o by the
+# following:
+# mach_dep.o: mips_mach_dep.s
+# as -o mach_dep.o mips_mach_dep.s
+# On an IBM RS6000, use the following two lines:
+# mach_dep.o: rs6000_mach_dep.s
+# as -o mach_dep.o rs6000_mach_dep.s
mach_dep.o: mach_dep.c
- cc -c ${SPECIALCFLAGS} mach_dep.c
+ $(CC) -c $(SPECIALCFLAGS) mach_dep.c
clean:
- rm -f gc.a test.o cons.o gctest output-local output-diff $(OBJS)
-
-test.o: cons.h test.c
+ rm -f gc.a test.o gctest output-local output-diff $(OBJS) \
+ setjmp_test mon.out gmon.out a.out core
+ -rm -f *~
-cons.o: cons.h cons.c
# On a MIPS system, the BSD version of libc.a should be used to get
# sigsetmask. I found it necessary to link against the system V
# library first, to get a working version of fprintf. But this may have
# been due to my failure to find the right version of stdio.h or some
# such thing.
-gctest: test.o cons.o gc.a
- cc $(CFLAGS) -o gctest test.o cons.o gc.a
+# On a Solaris 2.X system, also make sure you're using BSD libraries.
+gctest: test.o gc.a
+ $(CC) $(CFLAGS) -o gctest test.o gc.a
+# If an optimized setjmp_test generates a segmentation fault,
+# odds are your compiler is broken. Gctest may still work.
+# Try compiling setjmp_test unoptimized.
setjmp_test: setjmp_test.c gc.h
- cc -o setjmp_test -O setjmp_test.c
+ $(CC) $(CFLAGS) -o setjmp_test setjmp_test.c
test: setjmp_test gctest
./setjmp_test
- @echo "WARNING: for GC test to work, all debugging output must be turned off"
- rm -f output-local
- ./gctest > output-local
- -diff correct-output output-local > output-diff
- -@test -s output-diff && echo 'Output of program "gctest" is not correct. GC does not work.' || echo 'Output of program "gctest" is correct. GC probably works.'
-
-shar:
- makescript -o gc.shar README Makefile gc.h ${SRCS} test.c cons.c cons.h
+ ./gctest
+
+tar:
+ tar cvf gc.tar $(SRCS) Makefile PCR-Makefile OS2_MAKEFILE README test.c setjmp_test.c
+ compress gc.tar
+
+lint: $(CSRCS) test.c
+ lint $(CSRCS) test.c | egrep -v "possible pointer alignment problem|abort|exit"
diff --git a/OS2_MAKEFILE b/OS2_MAKEFILE
new file mode 100644
index 00000000..249486df
--- /dev/null
+++ b/OS2_MAKEFILE
@@ -0,0 +1,25 @@
+# Makefile for OS/2. Assumes IBM's compiler, static linking, and a single thread.
+# Adding dynamic linking support seems easy, but takes a little bit of work.
+# Adding thread support may be nontrivial, since we haven't yet figured out how to
+# look at another thread's registers.
+
+# We also haven't figured out how to do partial links or build static libraries. Hence a
+# client currently needs to link against all of the following:
+
+OBJS= alloc.obj reclaim.obj allochblk.obj misc.obj mach_dep.obj os_dep.obj mark_roots.obj headers.obj mark.obj obj_map.obj black_list.obj finalize.obj new_hblk.obj real_malloc.obj dynamic_load.obj debug_malloc.obj
+
+CC= icc
+CFLAGS= /Ti /Q
+# Setjmp_test may yield overly optimistic results when compiled
+# without optimization.
+
+all: $(OBJS) gctest
+
+$(OBJS) test.obj: gc_private.h gc_headers.h gc.h
+
+mach_dep.obj: mach_dep.c
+ $(CC) $(CFLAGS) /C mach_dep.c
+
+gctest: test.obj $(OBJS)
+ $(CC) $(CFLAGS) /Fegctest test.obj $(OBJS)
+
diff --git a/PCR-Makefile b/PCR-Makefile
new file mode 100644
index 00000000..616ffdc7
--- /dev/null
+++ b/PCR-Makefile
@@ -0,0 +1,41 @@
+OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o pcr_interface.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o
+
+# Fix to point to local pcr installation directory.
+PCRDIR= /project/ppcr/dev
+CC= gcc
+CFLAGS= -g -DPCR -I$(PCRDIR) -I$(PCRDIR)/pcr -I$(PCRDIR)/pcr/ansi -I$(PCRDIR)/pcr/posix
+# On Sun systems under 4.x, it's safer to link with -Bstatic.
+# On other systems, -Bstatic usually doesn't make sense, and should be
+# removed.
+# Setjmp_test may yield overly optimistic results when compiled
+# without optimization.
+
+SPECIALCFLAGS =
+# Alternative flags to the C compiler for mach_dep.c.
+# Mach_dep.c often doesn't like optimization, and it's
+# not time-critical anyway.
+# Set SPECIALCFLAGS to -q nodirect_code on Encore.
+
+PCRINCLS= $(PCRDIR)/pcr/il/PCR_IL.h $(PCRDIR)/pcr/th/PCR_ThCtl.h $(PCRDIR)/pcr/mm/PCR_MM.h
+
+
+all: gc.o test.o gcpcr
+
+gcpcr: gc.o test.o $(PCRDIR)/pcr/base/pcr.o $(PCRDIR)/pcr/base/PCR_BaseMain.o
+ $(CC) -static -o gcpcr $(PCRDIR)/pcr/base/pcr.o $(PCRDIR)/pcr/base/PCR_BaseMain.o gc.o test.o
+
+$(OBJS) test.o: gc_private.h gc_headers.h gc.h PCR-Makefile $(PCRINCLS)
+
+gc.o: $(OBJS)
+ -ld -r -o gc.o $(OBJS)
+
+# On a MIPS machine, replace the rule for mach_dep.o by the
+# following:
+# mach_dep.o: mips_mach_dep.s
+# as -o mach_dep.o mips_mach_dep.s
+# On an IBM RS6000, use the following two lines:
+# mach_dep.o: rs6000_mach_dep.s
+# as -o mach_dep.o rs6000_mach_dep.s
+mach_dep.o: mach_dep.c
+ $(CC) -c ${SPECIALCFLAGS} mach_dep.c
+
diff --git a/README b/README
index 21d137aa..13133fa6 100644
--- a/README
+++ b/README
@@ -1,5 +1,5 @@
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
-Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -8,59 +8,80 @@ Permission is hereby granted to copy this garbage collector for any purpose,
provided the above notices are retained on all copies.
-This is version 1.9.
+This is version 2.4. Note that functions were renamed since version 1.9
+to make naming consistent with PCR collectors.
HISTORY -
- This collector was developed as a part of research projects supported in
-part by the National Science Foundation and the Defense Advance Research
-Projects Agency. The SPARC specific code was contributed by Mark Weiser
-(weiser.pa@xerox.com). The Encore Multimax modifications were supplied by
+ Early versions of this collector were developed as a part of research
+projects supported in part by the National Science Foundation
+and the Defense Advanced Research Projects Agency.
+The SPARC specific code was contributed by Mark Weiser
+(weiser@parc.xerox.com). The Encore Multimax modifications were supplied by
Kevin Kenny (kenny@m.cs.uiuc.edu). The adaptation to the RT is largely due
to Vernon Lee (scorpion@rice.edu), on machines made available by IBM.
-The HP specific code and a number of good suggestions for improving the
+Much of the HP specific code and a number of good suggestions for improving the
generic code are due to Walter Underwood (wunder@hp-ses.sde.hp.com).
-Robert Brazile (brazile@diamond.bbn.com) supplied the ULTRIX code.
-(Blame for misinstallation of those modifications goes to the first author,
+Robert Brazile (brazile@diamond.bbn.com) originally supplied the ULTRIX code.
+Al Dosser (dosser@src.dec.com) and Regis Cridlig (Regis.Cridlig@cl.cam.ac.uk)
+subsequently provided updates and information on variation between ULTRIX
+systems. Parag Patel (parag@netcom.com) supplied the A/UX code.
+Bill Janssen (janssen@parc.xerox.com) supplied the SunOS dynamic loader
+specific code. Manuel Serrano (serrano@cornas.inria.fr) supplied linux and
+Sony News specific code.
+
+ (Blame for misinstallation of those modifications goes to the first author,
however.) Some of the improvements incorporated in this version were
suggested by David Chase, then at Olivetti Research.
+ Much of the code was rewritten by Hans-J. Boehm at Xerox PARC.
+
This is intended to be a general purpose, garbage collecting storage
allocator. The algorithms used are described in:
Boehm, H., and M. Weiser, "Garbage Collection in an Uncooperative Environment",
Software Practice & Experience, September 1988, pp. 807-820.
- Many of the ideas underlying the collector have previously been explored
-by others. (We discovered recently that Doug McIlroy wrote a more or less
-similar collector that is part of version 8 UNIX (tm).) However none of this
-work appears to have been widely disseminated.
+ Some of the ideas underlying the collector have previously been explored
+by others. (Doug McIlroy wrote a vaguely similar collector that is part of
+version 8 UNIX (tm).) However none of this work appears to have been widely
+disseminated.
+
+ This collector includes numerous refinements not described in the above paper.
- The tools for detecting storage leaks described in the above paper
-are not included here. There is some hope that they might be released
-by Xerox in the future.
+ Rudimentary tools for use of the collector as a leak detector are included.
GENERAL DESCRIPTION
+ This is a garbage collecting storage allocator that is intended to be
+used as a plug-in replacement for C's malloc.
+
Since the collector does not require pointers to be tagged, it does not
-attempt to insure that all inaccessible storage is reclaimed. However,
+attempt to ensure that all inaccessible storage is reclaimed. However,
in our experience, it is typically more successful at reclaiming unused
-memory than most C programs using explicit deallocation.
+memory than most C programs using explicit deallocation. Unlike manually
+introduced leaks, the amount of unreclaimed memory typically stays
+bounded.
In the following, an "object" is defined to be a region of memory allocated
by the routines described below.
Any objects not intended to be collected must be pointed to either
from other such accessible objects, or from the registers,
-stack, data, or statically allocated bss segments. It is usually assumed
-that all such pointers point to the beginning of the object. (This does
+stack, data, or statically allocated bss segments. Pointers from
+the stack or registers may point to anywhere inside an object.
+However, it is usually assumed that all pointers originating in the
+heap point to the beginning of an object. (This does
not disallow interior pointers; it simply requires that there must be a
pointer to the beginning of every accessible object, in addition to any
-interior pointers. Conditionally compiled code to check for pointers to the
-interiors of objects is supplied. As explained in "gc.h", this
-may create other problems, but on modern machines requiring 32-bit-aligned
-pointers, this is often acceptable.)
+interior pointers.) There are two facilities for altering this behavior.
+The macro ALL_INTERIOR_POINTERS may be defined in gc_private.h to
+cause any pointer into an object to retain the object. A routine
+GC_register_displacement is provided to allow for more controlled
+interior pointer use in the heap. Defining ALL_INTERIOR_POINTERS
+is somewhat dangerous. See gc_private.h for details. The routine
+GC_register_displacement is described in gc.h.
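For example, a client whose heap pointers sometimes point a fixed number of bytes past the start of an object can register that displacement once at startup. The following is only a sketch: the 4-byte displacement and the wrapper function are invented for illustration, and the exact declaration of GC_register_displacement is the one given in gc.h.

#include "gc.h"

/* Illustrative only: allow heap pointers that point 4 bytes past the      */
/* beginning of an object (e.g. past a one-word tag) to keep it alive.     */
void register_my_displacements(void)
{
    GC_register_displacement(4);
}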
Note that pointers inside memory allocated by the standard "malloc" are not
seen by the garbage collector. Thus objects pointed to only from such a
@@ -69,88 +90,126 @@ standard "malloc" be used only for memory regions, such as I/O buffers, that
are guaranteed not to contain pointers. Pointers in C language automatic,
static, or register variables, are correctly recognized.
- The collector does not understand SunOS 4.x dynamic libraries. Space
-allocated by the dynamic linker past at addresses higher than "_end" will not
-be seen by the collector. (We have not had a chance to track down exactly
-what ends up there. Some data does. If we understood exactly where things
-ended up, it would probably be easy to fix this problem.) When in doubt,
-use -Bstatic.
-
- The collector is designed to minimize stack growth if list-like structures
-store the link in their first field; for example
-
- struct list_node {
- struct list_node * link; /* first field */
- ...
- };
-
-instead of
-
- struct list_node {
- ...
- struct list_node * link; /* last field */
- };
-
- This should not matter for lists that are less than tens of thousands
-of elements long.
-
- Signal processing for most signals is deferred during collection. (The
-necessary calls to sigsetmask may need to be commented out under a pure
-system V implementation, since there does not seem to be an equivalent
-call. Multiple calls to signal are likely to be slow.)
+ The collector does not generally know how to find pointers in data
+areas that are associated with dynamic libraries. This is easy to
+remedy IF you know how to find those data areas on your operating
+system (see GC_add_roots). Code for doing this under SunOS4.X only is
+included (see dynamic_load.c). (Note that it includes a special version
+of dlopen, GC_dlopen, that should be called instead of the standard one.
+By default, this is not compiled in, since it requires the -ldl library.)
+Note that the garbage collector does not need to be informed of shared
+read-only data. However if the shared library mechanism can introduce
+discontiguous data areas that may contain pointers, then the collector does
+need to be informed.
+
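For example, on a system where you can determine the bounds of a dynamic library's writable data yourself, the area can be handed to the collector roughly as follows. The two bounds symbols are hypothetical; how they are obtained is entirely operating-system specific and is not part of the collector.

#include "gc.h"

/* Hypothetical bounds of a shared library's data segment; these names     */
/* are invented and must be replaced by whatever your system provides.     */
extern char my_lib_data_start[];
extern char my_lib_data_end[];

void register_my_lib_roots(void)
{
    /* Ask the collector to also trace [my_lib_data_start, my_lib_data_end). */
    GC_add_roots(my_lib_data_start, my_lib_data_end);
}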
+ Signal processing for most signals is normally deferred during collection,
+and during uninterruptible parts of the allocation process. Unlike
+standard ANSI C mallocs, it is intended to be safe to invoke malloc
+from a signal handler while another malloc is in progress, provided
+the original malloc is not restarted. (Empirically, many UNIX
+applications already assume this.) The allocator/collector can
+also be configured for thread-safe operation. (Full signal safety can
+also be achieved, but only at the cost of two system calls per malloc,
+which is usually unacceptable.)
INSTALLATION AND PORTABILITY
- As distributed, the collector produces garbage collection statistics
-during every collection. Once the collector is known to operate properly,
-these can be suppressed by defining the macro SILENT at the top
-of "gc.h". (The given statistics exhibit a few peculiarities.
+ As distributed, the macro SILENT is defined at the top of gc_private.h.
+In the event of problems, this can be removed to obtain a moderate
+amount of descriptive output for each collection.
+(The given statistics exhibit a few peculiarities.
Things don't appear to add up for a variety of reasons, most notably
fragmentation losses. These are probably much more significant for the
contrived program "test.c" than for your application.)
- Note that typing "make test" will automatically compare the output
-of the test program against the correct output. This does require that
-collection statistics have been disabled.
+ Note that typing "make test" will automatically build the collector
+and then run setjmp_test and gctest. Setjmp_test will give you information
+about configuring the collector, which is useful primarily if you have
+a machine that's not already supported. Gctest is a somewhat superficial
+test of collector functionality. Failure is indicated by a core dump or
+a message to the effect that the collector is broken. Gctest takes about
+20 seconds to run on a SPARCstation 2. On a slower machine,
+expect it to take a while. It may use up to 8 MB of memory. (The
+multi-threaded version will use more.)
The Makefile will generate a library gc.a which you should link against.
It is suggested that if you need to replace a piece of the collector
-(e.g. mark_roots.c) you simply list your version ahead of gc.a on the
-ld command line, rather than replacing the one in gc.a.
+(e.g. mark_roots.c) you simply list your version ahead of gc.a on the
+ld command line, rather than replacing the one in gc.a. (This will
+generate numerous warnings under some versions of AIX, but it still
+works.)
The collector currently is designed to run essentially unmodified on
the following machines:
Sun 3
- Sun 4 (except under some versions of 3.2)
- Vax under Berkeley UNIX
+ Sun 4 under SunOS 4.X or Solaris2.X
+ Vax under 4.3BSD, Ultrix
+ Intel 386 or 486 under OS/2 (no threads) or linux.
Sequent Symmetry (no concurrency)
Encore Multimax (no concurrency)
MIPS M/120 (and presumably M/2000) (RISC/os 4.0 with BSD libraries)
IBM PC/RT (Berkeley UNIX)
IBM RS/6000
HP9000/300
+ HP9000/700
+ DECstations under Ultrix
+ SGI workstations under IRIX
+ Sony News
+ Apple Macintosh under A/UX
For these machines you should check the beginning of gc.h
-to verify that the machine type is correctly defined. On an Encore Multimax,
-MIPS M/120, or a PC/RT, you will also need to make changes to the
+to verify that the machine type is correctly defined. On
+non-Sun machines, you may also need to make changes to the
Makefile, as described by comments there.
+ Dynamic libraries are completely supported only under SunOS4.X
+(and even that support is not functional on the last Sun 3 release).
+On other machines we recommend that you do one of the following:
+
+ 1) Add dynamic library support (and send us the code).
+ 2) Use static versions of the libraries.
+ 3) Arrange for dynamic libraries to use the standard malloc.
+ This is still dangerous if the library stores a pointer to a
+ garbage collected object. But nearly all standard interfaces
+ prohibit this, because they deal correctly with pointers
+ to stack allocated objects. (Strtok is an exception. Don't
+ use it.)
+
In all cases we assume that pointer alignment is consistent with that
enforced by the standard C compilers. If you use a nonstandard compiler
-you may have to adjust the alignment parameters defined in gc.h.
+you may have to adjust the alignment parameters defined in gc_private.h.
- On a MIPS machine or PC/RT, we assume that no calls to sbrk occur during a
-collection. (This is necessary due to the way stack expansion works on these
-machines.) This may become false if certain kinds of I/O calls are inserted
-into the collector.
+ A port to a machine that is not byte addressed, or does not use 32 bit
+addresses will require a major effort. (Parts of the code try to anticipate
+64 bit addresses. Others will need to be rewritten, since different data
+structures are needed.) A port to MSDOS is hopeless, unless you are willing
+to assume an 80386 or better, and that only flat 32 bit pointers will ever be
+used.
For machines not already mentioned, or for nonstandard compilers, the
following are likely to require change:
-1. The parameters at the top of gc.h and the definition of
- TMP_POINTER_MASK further down in the same file.
-
+1. The parameters at the top of gc_private.h.
+ The parameters that will usually require adjustment are
+ STACKBOTTOM, ALIGNMENT and DATASTART. Setjmp_test
+ prints its guesses of the first two. (A sketch of such an entry is
+ given at the end of this section.)
+ DATASTART should be an expression for computing the
+ address of the beginning of the data segment. This can often be
+ &etext. But some memory management units require that there be
+ some unmapped space between the text and the data segment. Thus
+ it may be more complicated. On UNIX systems, this is rarely
+ documented. But the adb "$m" command may be helpful. (Note
+ that DATASTART will usually be a function of &etext. Thus a
+ single experiment is usually insufficient.)
+ STACKBOTTOM is used to initialize GC_stackbottom, which
+ should be a sufficient approximation to the coldest stack address.
+ On some machines, it is difficult to obtain such a value that is
+ valid across a variety of MMUs, OS releases, etc. A number of
+ alternatives exist for using the collector in spite of this. See the
+ discussion in gc_private.h immediately preceding the various
+ definitions of STACKBOTTOM.
+
2. mach_dep.c.
The most important routine here is one to mark from registers.
The distributed file includes a generic hack (based on setjmp) that
@@ -162,28 +221,20 @@ following are likely to require change:
all accessible variables, including registers, have the value they
had at the time of the longjmp, it also will not work. Vanilla 4.2 BSD
makes such a claim. SunOS does not.)
- This file also contains interface routines that save registers
- not normally preserved by the C compiler. These are intended for
- a fast assembly language interface to the allocator, such as the
- one that is used by the Russell compiler. (These routines work
- only for small objects. A call to one of these routines ensures
- that the free list for a particular object size is nonempty. Normally
- in-line code would call these routines only after finding an empty free
- list for an about-to-be-allocated object size.) If a pure C interface
- is used, these routines are not needed.
- If your machine does not allow in-line assembly code, or if you prefer
+ If your compiler does not allow in-line assembly code, or if you prefer
not to use such a facility, mach_dep.c may be replaced by a .s file
(as we did for the MIPS machine and the PC/RT).
3. mark_roots.c.
These are the top level mark routines that determine which sections
of memory the collector should mark from. This is normally not
- architecture specific (aside from the macros defined in gc.h and
+ architecture specific (aside from the macros defined in gc_private.h and
referenced here), but it can be programming language and compiler
specific. The supplied routine should work for most C compilers
- running under UNIX.
+ running under UNIX. Calls to GC_add_roots may sometimes be used
+ for similar effect.
-4. The sigsetmask call does not appear to exist under system V UNIX.
+4. The sigsetmask call does not appear to exist under early system V UNIX.
It is used by the collector to block and unblock signals at times at
which an asynchronous allocation inside a signal handler could not
be tolerated. Under system V, it is possible to remove these calls,
@@ -193,68 +244,93 @@ following are likely to require change:
For different versions of Berkeley UN*X or different machines using the
Motorola 68000, Vax, SPARC, 80386, NS 32000, PC/RT, or MIPS architecture,
-it should frequently suffice to change definitions in gc.h.
+it should frequently suffice to change definitions in gc_private.h.
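To make item 1 above concrete, a machine entry in gc_private.h has roughly the following shape. The machine name and the numeric values are invented for illustration; real values must be determined experimentally (e.g. with setjmp_test and adb), and the existing entries in gc_private.h should be used as the model.

# ifdef MYMACHINE   /* hypothetical machine, shown for illustration only   */
#   define ALIGNMENT 4                    /* pointers are 4 byte aligned    */
#   define STACKBOTTOM ((ptr_t)0xf0000000) /* guess at coldest stack address */
    extern int etext;
    /* Assume the data segment starts at the first page boundary at or     */
    /* after etext; adjust if your MMU maps things differently.            */
#   define DATASTART ((ptr_t)((((word)(&etext)) + 0xfff) & ~0xfff))
# endif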
THE C INTERFACE TO THE ALLOCATOR
The following routines are intended to be directly called by the user.
-Note that only gc_malloc and gc_init are necessary. Gc_realloc is provided
-for applications that already use realloc. The remaining routines are used
-solely to enhance performance. It is suggested that they be used only after
-initial debugging.
-
-1) gc_init()
- - called once before allocation to initialize the collector.
-
-2) gc_malloc(nbytes)
+Note that usually only GC_malloc is necessary. GC_clear_roots and GC_add_roots
+calls may be required if the collector has to trace from nonstandard places
+(e.g. from dynamic library data areas on a machine on which the
+collector doesn't already understand them.) On some machines, it may
+be desirable to set GC_stacktop to a good approximation of the stack base.
+(This enhances code portability on HP PA machines, since there is no
+good way for the collector to compute this value.) Client code may include
+"gc.h", which defines all of the following, plus a few others. (A short
+usage sketch of these routines follows the numbered list below.)
+
+1) GC_malloc(nbytes)
- allocate an object of size nbytes. Unlike malloc, the object is
- cleared before being returned to the user. (For even better performance,
- it may help to expand the relevant part of gc_malloc in line.
- This is done by the Russell compiler, for example.) Gc_malloc will
+ cleared before being returned to the user. GC_malloc will
invoke the garbage collector when it determines this to be appropriate.
- (A number of previous collector bugs resulted in objects not getting
- completely cleared. We claim these are all fixed. But if you encounter
- problems, this is a likely source to check for. The collector tries
- hard to avoid clearing any words that it doesn't have to. Thus this
- is a bit subtle.) Gc_malloc fails (generates a segmentation fault)
- if it is called with a 0 argument.
-
-3) gc_malloc_atomic(nbytes)
+ GC_malloc may return 0 if it is unable to acquire sufficient
+ space from the operating system. This is the most probable
+ consequence of running out of space. Other possible consequences
+ are that a function call will fail due to lack of stack space,
+ or that the collector will fail in other ways because it cannot
+ maintain its internal data structures, or that a crucial system
+ process will fail and take down the machine. Most of these
+ possibilities are independent of the malloc implementation.
+
+2) GC_malloc_atomic(nbytes)
- allocate an object of size nbytes that is guaranteed not to contain any
pointers. The returned object is not guaranteed to be cleared.
- (Can always be replaced by gc_malloc, but results in faster collection
+ (Can always be replaced by GC_malloc, but results in faster collection
times. The collector will probably run faster if large character
- arrays, etc. are allocated with gc_malloc_atomic than if they are
+ arrays, etc. are allocated with GC_malloc_atomic than if they are
statically allocated.)
-4) gc_realloc(object, new_size)
+3) GC_realloc(object, new_size)
- change the size of object to be new_size. Returns a pointer to the
new object, which may, or may not, be the same as the pointer to
the old object. The new object is taken to be atomic iff the old one
was. If the new object is composite and larger than the original object,
then the newly added bytes are cleared (we hope). This is very likely
- to allocate a new object, unless MERGE_SIZES is defined in gc.h.
+ to allocate a new object, unless MERGE_SIZES is defined in gc_private.h.
Even then, it is likely to recycle the old object only if the object
is grown in small additive increments (which, we claim, is generally bad
coding practice.)
-5) gc_free(object)
- - explicitly deallocate an object returned by gc_malloc or
- gc_malloc_atomic. Not necessary, but can be used to minimize
+4) GC_free(object)
+ - explicitly deallocate an object returned by GC_malloc or
+ GC_malloc_atomic. Not necessary, but can be used to minimize
collections if performance is critical.
-6) expand_hp(number_of_4K_blocks)
+5) GC_expand_hp(number_of_4K_blocks)
- Explicitly increase the heap size. (This is normally done automatically
- if a garbage collection failed to reclaim enough memory. Explicit
- calls to expand_hp may prevent unnecessarily frequent collections at
+ if a garbage collection failed to reclaim enough memory. Explicit
+ calls to GC_expand_hp may prevent unnecessarily frequent collections at
program startup.)
-
- The global variable dont_gc can be set to a non-zero value to inhibit
-collections, e.g. during a time-critical section of code. (This may cause
-otherwise unnecessary expansion of the process' memory.)
-
- The variable non_gc_bytes, which is normally 0, may be changed to reflect
+
+6) GC_clear_roots()
+ - Reset the collector's idea of where static variables containing pointers
+ may be located to the empty set of locations. No statically allocated
+ variables will be traced from after this call, unless there are
+ intervening GC_add_roots calls. The collector will still trace from
+ registers and the program stack.
+
+7) GC_add_roots(low_address, high_address_plus_1)
+ - Add [low_address, high_address) as an area that may contain root pointers
+ and should be traced by the collector. The static data and bss segments
+ are considered by default, and should not be added unless GC_clear_roots
+ has been called. The number of root areas is currently limited to 50.
+ This is intended as a way to register data areas for dynamic libraries,
+ or to replace the entire data and bss segments by smaller areas that are
+ known to contain all the roots.
+
+8) Several routines to allow for registration of finalization code.
+ User supplied finalization code may be invoked when an object becomes
+ unreachable. To call (*f)(obj, x) when obj becomes inaccessible, use
+ GC_register_finalizer(obj, f, x, 0, 0);
+ For more sophisticated uses, and for finalization ordering issues,
+ see gc.h.
+
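As an illustration of how the routines above fit together, here is a minimal sketch. The structure, the finalizer, and the sizes are invented, and the casts and the finalizer's argument types merely follow the descriptions above; they may need adjustment to match the exact declarations in gc.h. GC_free appears only in a comment because it is optional.

#include <stdio.h>
#include "gc.h"

struct cons {
    struct cons *cdr;
    char *tag;                      /* points to a pointer-free string      */
};

/* Finalizer with the (*f)(obj, x) shape described in item 8 above.         */
static void note_death(char *obj, char *client_data)
{
    fprintf(stderr, "finalizing object at 0x%lx\n", (unsigned long)obj);
}

int main(void)
{
    struct cons *c = (struct cons *)GC_malloc(sizeof(struct cons));

    c->tag = (char *)GC_malloc_atomic(8);        /* contains no pointers    */
    GC_register_finalizer((char *)c, note_death, (char *)0, 0, 0);
    c = (struct cons *)GC_realloc((char *)c, 2 * sizeof(struct cons));

    /* No explicit deallocation is required; once c becomes unreachable it  */
    /* is reclaimed automatically.  GC_free(c) could be used to reduce      */
    /* collection work if performance is critical.                          */
    return 0;
}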
+ The global variable GC_free_space_divisor may be adjusted up from its
+default value of 4 to use less space and more collection time, or down for
+the opposite effect. Setting it to 1 or 0 will effectively disable collections
+and cause all allocations to simply grow the heap.
+
+ The variable GC_non_gc_bytes, which is normally 0, may be changed to reflect
the amount of memory allocated by the above routines that should not be
considered as a candidate for collection. Collections are inhibited
if this exceeds a given fraction (currently 3/4) of the total heap size.
@@ -262,53 +338,129 @@ The heap is simply expanded instead. Careless use may, of course, result
in excessive memory consumption.
Some additional tuning is possible through the parameters defined
-near the top of gc.h.
+near the top of gc_private.h.
- The two gc_malloc routines may be declared to return a suitable pointer
-type. It is not intended that gc.h be included by the user program.
-If only gc_malloc is intended to be used, it might be appropriate to define:
+ If only GC_malloc is intended to be used, it might be appropriate to define:
-#define malloc(n) gc_malloc(n)
-#define calloc(m,n) gc_malloc((m)*(n))
+#define malloc(n) GC_malloc(n)
+#define calloc(m,n) GC_malloc((m)*(n))
- More complete emulations of the standard C allocation routines are
-contained and described in "interface.c" (contributed by David Chase).
+ For small pieces of VERY allocation intensive code, gc_inline.h
+includes some allocation macros that may be used in place of GC_malloc
+and friends.
- No attempt is made to use obscure names for garbage collector routines
-and data structures. Name conflicts are possible. (Running "nm gc.a"
-should identify names to be avoided.)
+ Somewhat different emulations of the standard C allocation routines are
+contained and described in "interface.c" (contributed by David Chase, but
+subsequently mangled by Hans Boehm). These are appropriate for mixed
+systems, where part of the system uses explicit deallocation, and does not
+leak. Exclusive use of interface.c routines can result in needless
+fragmentation, since certain kinds of object coalescing are only done
+by the collector.
+ All externally visible names in the garbage collector start with "GC_".
+To avoid name conflicts, client code should avoid this prefix, except when
+accessing garbage collector routines or variables.
-ASSEMBLY LANGUAGE INTERFACE
+ The internals of the collector understand different object "kinds" (sometimes
+called "regions"). By default, the only two kinds are ATOMIC and NORMAL.
+It should be possible to add others, e.g. for data types for which layout
+information is known. The allocation routine "GC_generic_malloc"
+takes an explicit kind argument. (You will probably want to add
+faster kind-specific routines as well.) To add a new object kind, you will
+need to supply another kind descriptor, including your own mark routine.
+This requires a fairly detailed understanding of at least GC_mark.
- There is a provision for a very fast assembly language and/or in-line
-C interface. See the beginning comments in alloc.c. On some architectures,
-additional code must be supplied near the beginning of mach_dep.c for
-this to work. Using an assembly language interface, and partially
-expanding the allocation code in-line, most allocations will take on the
-order of 4 or 5 instructions each. (Explicit deallocations can be kept
-down to something similar if the object is atomic and of known size.
-Note that in-line deallocation code for composite objects should clear
-the object before returning it to the appropriate free list.)
USE AS LEAK DETECTOR:
The collector may be used to track down leaks in C programs that are
intended to run with malloc/free (e.g. code with extreme real-time or
-portability constraints). To do so define FIND_LEAK somewhere in gc.h.
-This will cause the collector to invoke the report_leak routine defined
-near the top of reclaim.c whenever an inaccessible object is found that has
-not been explicitly freed.
+portability constraints). To do so define FIND_LEAK somewhere in
+gc_private.h. This will cause the collector to invoke the report_leak
+routine defined near the top of reclaim.c whenever an inaccessible
+object is found that has not been explicitly freed.
Productive use of this facility normally involves redefining report_leak
to do something more intelligent. This typically requires annotating
objects with additional information (e.g. creation time stack trace) that
identifies their origin. Such code is typically not very portable, and is
not included here.
-
-
-BUGS
-
- Recently fixed bugs:
+ If all objects are allocated with GC_DEBUG_MALLOC (see next section),
+then the default version of report_leak will report the source file
+and line number at which the leaked object was allocated. This may
+sometimes be sufficient.
+
+
+DEBUGGING FACILITIES:
+
+ The routines GC_debug_malloc, GC_debug_malloc_atomic, GC_debug_realloc,
+and GC_debug_free provide an alternate interface to the collector, which
+provides some help with memory overwrite errors, and the like.
+Objects allocated in this way are annotated with additional
+information. Some of this information is checked during garbage
+collections, and detected inconsistencies are reported to stderr.
+
+ Simple cases of writing past the end of an allocated object should
+be caught if the object is explicitly deallocated, or if the
+collector is invoked while the object is live. The first deallocation
+of an object will clear the debugging info associated with an
+object, so accidentally repeated calls to GC_debug_free will report the
+deallocation of an object without debugging information. Out of
+memory errors will be reported to stderr, in addition to returning
+NIL.
+
+ GC_debug_malloc checking during garbage collection is enabled
+with the first call to GC_debug_malloc. This will result in some
+slowdown during collections. If frequent heap checks are desired,
+this can be achieved by explicitly invoking GC_gcollect, e.g. from
+the debugger.
+
+ GC_debug_malloc allocated objects should not be passed to GC_realloc
+or GC_free, and conversely. It is however acceptable to allocate only
+some objects with GC_debug_malloc, and to use GC_malloc for other objects,
+provided the two pools are kept distinct. In this case, there is a very
+low probability that GC_malloc allocated objects may be misidentified as
+having been overwritten. This should happen with probability at most
+one in 2**32. This probability is zero if GC_debug_malloc is never called.
+
+ GC_debug_malloc, GC_debug_malloc_atomic, and GC_debug_realloc take two
+additional trailing arguments, a string and an integer. These are not
+interpreted by the allocator. They are stored in the object (the string is
+not copied). If an error involving the object is detected, they are printed.
+
+ The macros GC_MALLOC, GC_MALLOC_ATOMIC, GC_REALLOC, GC_FREE, and
+GC_REGISTER_FINALIZER are also provided. These require the same arguments
+as the corresponding (nondebugging) routines. If gc.h is included
+with GC_DEBUG defined, they call the debugging versions of these
+functions, passing the current file name and line number as the two
+extra arguments, where appropriate. If gc.h is included without GC_DEBUG
+defined, then all these macros will instead be defined to their nondebugging
+equivalents. (GC_REGISTER_FINALIZER is necessary, since pointers to
+objects with debugging information are really pointers to a displacement
+of 16 bytes from the object beginning, and some translation is necessary
+when finalization routines are invoked. For details about what's stored
+in the header, see the definition of the type oh in debug_malloc.c.)
+
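As a sketch of how the debugging macros above are typically used (the object type and the function are invented; the exact macro expansions are determined by gc.h):

#define GC_DEBUG             /* must be defined before gc.h is included    */
#include "gc.h"

struct pair { struct pair *hd, *tl; };

struct pair *make_pair(void)
{
    /* With GC_DEBUG defined, GC_MALLOC calls GC_debug_malloc with the     */
    /* current file name and line number as the extra arguments.           */
    struct pair *p = (struct pair *)GC_MALLOC(sizeof(struct pair));

    /* GC_FREE(p) would clear the debugging header; a second GC_FREE on    */
    /* the same object is reported as freeing an object without debugging  */
    /* information.                                                        */
    return p;
}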
+
+BUGS:
+
+ Any memory that does not have a recognizable pointer to it will be
+reclaimed. Exclusive-or'ing forward and backward links in a list
+doesn't cut it.
+ Some C optimizers may lose the last undisguised pointer to a memory
+object as a consequence of clever optimizations. This has almost
+never been observed in practice. Send mail to boehm@parc.xerox.com
+for suggestions on how to fix your compiler.
+ This is not a real-time collector. In the standard configuration,
+the percentage of time required for collection should be constant across
+heap sizes. But collection pauses will increase for larger heaps.
+(On SPARCstation 2s collection times will be on the order of 300 msecs
+per MB of accessible memory that needs to be scanned. Your mileage
+may vary.) Much better real-time behavior would be possible if we
+had a portable way to identify sections of memory that were recently
+modified. Experience with PCR indicates that 100 msec pause times
+are probably possible, almost independent of heap size.
+
+RECENT VERSIONS:
Version 1.3 and immediately preceding versions contained spurious
assembly language assignments to TMP_SP. Only the assignment in the PC/RT
@@ -332,8 +484,61 @@ allocated on a sparc based machine.
a major addition, you might also send mail to ask whether it's already
been done.
- Version 1.8 added ULTRIX support in gc.h.
-
- Version 1.9 fixed a serious realloc bug. Expanding a large pointerful
-object by a small amount could result in pointers in the added section
-not getting scanned.
+ Version 1.8 added ULTRIX support in gc_private.h.
+
+ Version 1.9 fixed a major bug in gc_realloc.
+
+ Version 2.0 introduced a consistent naming convention for collector
+routines and added support for registering dynamic library data segments
+in the standard mark_roots.c. Most of the data structures were revamped.
+The treatment of interior pointers was completely changed. Finalization
+was added. Support for locking was added. Object kinds were added.
+We added a black listing facility to avoid allocating at addresses known
+to occur as integers somewhere in the address space. Much of this
+was accomplished by adapting ideas and code from the PCR collector.
+The test program was changed and expanded.
+
+ Version 2.1 was the first stable version since 1.9, and added support
+for PPCR.
+
+ Version 2.2 added debugging allocation, and fixed various bugs. Among them:
+- GC_realloc could fail to extend the size of the object for certain large object sizes.
+- A blatant subscript range error in GC_printf, which unfortunately
+ wasn't exercised on machines with sufficient stack alignment constraints.
+- GC_register_displacement did the wrong thing if it was called after
+ any allocation had taken place.
+- The leak finding code would eventually break after 2048 byte
+ objects leaked.
+- interface.c didn't compile.
+- The heap size remained much too small for large stacks.
+- The stack clearing code behaved badly for large stacks, and perhaps
+ on HP/PA machines.
+
+ Version 2.3 added ALL_INTERIOR_POINTERS and fixed the following bugs:
+- Missing declaration of etext in the A/UX version.
+- Some PCR root-finding problems.
+- Blacklisting was not 100% effective, because the plausible future
+ heap bounds were being miscalculated.
+- GC_realloc didn't handle out-of-memory correctly.
+- GC_base could return a nonzero value for addresses inside free blocks.
+- test.c wasn't really thread safe, and could erroneously report failure
+ in a multithreaded environment. (The locking primitives need to be
+ replaced for other threads packages.)
+- GC_CONS was thoroughly broken.
+- On a SPARC with dynamic linking, signals stayed disabled while the
+ client code was running.
+ (Thanks to Manuel Serrano at INRIA for reporting the last two.)
+
+ Version 2.4 added GC_free_space_divisor as a tuning knob, added
+ support for OS/2 and linux, and fixed the following bugs:
+- On machines with unaligned pointers (e.g. Sun 3), every 128th word could
+ fail to be considered for marking.
+- Dynamic_load.c erroneously added 4 bytes to the length of the data and
+ bss sections of the dynamic library. This could result in a bad memory
+ reference if the actual length was a multiple of a page. (Observed on
+ Sun 3. Can probably also happen on a Sun 4.)
+ (Thanks to Robert Brazile for pointing out that the Sun 3 version
+ was broken. Dynamic library handling is still broken on Sun 3s
+ under 4.1.1U1, but apparently not 4.1.1. If you have such a machine,
+ use -Bstatic.)
+
diff --git a/alloc.c b/alloc.c
index 0e2005ad..e97492f0 100644
--- a/alloc.c
+++ b/alloc.c
@@ -5,56 +5,27 @@
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
- * Permission is hereby granted to copy this compiler for any purpose,
+ * Permission is hereby granted to copy this garbage collector for any purpose,
* provided the above notices are retained on all copies.
*
* This file contains the functions:
- * void new_hblk(n)
* static void clear_marks()
- * mark(alignment)
- * mark_all(b,t,alignment)
- * void gcollect()
- * expand_hp: func[val Short] val Void
- * struct obj * _allocobj(sz)
- * struct obj * _allocaobj(sz)
+ * void GC_gcollect_inner(force)
+ * void GC_gcollect()
+ * bool GC_expand_hp(n)
+ * ptr_t GC_allocobj(sz, kind)
*/
# include <stdio.h>
# include <signal.h>
# include <sys/types.h>
-# include <sys/times.h>
-# include "gc.h"
-
-/* Leaving these defined enables output to stderr. In order of */
-/* increasing verbosity: */
-#define REPORT_FAILURE /* Print values that looked "almost" like pointers */
-#undef REPORT_FAILURE
-#define DEBUG /* Verbose debugging output */
-#undef DEBUG
-#define DEBUG2 /* EXTREMELY verbose debugging output */
-#undef DEBUG2
-#define USE_STACK /* Put mark stack onto process stack. This assumes */
- /* that it's safe to put data below the stack ptr, */
- /* and that the system will expand the stack as */
- /* necessary. This is known to be true under Sun */
- /* UNIX (tm) and Vax Berkeley UNIX. It is also */
- /* known to be false under some other UNIX */
- /* implementations. */
-#undef USE_HEAP
-#ifdef RT
-# define USE_HEAP
-# undef USE_STACK
-#endif
-#ifdef MIPS
-# define USE_HEAP
-# undef USE_STACK
-#endif
+# include "gc_private.h"
/*
- * This is an attempt at a garbage collecting storage allocator
+ * This is a garbage collecting storage allocator
* that should run on most UNIX systems. The garbage
- * collector is overly conservative in that it may fail to reclaim
+ * collector is overly conservative in that it may fail to reclaim
* inaccessible storage. On the other hand, it does not assume
* any runtime tag information.
* We make the following assumptions:
@@ -70,770 +41,425 @@
/*
* Separate free lists are maintained for different sized objects
- * up to MAXOBJSZ or MAXAOBJSZ.
- * The lists objfreelist[i] contain free objects of size i which may
- * contain nested pointers. The lists aobjfreelist[i] contain free
- * atomic objects, which may not contain nested pointers.
- * The call allocobj(i) insures that objfreelist[i] points to a non-empty
- * free list it returns a pointer to the first entry on the free list.
- * Allocobj may be called to allocate an object of (small) size i
- * as follows:
+ * up to MAXOBJSZ.
+ * The call GC_allocobj(i,k) ensures that the freelist for
+ * kind k objects of size i points to a non-empty
+ * free list. It returns a pointer to the first entry on the free list.
+ * In a single-threaded world, GC_allocobj may be called to allocate
+ * an object of (small) size i as follows:
*
- * opp = &(objfreelist[i]);
- * if (*opp == (struct obj *)0) allocobj(i);
+ * opp = &(GC_objfreelist[i]);
+ * if (*opp == 0) GC_allocobj(i, NORMAL);
* ptr = *opp;
* *opp = ptr->next;
*
- * The call to allocobj may be replaced by a call to _allocobj if it
- * is made from C, or if C register save conventions are sufficient.
* Note that this is very fast if the free list is non-empty; it should
* only involve the execution of 4 or 5 simple instructions.
* All composite objects on freelists are cleared, except for
- * their first longword.
+ * their first word.
*/
/*
- * The allocator uses allochblk to allocate large chunks of objects.
+ * The allocator uses GC_allochblk to allocate large chunks of objects.
* These chunks all start on addresses which are multiples of
- * HBLKSZ. All starting addresses are maintained on a contiguous
- * list so that they can be traversed in the sweep phase of garbage collection.
+ * HBLKSZ. Each allocated chunk has an associated header,
+ * which can be located quickly based on the address of the chunk.
+ * (See headers.c for details.)
* This makes it possible to check quickly whether an
* arbitrary address corresponds to an object administered by the
* allocator.
- * We make the (probably false) claim that this can be interrupted
- * by a signal with at most the loss of some chunk of memory.
*/
-/* Declarations for fundamental data structures. These are grouped */
-/* together, so that the collector can skip over them. */
-/* This relies on some assumptions about the compiler that are not */
-/* guaranteed valid, but ... */
+word GC_non_gc_bytes = 0; /* Number of bytes not intended to be collected */
-long heapsize = 0; /* Heap size in bytes */
+word GC_gc_no = 0;
-long non_gc_bytes = 0; /* Number of bytes not intended to be collected */
-
-char copyright[] = "Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers";
-char copyright2[] =
- "Copyright (c) 1991 by Xerox Corporation. All rights reserved.";
-char copyright3[] =
- "THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY";
-char copyright4[] =
- " EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.";
-
-/* Return a rough approximation to the stack pointer. A hack, */
-/* but it's semi-portable. */
-word * get_current_sp()
-{
- word x;
- return(&x);
-}
-
-/*
- * Allocate a new heapblock for objects of size n.
- * Add all of the heapblock's objects to the free list for objects
- * of that size. A negative n requests atomic objects.
- */
-void new_hblk(n)
-long n;
-{
- register word *p,
- *r;
- word *last_object; /* points to last object in new hblk */
- register struct hblk *h; /* the new heap block */
- register long abs_sz; /* |n| */
- register int i;
-
-# ifdef PRINTSTATS
- if ((sizeof (struct hblk)) > HBLKSIZE) {
- abort("HBLK SZ inconsistency");
- }
-# endif
-
- /* Allocate a new heap block */
- h = allochblk(n);
-
- /* Add it to hblklist */
- add_hblklist(h);
-
- /* Add objects to free list */
- abs_sz = abs(n);
- p = &(h -> hb_body[abs_sz]); /* second object in *h */
- r = &(h -> hb_body[0]); /* One object behind p */
- last_object = ((word *)((char *)h + HBLKSIZE)) - abs_sz;
- /* Last place for last object to start */
-
- /* make a list of all objects in *h with head as last object */
- while (p <= last_object) {
- /* current object's link points to last object */
- ((struct obj *)p) -> obj_link = (struct obj *)r;
- r = p;
- p += abs_sz;
- }
- p -= abs_sz; /* p now points to last object */
-
- /*
- * put p (which is now head of list of objects in *h) as first
- * pointer in the appropriate free list for this size.
- */
- if (n < 0) {
- ((struct obj *)(h -> hb_body)) -> obj_link = aobjfreelist[abs_sz];
- aobjfreelist[abs_sz] = ((struct obj *)p);
- } else {
- ((struct obj *)(h -> hb_body)) -> obj_link = objfreelist[abs_sz];
- objfreelist[abs_sz] = ((struct obj *)p);
- }
-
- /*
- * Set up mask in header to facilitate alignment checks
- * See "gc.h" for a description of how this works.
- */
-# ifndef RT
- switch (abs_sz) {
- case 1:
- h -> hb_mask = 0x3;
- break;
- case 2:
- h -> hb_mask = 0x7;
- break;
- case 4:
- h -> hb_mask = 0xf;
- break;
- case 8:
- h -> hb_mask = 0x1f;
- break;
- case 16:
- h -> hb_mask = 0x3f;
- break;
- /* By default it remains set to a negative value */
- }
-# else
- /* the 4.2 pcc C compiler did not produce correct code for the switch */
- if (abs_sz == 1) { h -> hb_mask = 0x3; }
- else if (abs_sz == 2) { h -> hb_mask = 0x7; }
- else if (abs_sz == 4) { h -> hb_mask = 0xf; }
- else if (abs_sz == 8) { h -> hb_mask = 0x1f; }
- else if (abs_sz == 16) { h -> hb_mask = 0x3f; }
- /* else skip; */
-# endif
-
-# ifdef DEBUG
- gc_printf("Allocated new heap block at address 0x%X\n",
- h);
-# endif
-}
+char * GC_copyright[] =
+{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers",
+"Copyright (c) 1991,1992 by Xerox Corporation. All rights reserved.",
+"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
+" EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK."};
/* some more variables */
-extern long mem_found; /* Number of reclaimed longwords */
- /* after garbage collection */
+extern signed_word GC_mem_found; /* Number of reclaimed longwords */
+ /* after garbage collection */
-extern long atomic_in_use, composite_in_use;
extern errno;
/*
- * Clear mark bits in all allocated heap blocks
+ * Clear all mark bits associated with block h.
*/
-static void clear_marks()
+/*ARGSUSED*/
+static void clear_marks_for_block(h, dummy)
+struct hblk *h;
+word dummy;
{
+ register hdr * hhdr = HDR(h);
register int j;
- register struct hblk **p;
- register struct hblk *q;
-
-# ifdef HBLK_MAP
- for (q = (struct hblk *) heapstart; ((char*)q) < heaplim; q++)
- if (is_hblk(q)) {
-# else
- for (p = hblklist; p < last_hblk; p++) {
- q = *p;
-# endif
- for (j = 0; j < MARK_BITS_SZ; j++) {
- q -> hb_marks[j] = 0;
- }
+
+ for (j = 0; j < MARK_BITS_SZ; j++) {
+ hhdr -> hb_marks[j] = 0;
}
}
-/* Limits of stack for mark routine. Set by caller to mark. */
-/* All items between mark_stack_top and mark_stack_bottom-1 still need */
-/* to be marked. All items on the stack satisfy quicktest. They do */
-/* not necessarily reference real objects. */
-word * mark_stack_bottom;
-word * mark_stack_top;
-
-#ifdef USE_STACK
-# define STACKGAP 1024 /* Gap in longwords between hardware stack and */
- /* the mark stack. */
- /* Must suffice for printf calls and signal */
- /* handling. */
-#endif
-
-
-#ifdef USE_STACK
-# define PUSH_MS(ptr) *(--mark_stack_top) = (word) ptr
-# define NOT_DONE(a,b) (a < b)
-#else
-# ifdef USE_HEAP
- char *cur_break = 0;
-
-# define STACKINCR 0x4000
-# define PUSH_MS(ptr) \
- mark_stack_top++; \
- if ((char*)mark_stack_top >= cur_break) { \
- if (sbrk(STACKINCR) == -1) { \
- fprintf(stderr, "sbrk failed, code = %d\n",errno); \
- exit(1); \
- } else { \
- cur_break += STACKINCR; \
- } \
- } \
- *mark_stack_top = (word) ptr
-# define NOT_DONE(a,b) (a > b)
-# else
- --> where does the mark stack go? <--
-# endif
-#endif
-
-
-/* Mark all objects corresponding to pointers between mark_stack_bottom */
-/* and mark_stack_top. Assume that nested pointers are aligned */
-/* on alignment-byte boundaries. */
-mark(alignment)
-int alignment;
+/*
+ * Clear mark bits in all allocated heap blocks
+ */
+static void clear_marks()
{
- register long sz;
- extern char end, etext;
- register struct obj *p; /* pointer to current object to be marked */
-
- while (NOT_DONE(mark_stack_top,mark_stack_bottom)) {
- register long word_no;
- register long mask;
- register struct hblk * h;
-
-# ifdef USE_STACK
- p = (struct obj *)(*mark_stack_top++);
-# else
-# ifdef USE_HEAP
- p = (struct obj *)(*mark_stack_top--);
-# else
- --> fixit <--
-# endif
-# endif
-
- /* if not a pointer to obj on heap, skip it */
- if (((char *) p) >= heaplim) {
- continue;
- }
-
- h = HBLKPTR(p);
-
-# ifndef INTERIOR_POINTERS
- /* Check mark bit first, since this test is much more likely to */
- /* fail than later ones. */
- word_no = ((word *)p) - ((word *)h);
- if (mark_bit(h, word_no)) {
- continue;
- }
-# endif
-
-# ifdef INTERIOR_POINTERS
- if (!is_hblk(h)) {
- char m = get_map(h);
- while (m > 0 && m < 0x7f) {
- h -= m;
- m = get_map(h);
- }
- if (m == HBLK_INVALID) {
-# ifdef REPORT_FAILURE
- gc_printf("-> Pointer to non-heap loc: %X\n", p);
-# endif
- continue;
- }
- }
- if (((long)p) - ((long)h) < sizeof (struct hblkhdr)) {
- continue;
- }
-# else
- if (!is_hblk(h)) {
-# ifdef REPORT_FAILURE
- gc_printf("-> Pointer to non-heap loc: %X\n", p);
-# endif
- continue;
- }
-# endif
- sz = HB_SIZE(h);
- mask = h -> hb_mask;
-
-# ifdef INTERIOR_POINTERS
- word_no = get_word_no(p,h,sz,mask);
-# else
- if (!is_proper_obj(p,h,sz,mask)) {
-# ifdef REPORT_FAILURE
- gc_printf("-> Bad pointer to heap block: %X,sz = %d\n",p,sz);
-# endif
- continue;
- }
-# endif
-
- if (word_no + sz > BYTES_TO_WORDS(HBLKSIZE)
- && word_no != BYTES_TO_WORDS(sizeof(struct hblkhdr))
- /* Not first object */) {
- /*
- * Note that we dont necessarily check for pointers to the block header.
- * This doesn't cause any problems, since we have mark
- * bits allocated for such bogus objects.
- * We have to check for references past the last object, since
- * marking from uch an "object" could cause an exception.
- */
-# ifdef REPORT_FAILURE
- gc_printf("-> Bad pointer to heap block: %X,sz = %d\n",p,sz);
-# endif
- continue;
- }
-
-# ifdef INTERIOR_POINTERS
- if (mark_bit(h, word_no)) {
- continue;
- }
-# endif
-
-# ifdef DEBUG2
- gc_printf("*** set bit for heap %x, word %x\n",h,word_no);
-# endif
- set_mark_bit(h, word_no);
- if (h -> hb_sz < 0) {
- /* Atomic object */
- continue;
- }
- {
- /* Mark from fields inside the object */
- register struct obj ** q;
- register struct obj * r;
- register long lim; /* Should be struct obj **, but we're out of */
- /* A registers on a 68000. */
-
-# ifdef INTERIOR_POINTERS
- /* Adjust p, so that it's properly aligned */
-# ifdef DEBUG
- if (p != ((struct obj *)(((word *)h) + word_no))) {
- gc_printf("Adjusting from %X to ", p);
- p = ((struct obj *)(((word *)h) + word_no));
- gc_printf("%X\n", p);
- } else {
- p = ((struct obj *)(((word *)h) + word_no));
- }
-# else
- p = ((struct obj *)(((word *)h) + word_no));
-# endif
-# endif
-# ifdef UNALIGNED
- lim = ((long)(&(p -> obj_component[sz]))) - 3;
-# else
- lim = (long)(&(p -> obj_component[sz]));
-# endif
- for (q = (struct obj **)(&(p -> obj_component[0]));
- q < (struct obj **)lim;) {
- r = *q;
- if (quicktest(r)) {
-# ifdef DEBUG2
- gc_printf("Found plausible nested pointer");
- gc_printf(": 0x%X inside 0x%X at 0x%X\n", r, p, q);
-# endif
- PUSH_MS(((word)r));
- }
-# ifdef UNALIGNED
- q = ((struct obj **)(((long)q)+alignment));
-# else
- q++;
-# endif
- }
- }
- }
+ GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
}
+bool GC_dont_expand = 0;
-/*********************************************************************/
-/* Mark all locations reachable via pointers located between b and t */
-/* b is the first location to be checked. t is one past the last */
-/* location to be checked. */
-/* Assume that pointers are aligned on alignment-byte */
-/* boundaries. */
-/*********************************************************************/
-void mark_all(b, t, alignment)
-word * b;
-word * t;
-int alignment;
-{
- register word *p;
- register word r;
- register word *lim;
-
-# ifdef DEBUG
- gc_printf("Checking for pointers between 0x%X and 0x%X\n",
- b, t);
-# endif
-
- /* Allocate mark stack, leaving a hole below the real stack. */
-# ifdef USE_STACK
- mark_stack_bottom = get_current_sp() - STACKGAP;
- mark_stack_top = mark_stack_bottom;
-# else
-# ifdef USE_HEAP
- mark_stack_bottom = (word *) sbrk(0); /* current break */
- cur_break = (char *) mark_stack_bottom;
- mark_stack_top = mark_stack_bottom;
-# else
- -> then where should the mark stack go ? <-
-# endif
-# endif
+word GC_free_space_divisor = 4;
- /* Round b down so it is properly aligned */
-# ifdef UNALIGNED
- if (alignment == 2) {
- b = (word *)(((long) b) & ~1);
- } else if (alignment == 4) {
- b = (word *)(((long) b) & ~3);
- } else if (alignment != 1) {
- fprintf(stderr, "Bad alignment parameter to mark_all\n");
- abort(alignment);
- }
+/* Return the minimum number of words that must be allocated between */
+/* collections to amortize the collection cost. */
+static word min_words_allocd()
+{
+ int dummy;
+# ifdef THREADS
+ /* We punt, for now. */
+ register signed_word stack_size = 10000;
# else
- b = (word *)(((long) b) & ~3);
+ register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
# endif
+ register word total_root_size; /* includes double stack size, */
+ /* since the stack is expensive */
+ /* to scan. */
+
+ if (stack_size < 0) stack_size = -stack_size;
+ total_root_size = 2 * stack_size + GC_root_size;
+ return(BYTES_TO_WORDS(GC_heapsize + total_root_size)/GC_free_space_divisor);
+}
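/*
 * Editor's sketch, not part of the original patch: the amortization rule
 * that min_words_allocd() above implements, restated as a standalone
 * program.  A collection is declined until the client has allocated about
 * (heap size + root size, with the stack counted twice) divided by
 * GC_free_space_divisor, measured in words.  All demo_* names below are
 * hypothetical.
 */
#include <stdio.h>

static unsigned long demo_min_words_allocd(unsigned long heap_bytes,
                                           unsigned long root_bytes,
                                           unsigned long stack_bytes,
                                           unsigned long divisor)
{
    unsigned long total_root_bytes = 2 * stack_bytes + root_bytes;

    /* bytes -> words, then divide by the free space divisor (4 above) */
    return ((heap_bytes + total_root_bytes) / sizeof(long)) / divisor;
}

int main(void)
{
    /* e.g. a 1 MB heap, 64 KB of static roots, a 16 KB stack, divisor 4 */
    printf("min words between collections: %lu\n",
           demo_min_words_allocd(1UL << 20, 64UL << 10, 16UL << 10, 4UL));
    return 0;
}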
- /* check all pointers in range and put on mark_stack if quicktest true */
- lim = t - 1 /* longword */;
- for (p = b; ((unsigned) p) <= ((unsigned) lim);) {
- /* Coercion to unsigned in the preceding appears to be necessary */
- /* due to a bug in the 4.2BSD C compiler. */
- r = *p;
- if (quicktest(r)) {
-# ifdef DEBUG2
- gc_printf("Found plausible pointer: %X\n", r);
-# endif
- PUSH_MS(r); /* push r onto the mark stack */
- }
-# ifdef UNALIGNED
- p = (word *)(((char *)p) + alignment);
-# else
- p++;
-# endif
- }
- if (mark_stack_top != mark_stack_bottom) mark(alignment);
-
-# ifdef USE_HEAP
- brk(mark_stack_bottom); /* reset break to where it was before */
- cur_break = (char *) mark_stack_bottom;
-# endif
+/* Clear up a few frames worth of garbage left at the top of the stack. */
+/* This is used to prevent us from accidentally treating garbage left */
+/* on the stack by other parts of the collector as roots. This */
+/* differs from the code in misc.c, which actually tries to keep the */
+/* stack clear of long-lived, client-generated garbage. */
+void GC_clear_a_few_frames()
+{
+# define NWORDS 64
+ word frames[NWORDS];
+ register int i;
+
+ for (i = 0; i < NWORDS; i++) frames[i] = 0;
}
/*
* Restore inaccessible objects to the free list
- * update mem_found (number of reclaimed longwords after garbage collection)
+ * update GC_mem_found (number of reclaimed longwords after
+ * garbage collection)
+ * We assume we hold the allocation lock, and are not interruptible by
+ * signals, if that matters.
*/
-void gcollect()
+void GC_gcollect_inner(force)
+bool force; /* Collect even if only a small amount of allocation */
+ /* has taken place. Otherwise we refuse, allowing the */
+ /* heap to grow. */
{
- extern void mark_regs();
-
- extern int holdsigs(); /* disables non-urgent signals - see the */
- /* file "callcc.c" */
-
- long Omask = 0; /* mask to restore signal mask to after
- * critical section.
- */
-
# ifdef PRINTTIMES
- /* some debugging values */
- double start_time = 0;
- double mark_time = 0;
- double done_time = 0;
- static struct tms time_buf;
-# define FTIME \
- (((double)(time_buf.tms_utime + time_buf.tms_stime))/FLOAT_HZ)
-
- /* Get starting time */
- times(&time_buf);
- start_time = FTIME;
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE mark_time;
+ CLOCK_TYPE done_time;
# endif
-# ifdef DEBUG2
- gc_printf("Here we are in gcollect\n");
+ if (!force && !GC_dont_expand
+ && GC_words_allocd < min_words_allocd()) return;
+# ifdef PRINTTIMES
+ GET_TIME(start_time);
# endif
+# ifdef PRINTSTATS
+ GC_printf("Collection %lu reclaimed %ld bytes\n",
+ (unsigned long) GC_gc_no,
+ (long)WORDS_TO_BYTES(GC_mem_found));
+# endif
+ GC_gc_no++;
+# ifdef GATHERSTATS
+ GC_mem_found = 0;
+ GC_composite_in_use = 0;
+ GC_atomic_in_use = 0;
+# endif
+# ifdef PRINTSTATS
+ GC_printf("Collection number %lu after %lu allocated bytes ",
+ (unsigned long) GC_gc_no,
+ (unsigned long) WORDS_TO_BYTES(GC_words_allocd));
+ GC_printf("(heapsize = %lu bytes)\n",
+ (unsigned long) GC_heapsize);
+ /* Printf arguments may be pushed in funny places. Clear the */
+ /* space. */
+ GC_printf("", (unsigned long)0, (unsigned long)0, (unsigned long)0,
+ (unsigned long)0, (unsigned long)0, (unsigned long)0);
+# endif
- /* Don't want to deal with signals in the middle so mask 'em out */
- Omask = holdsigs();
+ clear_marks();
+
+ STOP_WORLD();
/* Mark from all roots. */
- mark_roots();
+ /* Minimize junk left in my registers and on the stack */
+ GC_clear_a_few_frames();
+ GC_noop(0,0,0,0,0,0);
+ GC_mark_roots();
+ GC_promote_black_lists();
+
+ /* Check all debugged objects for consistency */
+ if (GC_debugging_started) {
+ GC_check_heap();
+ }
+
+ START_WORLD();
+
+# ifdef PRINTTIMES
+ GET_TIME(mark_time);
+# endif
+
# ifdef FIND_LEAK
/* Mark all objects on the free list. All objects should be */
/* marked when we're done. */
{
- register int size; /* current object size */
- register struct obj * p; /* pointer to current object */
- register struct hblk * q; /* pointer to block containing *p */
+ register word size; /* current object size */
+ register ptr_t p; /* pointer to current object */
+ register struct hblk * h; /* pointer to block containing *p */
+ register hdr * hhdr;
register int word_no; /* "index" of *p in *q */
-
- for (size = 1; size < MAXOBJSZ; size++) {
- for (p= objfreelist[size]; p != ((struct obj *)0); p=p->obj_link){
- q = HBLKPTR(p);
- word_no = (((word *)p) - ((word *)q));
- set_mark_bit(q, word_no);
- }
- }
- for (size = 1; size < MAXAOBJSZ; size++) {
- for(p= aobjfreelist[size]; p != ((struct obj *)0); p=p->obj_link){
- q = HBLKPTR(p);
- word_no = (((long *)p) - ((long *)q));
- set_mark_bit(q, word_no);
+ int kind;
+
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ for (size = 1; size <= MAXOBJSZ; size++) {
+ for (p= GC_obj_kinds[kind].ok_freelist[size];
+ p != 0; p=obj_link(p)){
+ h = HBLKPTR(p);
+ hhdr = HDR(h);
+ word_no = (((word *)p) - ((word *)h));
+ set_mark_bit_from_hdr(hhdr, word_no);
+ }
}
}
}
- /* Check that everything is marked */
- reclaim(TRUE);
-# endif
+ /* Check that everything is marked */
+ GC_start_reclaim(TRUE);
+# else
- /* Clear free list mark bits, in case they got accidentally marked */
- /* Note: HBLKPTR(p) == pointer to head of block containing *p */
- /* Also subtract memory remaining from mem_found count. */
- /* Note that composite objects on free list are cleared. */
- /* Thus accidentally marking a free list is not a problem; only */
- /* objects on the list itself will be marked, and that's fixed here. */
+ GC_finalize();
+
+ /* Clear free list mark bits, in case they got accidentally marked */
+ /* Note: HBLKPTR(p) == pointer to head of block containing *p */
+ /* Also subtract memory remaining from GC_mem_found count. */
+ /* Note that composite objects on free list are cleared. */
+ /* Thus accidentally marking a free list is not a problem; only */
+ /* objects on the list itself will be marked, and that's fixed here. */
{
- register int size; /* current object size */
- register struct obj * p; /* pointer to current object */
- register struct hblk * q; /* pointer to block containing *p */
+ register word size; /* current object size */
+ register ptr_t p; /* pointer to current object */
+ register struct hblk * h; /* pointer to block containing *p */
+ register hdr * hhdr;
register int word_no; /* "index" of *p in *q */
-# ifdef REPORT_FAILURE
- int prev_failure = 0;
-# endif
-
- for (size = 1; size < MAXOBJSZ; size++) {
- for (p= objfreelist[size]; p != ((struct obj *)0); p=p->obj_link){
- q = HBLKPTR(p);
- word_no = (((word *)p) - ((word *)q));
-# ifdef REPORT_FAILURE
- if (!prev_failure && mark_bit(q, word_no)) {
- gc_printf("-> Pointer to composite free list: %X,sz = %d\n",
- p, size);
- prev_failure = 1;
- }
-# endif
- clear_mark_bit(q, word_no);
- mem_found -= size;
- }
-# ifdef REPORT_FAILURE
- prev_failure = 0;
-# endif
- }
- for (size = 1; size < MAXAOBJSZ; size++) {
- for(p= aobjfreelist[size]; p != ((struct obj *)0); p=p->obj_link){
- q = HBLKPTR(p);
- word_no = (((long *)p) - ((long *)q));
-# ifdef REPORT_FAILURE
- if (!prev_failure && mark_bit(q, word_no)) {
- gc_printf("-> Pointer to atomic free list: %X,sz = %d\n",
- p, size);
- prev_failure = 1;
- }
-# endif
- clear_mark_bit(q, word_no);
- mem_found -= size;
+ int kind;
+
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ for (size = 1; size <= MAXOBJSZ; size++) {
+ for (p= GC_obj_kinds[kind].ok_freelist[size];
+ p != 0; p=obj_link(p)){
+ h = HBLKPTR(p);
+ hhdr = HDR(h);
+ word_no = (((word *)p) - ((word *)h));
+ clear_mark_bit_from_hdr(hhdr, word_no);
+ GC_mem_found -= size;
}
-# ifdef REPORT_FAILURE
- prev_failure = 0;
-# endif
+ }
}
}
-# ifdef PRINTTIMES
- /* Get intermediate time */
- times(&time_buf);
- mark_time = FTIME;
-# endif
-# ifdef PRINTSTATS
- gc_printf("Bytes recovered before reclaim - f.l. count = %d\n",
- WORDS_TO_BYTES(mem_found));
-# endif
-
- /* Reconstruct free lists to contain everything not marked */
- reclaim(FALSE);
+# ifdef PRINTSTATS
+ GC_printf("Bytes recovered before GC_reclaim - f.l. count = %ld\n",
+ (long)WORDS_TO_BYTES(GC_mem_found));
+# endif
- /* clear mark bits in all allocated heap blocks */
- clear_marks();
+ /* Reconstruct free lists to contain everything not marked */
+ GC_start_reclaim(FALSE);
+
+# endif /* FIND_LEAK */
# ifdef PRINTSTATS
- gc_printf("Reclaimed %d bytes in heap of size %d bytes\n",
- WORDS_TO_BYTES(mem_found), heapsize);
- gc_printf("%d (atomic) + %d (composite) bytes in use\n",
- WORDS_TO_BYTES(atomic_in_use),
- WORDS_TO_BYTES(composite_in_use));
+ GC_printf("Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
+ (long)WORDS_TO_BYTES(GC_mem_found),
+ (unsigned long)GC_heapsize);
+ GC_printf("%lu (atomic) + %lu (composite) bytes in use\n",
+ (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
+ (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
# endif
- /*
- * What follows is somewhat heuristic. Constant may benefit
- * from tuning ...
- */
-# ifndef FIND_LEAK
- /* In the leak finding case, we expect gcollect to be called manually */
- /* before we're out of heap space. */
- if (WORDS_TO_BYTES(mem_found) * 4 < heapsize) {
- /* Less than about 1/4 of available memory was reclaimed - get more */
- {
- long size_to_get = HBLKSIZE + hincr * HBLKSIZE;
- struct hblk * thishbp;
- char * nheaplim;
-
- thishbp = HBLKPTR(((unsigned)sbrk(0))+HBLKSIZE-1 );
- nheaplim = (char *) (((unsigned)thishbp) + size_to_get);
- if( ((char *) brk(nheaplim)) == ((char *)-1) ) {
- write(2,"Out of memory, trying to continue ...\n",38);
- } else {
- heaplim = nheaplim;
- thishbp->hb_sz =
- BYTES_TO_WORDS(size_to_get - sizeof(struct hblkhdr));
- freehblk(thishbp);
- heapsize += size_to_get;
- update_hincr;
- }
-# ifdef PRINTSTATS
- gc_printf("Gcollect: needed to increase heap size by %d\n",
- size_to_get);
-# endif
- }
- }
-# endif
-
- /* Reset mem_found for next collection */
- mem_found = 0;
-
- /* Reenable signals */
- sigsetmask(Omask);
+ /* Reset or increment counters for next cycle */
+ GC_words_allocd_before_gc += GC_words_allocd;
+ GC_words_allocd = 0;
/* Get final time */
# ifdef PRINTTIMES
- times(&time_buf);
- done_time = FTIME;
- gc_printf("Garbage collection took %d + %d msecs\n",
- (int)(1000.0 * (mark_time - start_time)),
- (int)(1000.0 * (done_time - mark_time)));
+ GET_TIME(done_time);
+ GC_printf("Garbage collection took %lu + %lu msecs\n",
+ MS_TIME_DIFF(mark_time,start_time),
+ MS_TIME_DIFF(done_time,mark_time));
# endif
}
+/* Externally callable version of above */
+void GC_gcollect()
+{
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ if (!GC_is_initialized) GC_init_inner();
+ /* Minimize junk left in my registers */
+ GC_noop(0,0,0,0,0,0);
+ GC_gcollect_inner(TRUE);
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
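/*
 * Editor's sketch, not part of the original patch: the wrapper idiom used
 * by GC_gcollect() above, where the exported entry point blocks signals and
 * takes the allocator lock, and the *_inner routine simply assumes both.
 * POSIX sigprocmask() stands in for DISABLE_SIGNALS/ENABLE_SIGNALS; the
 * demo_* names are hypothetical.
 */
#include <signal.h>

static sigset_t demo_saved_mask;

static void demo_collect_inner(void)
{
    /* ... assumes signals are blocked and the allocator lock is held ... */
}

void demo_collect(void)
{
    sigset_t all;

    sigfillset(&all);
    sigprocmask(SIG_BLOCK, &all, &demo_saved_mask);   /* DISABLE_SIGNALS() */
    /* LOCK() would be taken here in a threads build */
    demo_collect_inner();
    /* UNLOCK() */
    sigprocmask(SIG_SETMASK, &demo_saved_mask, 0);    /* ENABLE_SIGNALS() */
}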
+
/*
- * this explicitly increases the size of the heap. It is used
- * internally, but my also be invoked directly by the user.
- * The argument is in units of HBLKSIZE.
+ * Use the chunk of memory starting at p of size bytes as part of the heap.
+ * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
*/
-void expand_hp(n)
-int n;
+void GC_add_to_heap(p, bytes)
+struct hblk *p;
+word bytes;
{
- struct hblk * thishbp = HBLKPTR(((unsigned)sbrk(0))+HBLKSIZE-1 );
- extern int holdsigs();
- int Omask;
-
- /* Don't want to deal with signals in the middle of this */
- Omask = holdsigs();
-
- heaplim = (char *) (((unsigned)thishbp) + n * HBLKSIZE);
- if (n > 2*hincr) {
- hincr = n/2;
+ word words;
+
+ GC_install_header(p);
+ words = BYTES_TO_WORDS(bytes - HDR_BYTES);
+ HDR(p) -> hb_sz = words;
+ GC_freehblk(p);
+ GC_heapsize += bytes;
+ if ((ptr_t)p <= GC_least_plausible_heap_addr
+ || GC_least_plausible_heap_addr == 0) {
+ GC_least_plausible_heap_addr = (ptr_t)p - sizeof(word);
+ /* Making it a little smaller than necessary prevents */
+ /* us from getting a false hit from the variable */
+ /* itself. There's some unintentional reflection */
+ /* here. */
}
- if( ((char *) brk(heaplim)) == ((char *)-1) ) {
- write(2,"Out of Memory!\n",15);
- exit(-1);
+ if ((ptr_t)p + bytes >= GC_greatest_plausible_heap_addr) {
+ GC_greatest_plausible_heap_addr = (ptr_t)p + bytes;
}
-# ifdef PRINTSTATS
- gc_printf("Voluntarily increasing heap size by %d\n",
- n*HBLKSIZE);
-# endif
- thishbp->hb_sz = BYTES_TO_WORDS(n * HBLKSIZE - sizeof(struct hblkhdr));
- freehblk(thishbp);
- heapsize += ((char *)heaplim) - ((char *)thishbp);
- /* Reenable signals */
- sigsetmask(Omask);
}
+ptr_t GC_least_plausible_heap_addr = (ptr_t)ONES;
+ptr_t GC_greatest_plausible_heap_addr = 0;
+
+ptr_t GC_max(x,y)
+ptr_t x, y;
+{
+ return(x > y? x : y);
+}
-extern int dont_gc; /* Unsafe to start garbage collection */
+ptr_t GC_min(x,y)
+ptr_t x, y;
+{
+ return(x < y? x : y);
+}
/*
- * Make sure the composite object free list for sz is not empty.
- * Return a pointer to the first object on the free list.
- * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
- *
- * note: _allocobj
+ * This explicitly increases the size of the heap. It is used
+ * internally, but may also be invoked from GC_expand_hp by the user.
+ * The argument is in units of HBLKSIZE.
+ * Returns FALSE on failure.
*/
-struct obj * _allocobj(sz)
-long sz;
+bool GC_expand_hp_inner(n)
+word n;
{
- if (sz == 0) return((struct obj *)0);
-
-# ifdef DEBUG2
- gc_printf("here we are in _allocobj\n");
-# endif
+ word bytes = n * HBLKSIZE;
+ struct hblk * space = GET_MEM(bytes);
+ word expansion_slop; /* Number of bytes by which we expect the */
+ /* heap to expand soon. */
- if (objfreelist[sz] == ((struct obj *)0)) {
- if (hblkfreelist == ((struct hblk *)0) && !dont_gc) {
- if (GC_DIV * non_gc_bytes < GC_MULT * heapsize) {
-# ifdef DEBUG
- gc_printf("Calling gcollect\n");
-# endif
- gcollect();
- } else {
- expand_hp(NON_GC_HINCR);
- }
- }
- if (objfreelist[sz] == ((struct obj *)0)) {
-# ifdef DEBUG
- gc_printf("Calling new_hblk\n");
-# endif
- new_hblk(sz);
- }
+ if (n > 2*GC_hincr) {
+ GC_hincr = n/2;
+ }
+ if( space == 0 ) {
+ return(FALSE);
}
-# ifdef DEBUG2
- gc_printf("Returning %x from _allocobj\n",objfreelist[sz]);
- gc_printf("Objfreelist[%d] = %x\n",sz,objfreelist[sz]);
+# ifdef PRINTSTATS
+ GC_printf("Increasing heap size by %lu\n",
+ (unsigned long)bytes);
# endif
- return(objfreelist[sz]);
+ expansion_slop = 8 * WORDS_TO_BYTES(min_words_allocd());
+ if (5 * HBLKSIZE * MAXHINCR > expansion_slop) {
+ expansion_slop = 5 * HBLKSIZE * MAXHINCR;
+ }
+ if (GC_last_heap_addr == 0 && !((word)space & SIGNB)
+ || GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space) {
+ /* Assume the heap is growing up */
+ GC_greatest_plausible_heap_addr =
+ GC_max(GC_greatest_plausible_heap_addr,
+ (ptr_t)space + bytes + expansion_slop);
+ } else {
+ /* Heap is growing down */
+ GC_least_plausible_heap_addr =
+ GC_min(GC_least_plausible_heap_addr,
+ (ptr_t)space - expansion_slop);
+ }
+ GC_prev_heap_addr = GC_last_heap_addr;
+ GC_last_heap_addr = (ptr_t)space;
+ GC_add_to_heap(space, bytes);
+ return(TRUE);
+}
+
+/* Really returns a bool, but it's externally visible, so that's clumsy. */
+int GC_expand_hp(n)
+int n;
+{
+ int result;
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ if (!GC_is_initialized) GC_init_inner();
+ result = (int)GC_expand_hp_inner((word)n);
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(result);
}
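/*
 * Editor's usage sketch, not part of the original patch.  In this version
 * the argument to GC_expand_hp() is a block count in units of HBLKSIZE (see
 * the comment above GC_expand_hp_inner); treat that unit, and the presence
 * of a declaration in gc.h, as assumptions.  Growing the heap once, up
 * front, avoids several collection-triggering expansions while a large,
 * known working set is built.  demo_preexpand is hypothetical.
 */
# include "gc.h"

void demo_preexpand()
{
    if (!GC_expand_hp(256)) {
        /* FALSE: the underlying memory request failed; the heap is */
        /* unchanged and allocation will have to collect harder.    */
    }
}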
/*
- * Make sure the atomic object free list for sz is not empty.
+ * Make sure the object free list for sz is not empty.
* Return a pointer to the first object on the free list.
* The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
*
- * note: this is called by allocaobj (see the file mach_dep.c)
*/
-struct obj * _allocaobj(sz)
-long sz;
+ptr_t GC_allocobj(sz, kind)
+word sz;
+int kind;
{
- if (sz == 0) return((struct obj *)0);
-
- if (aobjfreelist[sz] == ((struct obj *) 0)) {
- if (hblkfreelist == ((struct hblk *)0) && !dont_gc) {
- if (GC_DIV * non_gc_bytes < GC_MULT * heapsize) {
-# ifdef DEBUG
- gc_printf("Calling gcollect\n");
-# endif
- gcollect();
+ register ptr_t * flh = &(GC_obj_kinds[kind].ok_freelist[sz]);
+ if (sz == 0) return(0);
+
+ if (*flh == 0) {
+ GC_continue_reclaim(sz, kind);
+ }
+ if (*flh == 0) {
+ if (!GC_sufficient_hb(sz, kind) && !GC_dont_gc) {
+ if (GC_DIV * GC_non_gc_bytes < GC_MULT * GC_heapsize) {
+ GC_gcollect_inner(FALSE);
+ GC_continue_reclaim(sz, kind);
} else {
- expand_hp(NON_GC_HINCR);
+ if (!GC_expand_hp_inner(NON_GC_HINCR)) {
+ GC_gcollect_inner(FALSE);
+ GC_continue_reclaim(sz, kind);
+ }
}
}
- if (aobjfreelist[sz] == ((struct obj *) 0)) {
- new_hblk(-sz);
+ if (*flh == 0) {
+ GC_new_hblk(sz, kind);
}
}
- return(aobjfreelist[sz]);
+ return(*flh);
}
-
-# ifdef SPARC
- put_mark_stack_bottom(val)
- long val;
- {
- mark_stack_bottom = (word *)val;
- }
-# endif
diff --git a/allochblk.c b/allochblk.c
index f26fb339..1dcf581b 100644
--- a/allochblk.c
+++ b/allochblk.c
@@ -5,14 +5,14 @@
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
- * Permission is hereby granted to copy this compiler for any purpose,
+ * Permission is hereby granted to copy this garbage collector for any purpose,
* provided the above notices are retained on all copies.
*/
#define DEBUG
#undef DEBUG
#include <stdio.h>
-#include "gc.h"
+#include "gc_private.h"
/**/
@@ -27,337 +27,316 @@
* (in bytes) of the entire block.
* Neighbors are coalesced.
*/
+
+# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
+ /* largest block we will allocate starting on a black */
+ /* listed block. Must be >= HBLKSIZE. */
+
+struct hblk * GC_hblkfreelist = 0;
-struct hblk *savhbp = (struct hblk *)0; /* heap block preceding next */
+struct hblk *GC_savhbp = (struct hblk *)0; /* heap block preceding next */
/* block to be examined by */
- /* allochblk. */
+ /* GC_allochblk. */
/*
- * Return 1 if there is a heap block sufficient for object size sz,
- * 0 otherwise. Advance savhbp to point to the block prior to the
+ * Return TRUE if there is a heap block sufficient for object size sz,
+ * FALSE otherwise. Advance GC_savhbp to point to the block prior to the
* first such block.
*/
-int sufficient_hb(sz)
-int sz;
+bool GC_sufficient_hb(sz, kind)
+word sz;
+int kind;
{
register struct hblk *hbp;
+register hdr * hhdr;
struct hblk *prevhbp;
-int size_needed, size_avail;
-int first_time = 1;
+signed_word size_needed;
+signed_word size_avail;
+bool first_time = TRUE;
- size_needed = WORDS_TO_BYTES(sz>0? sz : -sz);
- size_needed = (size_needed+sizeof(struct hblkhdr)+HBLKSIZE-1) & ~HBLKMASK;
+ size_needed = WORDS_TO_BYTES(sz);
+ size_needed = (size_needed+HDR_BYTES+HBLKSIZE-1) & ~HBLKMASK;
# ifdef DEBUG
- gc_printf("sufficient_hb: sz = %d, size_needed = 0x%X\n",
- sz, size_needed);
+ GC_printf("GC_sufficient_hb: sz = %ld, size_needed = 0x%lx\n",
+ (long)sz, (unsigned long)size_needed);
# endif
/* search for a big enough block in free list */
- hbp = savhbp;
+ hbp = GC_savhbp;
+ hhdr = HDR(hbp);
for(;;) {
prevhbp = hbp;
hbp = ((prevhbp == (struct hblk *)0)
- ? hblkfreelist
- : prevhbp->hb_next);
+ ? GC_hblkfreelist
+ : hhdr->hb_next);
+ hhdr = HDR(hbp);
- if( prevhbp == savhbp && !first_time) {
+ if( prevhbp == GC_savhbp && !first_time) {
/* no sufficiently big blocks on free list */
- return(0);
+ return(FALSE);
}
first_time = 0;
if( hbp == (struct hblk *)0 ) continue;
- size_avail = hbp->hb_sz;
+ size_avail = hhdr->hb_sz;
+ if ( kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC) {
+ struct hblk * thishbp;
+ struct hblk * lasthbp = hbp;
+
+ while ((ptr_t)lasthbp - (ptr_t)hbp < size_avail
+ && (thishbp = GC_is_black_listed(lasthbp,
+ (word)size_needed))) {
+ lasthbp = thishbp;
+ }
+ size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
+ }
if( size_avail >= size_needed ) {
- savhbp = prevhbp;
- return(1);
+ GC_savhbp = prevhbp;
+ return(TRUE);
}
}
}
/*
* Allocate (and return pointer to) a heap block
- * for objects of size |sz|.
+ * for objects of size |sz| words.
*
- * NOTE: Caller is responsible for adding it to global hblklist
- * and for building an object freelist in it.
+ * NOTE: We set obj_map field in header correctly.
+ * Caller is responsible for building an object freelist in block.
*
- * The new block is guaranteed to be cleared if sz > 0.
+ * We clear the block if it is destined for large objects, and if
+ * kind requires that newly allocated objects be cleared.
*/
struct hblk *
-allochblk(sz)
-long sz;
+GC_allochblk(sz, kind)
+word sz;
+int kind;
{
register struct hblk *thishbp;
+ register hdr * thishdr; /* Header corr. to thishbp */
register struct hblk *hbp;
+ register hdr * hhdr; /* Header corr. to hbp */
struct hblk *prevhbp;
- long size_needed, /* number of bytes in requested objects */
- uninit, /* => Found uninitialized block */
- size_avail;
- int first_time = 1;
+ register hdr * phdr; /* Header corr. to prevhbp */
+ signed_word size_needed; /* number of bytes in requested objects */
+ signed_word size_avail; /* bytes available in this block */
+ bool first_time = TRUE;
- char *sbrk(); /* data segment size increasing */
- char *brk(); /* functions */
-
- size_needed = WORDS_TO_BYTES(sz>0? sz : -sz);
- size_needed = (size_needed+sizeof(struct hblkhdr)+HBLKSIZE-1) & ~HBLKMASK;
-# ifdef DEBUG
- gc_printf("(allochblk) sz = %x, size_needed = 0x%X\n", sz, size_needed);
-# endif
+ size_needed = WORDS_TO_BYTES(sz);
+ size_needed = (size_needed+HDR_BYTES+HBLKSIZE-1) & ~HBLKMASK;
/* search for a big enough block in free list */
- hbp = savhbp;
+ hbp = GC_savhbp;
+ hhdr = HDR(hbp);
for(;;) {
prevhbp = hbp;
- hbp = ((prevhbp == (struct hblk *)0)
- ? hblkfreelist
- : prevhbp->hb_next);
+ phdr = hhdr;
+ hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
+ hhdr = HDR(hbp);
- if( prevhbp == savhbp && !first_time) {
+ if( prevhbp == GC_savhbp && !first_time) {
/* no sufficiently big blocks on free list, */
/* let thishbp --> a newly-allocated block, */
/* free it (to merge into existing block */
/* list) and start the search again, this */
/* time with guaranteed success. */
- int size_to_get = size_needed + hincr * HBLKSIZE;
- extern int holdsigs();
- int Omask;
-
- /* Don't want to deal with signals in the middle of this */
- Omask = holdsigs();
-
- update_hincr;
- thishbp = HBLKPTR(((unsigned)sbrk(0))+HBLKSIZE-1 );
- heaplim = (char *) (((unsigned)thishbp) + size_to_get);
-
- if( (brk(heaplim)) == ((char *)-1) ) {
- write(2,"Out of Memory! Giving up ...\n", 30);
- exit(-1);
- }
-# ifdef PRINTSTATS
- gc_printf("Need to increase heap size by %d\n",
- size_to_get);
-# endif
- heapsize += size_to_get;
- thishbp->hb_sz =
- BYTES_TO_WORDS(size_to_get - sizeof(struct hblkhdr));
- freehblk(thishbp);
- /* Reenable signals */
- sigsetmask(Omask);
- hbp = savhbp;
- first_time = 1;
- continue;
+ word size_to_get = size_needed + GC_hincr * HBLKSIZE;
+
+ if (! GC_expand_hp_inner(divHBLKSZ(size_to_get))) {
+ if (! GC_expand_hp_inner(divHBLKSZ((word)size_needed)))
+ {
+ /* GC_printf("Out of Memory! Giving up ...\n"); */
+ /* There are other things we could try. It */
+ /* would probably be reasonable to clear */
+                /* black lists at this point. */
+ return(0);
+ } else {
+ WARN("Out of Memory! Trying to continue ...\n");
+ GC_gcollect_inner(TRUE);
+ }
+ }
+ update_GC_hincr;
+ return (GC_allochblk(sz, kind));
}
first_time = 0;
- if( hbp == (struct hblk *)0 ) continue;
-
- size_avail = hbp->hb_sz;
+ if( hbp == 0 ) continue;
+
+ size_avail = hhdr->hb_sz;
+ if (size_avail < size_needed) continue;
+ /* If the next heap block is obviously better, go on. */
+ /* This prevents us from disassembling a single large block */
+ /* to get tiny blocks. */
+ {
+ word next_size;
+
+ thishbp = hhdr -> hb_next;
+ if (thishbp == 0) thishbp = GC_hblkfreelist;
+ thishdr = HDR(thishbp);
+ next_size = thishdr -> hb_sz;
+ if (next_size < size_avail
+ && next_size >= size_needed
+ && !GC_is_black_listed(thishbp, (word)size_needed)) {
+ continue;
+ }
+ }
+ if ( kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC) {
+ struct hblk * lasthbp = hbp;
+
+ while (size_avail >= size_needed
+ && (thishbp = GC_is_black_listed(lasthbp,
+ (word)size_needed))) {
+ lasthbp = thishbp;
+ }
+ size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
+ thishbp = lasthbp;
+ if (size_avail >= size_needed && thishbp != hbp) {
+ /* Split the block at thishbp */
+ GC_install_header(thishbp);
+ thishdr = HDR(thishbp);
+ /* GC_invalidate_map not needed, since we will */
+ /* allocate this block. */
+ thishdr -> hb_next = hhdr -> hb_next;
+ thishdr -> hb_sz = size_avail;
+ hhdr -> hb_sz = (ptr_t)thishbp - (ptr_t)hbp;
+ hhdr -> hb_next = thishbp;
+ /* Advance to thishbp */
+ prevhbp = hbp;
+ phdr = hhdr;
+ hbp = thishbp;
+ hhdr = thishdr;
+ }
+ }
if( size_avail >= size_needed ) {
/* found a big enough block */
/* let thishbp --> the block */
/* set prevhbp, hbp to bracket it */
thishbp = hbp;
+ thishdr = hhdr;
if( size_avail == size_needed ) {
- hbp = hbp->hb_next;
- uninit = thishbp -> hb_uninit;
+ hbp = hhdr->hb_next;
+ hhdr = HDR(hbp);
} else {
- uninit = thishbp -> hb_uninit;
- thishbp -> hb_uninit = 1;
- /* Just in case we get interrupted by a */
- /* signal */
hbp = (struct hblk *)
(((unsigned)thishbp) + size_needed);
- hbp->hb_uninit = uninit;
- hbp->hb_next = thishbp->hb_next;
- hbp->hb_sz = size_avail - size_needed;
+ GC_install_header(hbp);
+ hhdr = HDR(hbp);
+ GC_invalidate_map(hhdr);
+ hhdr->hb_next = thishdr->hb_next;
+ hhdr->hb_sz = size_avail - size_needed;
}
/* remove *thishbp from hblk freelist */
- if( prevhbp == (struct hblk *)0 ) {
- hblkfreelist = hbp;
+ if( prevhbp == 0 ) {
+ GC_hblkfreelist = hbp;
} else {
- prevhbp->hb_next = hbp;
+ phdr->hb_next = hbp;
}
/* save current list search position */
- savhbp = prevhbp;
+ GC_savhbp = hbp;
break;
}
}
/* set size and mask field of *thishbp correctly */
- thishbp->hb_sz = sz;
- thishbp->hb_mask = -1; /* may be changed by new_hblk */
+ thishdr->hb_sz = sz;
/* Clear block if necessary */
- if (uninit && sz > 0) {
- register word * p = &(thishbp -> hb_body[0]);
- register word * plim;
-
- plim = (word *)(((char *)thishbp) + size_needed);
- while (p < plim) {
- *p++ = 0;
- }
+ if (sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
+ bzero((char *)thishbp + HDR_BYTES, (int)(size_needed - HDR_BYTES));
}
+
/* Clear mark bits */
{
- register word *p = (word *)(&(thishbp -> hb_marks[0]));
- register word * plim = (word *)(&(thishbp -> hb_marks[MARK_BITS_SZ]));
+ register word *p = (word *)(&(thishdr -> hb_marks[0]));
+ register word * plim =
+ (word *)(&(thishdr -> hb_marks[MARK_BITS_SZ]));
while (p < plim) {
*p++ = 0;
}
}
+
+ /* Add it to data structure describing hblks in use */
+ GC_install_counts(thishbp, (word)size_needed);
+
+ /* Add description of valid object pointers. */
+ GC_add_map_entry(sz);
+ thishdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];
+
+ /* Set kind related fields */
+ thishdr -> hb_obj_kind = kind;
+ thishdr -> hb_mark_proc = GC_obj_kinds[kind].ok_mark_proc;
-# ifdef DEBUG
- gc_printf("Returning 0x%X\n", thishbp);
-# endif
return( thishbp );
}
-/* Clear the header information in a previously allocated heap block p */
-/* so that it can be coalesced with an initialized heap block. */
-static clear_header(p)
-register struct hblk *p;
-{
- p -> hb_sz = 0;
-# ifndef HBLK_MAP
- p -> hb_index = (struct hblk **)0;
-# endif
- p -> hb_next = 0;
- p -> hb_mask = 0;
-# if MARK_BITS_SZ <= 60
- /* Since this block was deallocated, only spurious mark */
- /* bits corresponding to the header could conceivably be set */
- p -> hb_marks[0] = 0;
- p -> hb_marks[1] = 0;
-# else
- --> fix it
-# endif
-}
-
/*
* Free a heap block.
*
- * Assume the block is not currently on hblklist.
- *
* Coalesce the block with its neighbors if possible.
-
- * All mark words (except possibly the first) are assumed to be cleared.
- * The body is assumed to be cleared unless hb_uninit is nonzero.
+ *
+ * All mark words are assumed to be cleared.
*/
void
-freehblk(p)
+GC_freehblk(p)
register struct hblk *p;
{
+register hdr *phdr; /* Header corresponding to p */
register struct hblk *hbp, *prevhbp;
-register int size;
+register hdr *hhdr, *prevhdr;
+register signed_word size;
- /* savhbp may become invalid due to coalescing. Clear it. */
- savhbp = (struct hblk *)0;
+ /* GC_savhbp may become invalid due to coalescing. Clear it. */
+ GC_savhbp = (struct hblk *)0;
- size = p->hb_sz;
- if( size < 0 ) size = -size;
+ phdr = HDR(p);
+ size = phdr->hb_sz;
size =
- ((WORDS_TO_BYTES(size)+sizeof(struct hblkhdr)+HBLKSIZE-1)
+ ((WORDS_TO_BYTES(size)+HDR_BYTES+HBLKSIZE-1)
& (~HBLKMASK));
- p->hb_sz = size;
+ GC_remove_counts(p, (word)size);
+ phdr->hb_sz = size;
+ GC_invalidate_map(phdr);
- prevhbp = (struct hblk *) 0;
- hbp = hblkfreelist;
+ prevhbp = 0;
+ hbp = GC_hblkfreelist;
+ hhdr = HDR(hbp);
- while( (hbp != (struct hblk *)0) && (hbp < p) ) {
+ while( (hbp != 0) && (hbp < p) ) {
prevhbp = hbp;
- hbp = hbp->hb_next;
+ prevhdr = hhdr;
+ hbp = hhdr->hb_next;
+ hhdr = HDR(hbp);
}
+
+ /* Check for duplicate deallocation in the easy case */
+ if (hbp != 0 && (ptr_t)p + size > (ptr_t)hbp
+ || prevhbp != 0 && (ptr_t)prevhbp + prevhdr->hb_sz > (ptr_t)p) {
+ GC_printf("Duplicate large block deallocation of 0x%lx\n",
+ (unsigned long) p);
+ GC_printf("Surrounding free blocks are 0x%lx and 0x%lx\n",
+ (unsigned long) prevhbp, (unsigned long) hbp);
+ }
/* Coalesce with successor, if possible */
- if( (((unsigned)p)+size) == ((unsigned)hbp) ) {
- (p -> hb_uninit) |= (hbp -> hb_uninit);
- p->hb_next = hbp->hb_next;
- p->hb_sz += hbp->hb_sz;
- if (!p -> hb_uninit) clear_header(hbp);
+ if( (((word)p)+size) == ((word)hbp) ) {
+ phdr->hb_next = hhdr->hb_next;
+ phdr->hb_sz += hhdr->hb_sz;
+ GC_remove_header(hbp);
} else {
- p->hb_next = hbp;
+ phdr->hb_next = hbp;
}
- if( prevhbp == (struct hblk *)0 ) {
- hblkfreelist = p;
- } else if( (((unsigned)prevhbp) + prevhbp->hb_hdr.hbh_sz) ==
- ((unsigned)p) ) {
+
+ if( prevhbp == 0 ) {
+ GC_hblkfreelist = p;
+ } else if( (((word)prevhbp) + prevhdr->hb_sz)
+ == ((word)p) ) {
/* Coalesce with predecessor */
- (prevhbp->hb_uninit) |= (p -> hb_uninit);
- prevhbp->hb_next = p->hb_next;
- prevhbp->hb_sz += p->hb_sz;
- if (!prevhbp -> hb_uninit) clear_header(p);
+ prevhdr->hb_next = phdr->hb_next;
+ prevhdr->hb_sz += phdr->hb_sz;
+ GC_remove_header(p);
} else {
- prevhbp->hb_next = p;
- }
-}
-
-/* Add a heap block to hblklist or hblkmap. */
-void add_hblklist(hbp)
-struct hblk * hbp;
-{
-# ifdef HBLK_MAP
- long size = hbp->hb_sz;
- long index = divHBLKSZ(((long)hbp) - ((long)heapstart));
- long i;
-
- if( size < 0 ) size = -size;
- size = (divHBLKSZ(WORDS_TO_BYTES(size)+sizeof(struct hblkhdr)+HBLKSIZE-1));
- /* in units of HBLKSIZE */
- hblkmap[index] = HBLK_VALID;
- for (i = 1; i < size; i++) {
- if (i < 0x7f) {
- hblkmap[index+i] = i;
- } else {
- /* May overflow a char. Store largest possible value */
- hblkmap[index+i] = 0x7e;
- }
- }
-# else
- if (last_hblk >= &hblklist[MAXHBLKS]) {
- fprintf(stderr, "Not configured for enough memory\n");
- exit(1);
- }
- *last_hblk = hbp;
- hbp -> hb_index = last_hblk;
- last_hblk++;
-# endif
-}
-
-/* Delete a heap block from hblklist or hblkmap. */
-void del_hblklist(hbp)
-struct hblk * hbp;
-{
-# ifdef HBLK_MAP
- long size = hbp->hb_sz;
- long index = divHBLKSZ(((long)hbp) - ((long)heapstart));
- long i;
-
- if( size < 0 ) size = -size;
- size = (divHBLKSZ(WORDS_TO_BYTES(size)+sizeof(struct hblkhdr)+HBLKSIZE-1));
- /* in units of HBLKSIZE */
- for (i = 0; i < size; i++) {
- hblkmap[index+i] = HBLK_INVALID;
+ prevhdr->hb_next = p;
}
-# else
- register struct hblk ** list_entry;
- last_hblk--;
- /* Let **last_hblk use the slot previously occupied by *hbp */
- list_entry = hbp -> hb_index;
- (*last_hblk) -> hb_index = list_entry;
- *list_entry = *last_hblk;
-# endif
}
-/* Initialize hblklist */
-void init_hblklist()
-{
-# ifdef DEBUG
- gc_printf("Here we are in init_hblklist - ");
- gc_printf("last_hblk = %x\n",&(hblklist[0]));
-# endif
-# ifndef HBLK_MAP
- last_hblk = &(hblklist[0]);
-# endif
-}
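/*
 * Editor's sketch, not part of the original patch: the address-ordered free
 * list with neighbor coalescing that GC_freehblk() above implements.  Free
 * blocks are kept sorted by address; on free we walk to the insertion point
 * and merge with the successor and/or predecessor whenever the byte ranges
 * touch.  demo_* names are hypothetical and sizes are plain byte counts.
 */
#include <stddef.h>

struct demo_blk {
    size_t sz;                  /* size of this free block in bytes */
    struct demo_blk *next;      /* next free block, at a higher address */
};

static struct demo_blk *demo_freelist = 0;

void demo_freeblk(struct demo_blk *p, size_t sz)
{
    struct demo_blk *hbp = demo_freelist, *prev = 0;

    p->sz = sz;
    while (hbp != 0 && hbp < p) {               /* find insertion point */
        prev = hbp;
        hbp = hbp->next;
    }
    if ((char *)p + p->sz == (char *)hbp) {     /* coalesce with successor */
        p->sz += hbp->sz;
        p->next = hbp->next;
    } else {
        p->next = hbp;
    }
    if (prev == 0) {
        demo_freelist = p;
    } else if ((char *)prev + prev->sz == (char *)p) {
        prev->sz += p->sz;                      /* coalesce with predecessor */
        prev->next = p->next;
    } else {
        prev->next = p;
    }
}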
diff --git a/black_list.c b/black_list.c
new file mode 100644
index 00000000..9471dc52
--- /dev/null
+++ b/black_list.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+# include "gc_private.h"
+
+/*
+ * We maintain several hash tables of hblks that have had false hits.
+ * Each contains one bit per hash bucket; If any page in the bucket
+ * has had a false hit, we assume that all of them have.
+ * False hits from the stack(s) are much more dangerous than false hits
+ * from elsewhere, since the former can pin a large object that spans the
+ * block, even though it does not start on the dangerous block.
+ */
+
+/*
+ * Externally callable routines are:
+
+ * GC_add_to_black_list_normal
+ * GC_add_to_black_list_stack
+ * GC_promote_black_lists
+ * GC_is_black_listed
+ *
+ * All require that the allocator lock is held.
+ */
+
+# define LOG_HT_ENTRIES 14 /* Collisions are likely if heap grows */
+ /* to more than 16K hblks = 64MB. */
+ /* Each hash table occupies 2K bytes. */
+# define HT_ENTRIES ((word)1 << LOG_HT_ENTRIES)
+# define HT_SIZE (HT_ENTRIES >> LOGWL)
+typedef word black_list_t[HT_SIZE];
+
+# define HASH(addr) (((addr) >> LOG_HBLKSIZE) & (HT_ENTRIES - 1))
+
+/* Pointers to individual tables. We replace one table by another by */
+/* switching these pointers. GC_black_lists is not used directly. */
+word * GC_new_normal_bl;
+ /* Nonstack false references seen at last complete */
+ /* collection. */
+word * GC_old_normal_bl;
+ /* Nonstack false references seen at preceding */
+ /* collection. */
+word * GC_incomplete_normal_bl;
+ /* Nonstack false references seen at current, */
+ /* not yet completed collection. */
+word * GC_new_stack_bl;
+word * GC_old_stack_bl;
+word * GC_incomplete_stack_bl;
+
+# define get_bl_entry_from_index(bl, index) \
+ (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+# define set_bl_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] |= 1 << modWORDSZ(index)
+# define clear_bl_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] &= ~(1 << modWORDSZ(index))
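/*
 * Editor's sketch, not part of the original patch: the one-bit-per-bucket
 * table that the three macros above manipulate.  An address hashes to a
 * bucket by page number, and the bucket's single bit records that some page
 * hashing there produced a false hit.  The demo_* names, the 4096-byte page
 * and the 16384-entry table are illustrative stand-ins for HBLKSIZE and
 * HT_ENTRIES.
 */
#define DEMO_LOG_PAGE 12
#define DEMO_ENTRIES  (1UL << 14)
#define DEMO_WORDBITS (8 * sizeof(unsigned long))
#define DEMO_HASH(a)  ((((unsigned long)(a)) >> DEMO_LOG_PAGE) & (DEMO_ENTRIES - 1))

static unsigned long demo_bl[DEMO_ENTRIES / DEMO_WORDBITS];

void demo_blacklist(void *addr)
{
    unsigned long i = DEMO_HASH(addr);
    demo_bl[i / DEMO_WORDBITS] |= 1UL << (i % DEMO_WORDBITS);
}

int demo_is_blacklisted(void *addr)
{
    unsigned long i = DEMO_HASH(addr);
    return (int)((demo_bl[i / DEMO_WORDBITS] >> (i % DEMO_WORDBITS)) & 1);
}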
+
+GC_bl_init()
+{
+# ifndef ALL_INTERIOR_POINTERS
+ GC_new_normal_bl = (word *)GC_scratch_alloc((word)(sizeof(black_list_t)));
+ GC_old_normal_bl = (word *)GC_scratch_alloc((word)(sizeof (black_list_t)));
+ GC_incomplete_normal_bl = (word *)GC_scratch_alloc
+ ((word)(sizeof(black_list_t)));
+# endif
+ GC_new_stack_bl = (word *)GC_scratch_alloc((word)(sizeof(black_list_t)));
+ GC_old_stack_bl = (word *)GC_scratch_alloc((word)(sizeof(black_list_t)));
+ GC_incomplete_stack_bl = (word *)GC_scratch_alloc
+ ((word)(sizeof(black_list_t)));
+}
+
+void GC_clear_bl(doomed)
+word *doomed;
+{
+ bzero((char *)doomed, (int)HT_SIZE*sizeof(word));
+}
+
+/* Signal the completion of a collection. Turn the incomplete black */
+/* lists into new black lists, etc. */
+void GC_promote_black_lists()
+{
+ word * very_old_normal_bl = GC_old_normal_bl;
+ word * very_old_stack_bl = GC_old_stack_bl;
+
+ GC_old_normal_bl = GC_new_normal_bl;
+ GC_new_normal_bl = GC_incomplete_normal_bl;
+ GC_old_stack_bl = GC_new_stack_bl;
+ GC_new_stack_bl = GC_incomplete_stack_bl;
+# ifndef ALL_INTERIOR_POINTERS
+ GC_clear_bl(very_old_normal_bl);
+# endif
+ GC_clear_bl(very_old_stack_bl);
+ GC_incomplete_normal_bl = very_old_normal_bl;
+ GC_incomplete_stack_bl = very_old_stack_bl;
+}
+
+# ifndef ALL_INTERIOR_POINTERS
+/* P is not a valid pointer reference, but it falls inside */
+/* the plausible heap bounds. */
+/* Add it to the normal incomplete black list if appropriate. */
+void GC_add_to_black_list_normal(p)
+word p;
+{
+ if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
+ {
+ register int index = HASH(p);
+
+ if (HDR(p) == 0 || get_bl_entry_from_index(GC_new_normal_bl, index)) {
+# ifdef PRINTBLACKLIST
+ if (!get_bl_entry_from_index(GC_incomplete_normal_bl, index)) {
+ GC_printf("Black listing (normal) 0x%lx\n",
+ (unsigned long) p);
+ }
+# endif
+ set_bl_entry_from_index(GC_incomplete_normal_bl, index);
+ } /* else this is probably just an interior pointer to an allocated */
+ /* object, and isn't worth black listing. */
+ }
+}
+# endif
+
+/* And the same for false pointers from the stack. */
+void GC_add_to_black_list_stack(p)
+word p;
+{
+ register int index = HASH(p);
+
+ if (HDR(p) == 0 || get_bl_entry_from_index(GC_new_stack_bl, index)) {
+# ifdef PRINTBLACKLIST
+ if (!get_bl_entry_from_index(GC_incomplete_stack_bl, index)) {
+ GC_printf("Black listing (stack) 0x%lx\n",
+ (unsigned long)p);
+ }
+# endif
+ set_bl_entry_from_index(GC_incomplete_stack_bl, index);
+ }
+}
+
+/*
+ * Is the block starting at h of size len bytes black listed? If so,
+ * return the address of the next plausible r such that (r, len) might not
+ * be black listed. (R may not actually be in the heap. We guarantee only
+ * that every smaller value of r after h is also black listed.)
+ * If (h,len) is not black listed, return 0.
+ * Knows about the structure of the black list hash tables.
+ */
+struct hblk * GC_is_black_listed(h, len)
+struct hblk * h;
+word len;
+{
+ register int index = HASH((word)h);
+ register word i;
+ word nblocks = divHBLKSZ(len);
+
+# ifndef ALL_INTERIOR_POINTERS
+ if (get_bl_entry_from_index(GC_new_normal_bl, index)
+ && get_bl_entry_from_index(GC_old_normal_bl, index)) {
+ return(h+1);
+ }
+# endif
+
+ for (i = 0; ; ) {
+ if (GC_new_stack_bl[divWORDSZ(index)] == 0) {
+ /* An easy case */
+ i += WORDSZ - modWORDSZ(index);
+ } else {
+ if (get_bl_entry_from_index(GC_new_stack_bl, index)
+ && get_bl_entry_from_index(GC_old_stack_bl, index)) {
+ return(h+i+1);
+ }
+ i++;
+ }
+ if (i >= nblocks) break;
+ index = HASH((word)(h+i));
+ }
+ return(0);
+}
+
diff --git a/cons.c b/cons.c
deleted file mode 100644
index 2d28d0e5..00000000
--- a/cons.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Silly implementation of Lisp cons. Intentionally wastes lots of space */
-/* to test collector. */
-# include <stdio.h>
-# include "cons.h"
-
-int extra_count = 0; /* Amount of space wasted in cons node */
-
-sexpr cons (x, y)
-sexpr x;
-sexpr y;
-{
- register sexpr r;
- register int i;
- register int *p;
-
- extra_count++;
- extra_count %= 3000;
- r = (sexpr) gc_malloc(8 + extra_count);
- for (p = (int *)r; ((char *)p) < ((char *)r) + extra_count + 8; p++) {
- if (*p) {
- fprintf(stderr, "Found nonzero at %X\n", p);
- abort(p);
- }
- *p = 13;
- }
- r -> sexpr_car = x;
- r -> sexpr_cdr = y;
- return(r);
-}
diff --git a/cons.h b/cons.h
deleted file mode 100644
index 300de3c4..00000000
--- a/cons.h
+++ /dev/null
@@ -1,30 +0,0 @@
-struct SEXPR {
- struct SEXPR * sexpr_car;
- struct SEXPR * sexpr_cdr;
-};
-
-typedef struct SEXPR * sexpr;
-
-extern sexpr cons();
-
-# define nil ((sexpr) 0)
-# define car(x) ((x) -> sexpr_car)
-# define cdr(x) ((x) -> sexpr_cdr)
-# define null(x) ((x) == nil)
-
-# define head(x) car(x)
-# define tail(x) cdr(x)
-
-# define caar(x) car(car(x))
-# define cadr(x) car(cdr(x))
-# define cddr(x) cdr(cdr(x))
-# define cdar(x) cdr(car(x))
-# define caddr(x) car(cdr(cdr(x)))
-
-# define first(x) car(x)
-# define second(x) cadr(x)
-# define third(x) caddr(x)
-
-# define list1(x) cons(x, nil)
-# define list2(x,y) cons(x, cons(y, nil))
-# define list3(x,y,z) cons(x, cons(y, cons(z, nil)))
diff --git a/correct-output b/correct-output
deleted file mode 100644
index 3528e161..00000000
--- a/correct-output
+++ /dev/null
@@ -1,8 +0,0 @@
-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50
-100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
-50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100
-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50
-100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
-50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
diff --git a/debug_malloc.c b/debug_malloc.c
new file mode 100644
index 00000000..f8b27d22
--- /dev/null
+++ b/debug_malloc.c
@@ -0,0 +1,308 @@
+# include "gc_private.h"
+# define START_FLAG ((word)0xfedcedcb)
+# define END_FLAG ((word)0xbcdecdef)
+ /* Stored both one past the end of user object, and one before */
+ /* the end of the object as seen by the allocator. */
+
+/* Object header */
+typedef struct {
+ char * oh_string; /* object descriptor string */
+ word oh_int; /* object descriptor integers */
+ word oh_sz; /* Original malloc arg. */
+ word oh_sf; /* start flag */
+} oh;
+/* The size of the above structure is assumed not to dealign things, */
+/* and to be a multiple of the word length. */
+
+#define DEBUG_BYTES (sizeof (oh) + sizeof (word))
+#define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
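/*
 * Editor's sketch, not part of the original patch: the object layout the
 * definitions above describe, namely
 *     [oh header][user data, sz bytes][end-flag word]
 * with oh_sf = START_FLAG ^ body and the trailing word = END_FLAG ^ body,
 * so an overwrite of either fence is detectable.  demo_* names are
 * hypothetical and plain malloc stands in for the collector's allocator.
 */
#include <stdlib.h>
#include <string.h>

typedef unsigned long demo_word;

struct demo_oh {                     /* same shape as the oh header above */
    const char *file;
    demo_word line;
    demo_word sz;                    /* original request, in bytes */
    demo_word start_flag;
};

#define DEMO_START ((demo_word)0xfedcedcb)
#define DEMO_END   ((demo_word)0xbcdecdef)

void *demo_debug_alloc(size_t sz, const char *file, int line)
{
    size_t words = (sz + sizeof(demo_word) - 1) / sizeof(demo_word);
    struct demo_oh *h = malloc(sizeof(*h) + (words + 1) * sizeof(demo_word));
    demo_word *body;

    if (h == 0) return 0;
    body = (demo_word *)(h + 1);
    h->file = file;
    h->line = (demo_word)line;
    h->sz = (demo_word)sz;
    h->start_flag = DEMO_START ^ (demo_word)body;
    memset(body, 0, words * sizeof(demo_word));
    body[words] = DEMO_END ^ (demo_word)body;    /* fence just past user data */
    return body;
}

int demo_fences_intact(void *p)      /* 1 if neither fence was overwritten */
{
    struct demo_oh *h = (struct demo_oh *)p - 1;
    demo_word *body = (demo_word *)p;
    size_t words = (h->sz + sizeof(demo_word) - 1) / sizeof(demo_word);

    return h->start_flag == (DEMO_START ^ (demo_word)body)
        && body[words] == (DEMO_END ^ (demo_word)body);
}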
+
+bool GC_debugging_started = FALSE;
+
+/* Check whether object with base pointer p has debugging info */
+/* p is assumed to point to a legitimate object in our part */
+/* of the heap. */
+bool GC_has_debug_info(p)
+ptr_t p;
+{
+ register oh * ohdr = (oh *)p;
+ register ptr_t body = (ptr_t)(ohdr + 1);
+ register word sz = GC_size((ptr_t) ohdr);
+
+ if (HBLKPTR((ptr_t)ohdr) != HBLKPTR((ptr_t)body)
+ || sz < sizeof (oh)) {
+ return(FALSE);
+ }
+ if (ohdr -> oh_sf == (START_FLAG ^ (word)body)) return(TRUE);
+ if (((word *)ohdr)[BYTES_TO_WORDS(sz)-1] == (END_FLAG ^ (word)body)) {
+ return(TRUE);
+ }
+ return(FALSE);
+}
+
+/* Store debugging info into p. Return displaced pointer. */
+/* Assumes we don't hold allocation lock. */
+ptr_t GC_store_debug_info(p, sz, string, integer)
+register ptr_t p; /* base pointer */
+word sz; /* bytes */
+char * string;
+word integer;
+{
+ register word * result = (word *)((oh *)p + 1);
+ DCL_LOCK_STATE;
+
+    /* There is some argument that we should disable signals here. */
+ /* But that's expensive. And this way things should only appear */
+ /* inconsistent while we're in the handler. */
+ LOCK();
+ ((oh *)p) -> oh_string = string;
+ ((oh *)p) -> oh_int = integer;
+ ((oh *)p) -> oh_sz = sz;
+ ((oh *)p) -> oh_sf = START_FLAG ^ (word)result;
+ ((word *)p)[BYTES_TO_WORDS(GC_size(p))-1] =
+ result[ROUNDED_UP_WORDS(sz)] = END_FLAG ^ (word)result;
+ UNLOCK();
+ return((ptr_t)result);
+}
+
+/* Check the object with debugging info at p */
+/* return NIL if it's OK. Else return clobbered */
+/* address. */
+ptr_t GC_check_annotated_obj(ohdr)
+register oh * ohdr;
+{
+ register ptr_t body = (ptr_t)(ohdr + 1);
+ register word gc_sz = GC_size((ptr_t)ohdr);
+ if (ohdr -> oh_sz + DEBUG_BYTES > gc_sz) {
+ return((ptr_t)(&(ohdr -> oh_sz)));
+ }
+ if (ohdr -> oh_sf != (START_FLAG ^ (word)body)) {
+ return((ptr_t)(&(ohdr -> oh_sf)));
+ }
+ if (((word *)ohdr)[BYTES_TO_WORDS(gc_sz)-1] != (END_FLAG ^ (word)body)) {
+ return((ptr_t)((word *)ohdr + BYTES_TO_WORDS(gc_sz)-1));
+ }
+ if (((word *)body)[ROUNDED_UP_WORDS(ohdr -> oh_sz)]
+ != (END_FLAG ^ (word)body)) {
+ return((ptr_t)((word *)body + ROUNDED_UP_WORDS(ohdr -> oh_sz)));
+ }
+ return(0);
+}
+
+void GC_print_obj(p)
+ptr_t p;
+{
+ register oh * ohdr = (oh *)GC_base(p);
+
+ GC_err_printf("0x%lx (", (unsigned long)ohdr + sizeof(oh));
+ GC_err_puts(ohdr -> oh_string);
+ GC_err_printf(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
+ (unsigned long)(ohdr -> oh_sz));
+}
+void GC_print_smashed_obj(p, clobbered_addr)
+ptr_t p, clobbered_addr;
+{
+ register oh * ohdr = (oh *)GC_base(p);
+
+ GC_err_printf("0x%lx in object at 0x%lx(", (unsigned long)clobbered_addr,
+ (unsigned long)p);
+ if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))) {
+ GC_err_printf("<smashed>, appr. sz = %ld)\n",
+ BYTES_TO_WORDS(GC_size((ptr_t)ohdr)));
+ } else {
+ GC_err_puts(ohdr -> oh_string);
+ GC_err_printf(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
+ (unsigned long)(ohdr -> oh_sz));
+ }
+}
+
+# ifdef __STDC__
+ extern_ptr_t GC_debug_malloc(size_t lb, char * s, int i)
+# else
+ extern_ptr_t GC_debug_malloc(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ extern_ptr_t result = GC_malloc(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+ GC_err_printf("GC_debug_malloc(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_debugging_started = TRUE;
+ GC_register_displacement((word)sizeof(oh));
+ }
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+
+# ifdef __STDC__
+ extern_ptr_t GC_debug_malloc_atomic(size_t lb, char * s, int i)
+# else
+ extern_ptr_t GC_debug_malloc_atomic(lb, s, i)
+ size_t lb;
+ char * s;
+ int i;
+# endif
+{
+ extern_ptr_t result = GC_malloc_atomic(lb + DEBUG_BYTES);
+
+ if (result == 0) {
+ GC_err_printf("GC_debug_malloc_atomic(%ld) returning NIL (",
+ (unsigned long) lb);
+ GC_err_puts(s);
+ GC_err_printf(":%ld)\n", (unsigned long)i);
+ return(0);
+ }
+ if (!GC_debugging_started) {
+ GC_debugging_started = TRUE;
+ GC_register_displacement((word)sizeof(oh));
+ }
+ return (GC_store_debug_info(result, (word)lb, s, (word)i));
+}
+# ifdef __STDC__
+ void GC_debug_free(extern_ptr_t p)
+# else
+ void GC_debug_free(p)
+ extern_ptr_t p;
+# endif
+{
+ register extern_ptr_t base = GC_base(p);
+ register ptr_t clobbered;
+
+ if (base == 0) {
+ GC_err_printf("Attempt to free invalid pointer %lx\n",
+ (unsigned long)p);
+ ABORT("free(invalid pointer)");
+ }
+ if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
+ GC_err_printf("GC_debug_free called on pointer %lx wo debugging info\n",
+ (unsigned long)p);
+ } else {
+ clobbered = GC_check_annotated_obj((oh *)base);
+ if (clobbered != 0) {
+ GC_err_printf("GC_debug_free: found smashed object at ");
+ GC_print_smashed_obj(p, clobbered);
+ }
+ }
+ GC_free(GC_base(p));
+}
+
+# ifdef __STDC__
+ extern_ptr_t GC_debug_realloc(extern_ptr_t p, size_t lb, char *s, int i)
+# else
+ extern_ptr_t GC_debug_realloc(p, lb, s, i)
+ extern_ptr_t p;
+ size_t lb;
+ char *s;
+ int i;
+# endif
+{
+ register extern_ptr_t base = GC_base(p);
+ register ptr_t clobbered;
+ register extern_ptr_t result = GC_debug_malloc(lb, s, i);
+ register size_t copy_sz = lb;
+ register size_t old_sz;
+
+ if (base == 0) {
+ GC_err_printf(
+ "Attempt to free invalid pointer %lx\n", (unsigned long)p);
+ ABORT("realloc(invalid pointer)");
+ }
+ if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
+ GC_err_printf(
+ "GC_debug_realloc called on pointer %lx wo debugging info\n",
+ (unsigned long)p);
+ return(GC_realloc(p, lb));
+ }
+ clobbered = GC_check_annotated_obj((oh *)base);
+ if (clobbered != 0) {
+ GC_err_printf("GC_debug_realloc: found smashed object at ");
+ GC_print_smashed_obj(p, clobbered);
+ }
+ old_sz = ((oh *)base) -> oh_sz;
+ if (old_sz < copy_sz) copy_sz = old_sz;
+ if (result == 0) return(0);
+ bcopy((char *)p, (char *)result, (int) copy_sz);
+ return(result);
+}
+
+/* Check all marked objects in the given block for validity */
+/*ARGSUSED*/
+void GC_check_heap_block(hbp, dummy)
+register struct hblk *hbp; /* ptr to current heap block */
+word dummy;
+{
+ register struct hblkhdr * hhdr = HDR(hbp);
+ register word sz = hhdr -> hb_sz;
+ register int word_no;
+ register word *p, *plim;
+
+ p = (word *)(hbp->hb_body);
+ word_no = HDR_WORDS;
+ plim = (word *)((((word)hbp) + HBLKSIZE)
+ - WORDS_TO_BYTES(sz));
+
+ /* go through all words in block */
+ do {
+ if( mark_bit_from_hdr(hhdr, word_no)
+ && GC_has_debug_info((ptr_t)p)) {
+ ptr_t clobbered = GC_check_annotated_obj((oh *)p);
+
+ if (clobbered != 0) {
+ GC_err_printf(
+ "GC_check_heap_block: found smashed object at ");
+ GC_print_smashed_obj((ptr_t)p, clobbered);
+ }
+ }
+ word_no += sz;
+ p += sz;
+ } while( p <= plim );
+}
+
+
+/* This assumes that all accessible objects are marked, and that */
+/* I hold the allocation lock. Normally called by collector. */
+void GC_check_heap()
+{
+ GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
+}
+
+struct closure {
+ GC_finalization_proc cl_fn;
+ extern_ptr_t cl_data;
+};
+
+# ifdef __STDC__
+ void * GC_make_closure(GC_finalization_proc fn, void * data)
+# else
+ extern_ptr_t GC_make_closure(fn, data)
+ GC_finalization_proc fn;
+ extern_ptr_t data;
+# endif
+{
+ struct closure * result =
+ (struct closure *) GC_malloc(sizeof (struct closure));
+
+ result -> cl_fn = fn;
+ result -> cl_data = data;
+ return((extern_ptr_t)result);
+}
+
+# ifdef __STDC__
+ void GC_debug_invoke_finalizer(void * obj, void * data)
+# else
+ void GC_debug_invoke_finalizer(obj, data)
+ char * obj;
+ char * data;
+# endif
+{
+ register struct closure * cl = (struct closure *) data;
+
+ (*(cl -> cl_fn))((extern_ptr_t)((char *)obj + sizeof(oh)), cl -> cl_data);
+}
\ No newline at end of file
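/*
 * Editor's usage sketch, not part of the original patch: GC_debug_malloc()
 * takes a descriptor string and integer, typically the call site, which
 * GC_print_obj() and GC_print_smashed_obj() above report back.  The wrapper
 * macro below is only a plausible convenience (later gc.h versions ship a
 * similar GC_MALLOC under GC_DEBUG); the declaration of GC_debug_malloc in
 * gc.h and the demo_* names are assumptions.
 */
# include "gc.h"

# define DEMO_GC_MALLOC(sz) GC_debug_malloc((size_t)(sz), __FILE__, __LINE__)

struct demo_node { struct demo_node *next; int value; };

struct demo_node *demo_make_node(int v)
{
    struct demo_node *n = (struct demo_node *)DEMO_GC_MALLOC(sizeof *n);

    if (n != 0) n->value = v;        /* fields arrive cleared by GC_malloc */
    return n;
}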
diff --git a/dynamic_load.c b/dynamic_load.c
new file mode 100644
index 00000000..80eda3a9
--- /dev/null
+++ b/dynamic_load.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 1991,1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ * Author: Bill Janssen
+ * Modified by: Hans Boehm
+ */
+#include "gc_private.h"
+#ifdef DYNAMIC_LOADING
+#if !defined(M68K_SUN) && !defined(SPARC)
+ --> We only know how to find data segments of dynamic libraries under SunOS 4.X
+#endif
+#include <sys/types.h>
+#include <stdio.h>
+#include <dlfcn.h>
+#include <link.h>
+#include <a.out.h>
+#include <stab.h>
+
+extern struct link_dynamic _DYNAMIC;
+
+void GC_setup_dynamic_loading()
+{
+ struct link_map *lm;
+ struct exec *e;
+
+ if (&_DYNAMIC == 0) {
+ /* No dynamic libraries. Furthermore, the rest of this would */
+        /* segfault. */
+ return;
+ }
+ for (lm = _DYNAMIC.ld_un.ld_1->ld_loaded;
+ lm != (struct link_map *) 0; lm = lm->lm_next)
+ {
+ e = (struct exec *) lm->lm_addr;
+ GC_add_roots_inner(
+ ((char *) (N_DATOFF(*e) + lm->lm_addr)),
+ ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)));
+ }
+}
+
+#ifdef DEFINE_DLOPEN
+char *GC_dlopen (path, mode)
+ char *path;
+ int mode;
+{
+ char *etext, *end;
+ struct link_map *lm;
+ struct exec *e;
+ char *handle;
+
+ handle = dlopen(path, mode);
+ if (handle == NULL)
+ {
+ fprintf (stderr,
+ "GC_sun_dlopen: dlopen(%s, %d) failed: %s.\n",
+ path, mode, dlerror());
+ return (NULL);
+ }
+
+ for (lm = _DYNAMIC.ld_un.ld_1->ld_loaded;
+ lm != (struct link_map *) 0; lm = lm->lm_next)
+ {
+ if (strcmp(path, lm->lm_name) == 0)
+ {
+ e = (struct exec *) lm->lm_addr;
+ etext = (void *) (N_DATOFF(*e) + lm->lm_addr);
+ end = (void *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr);
+ GC_add_roots (etext, end);
+ break;
+ }
+ }
+
+ if (lm == (struct link_map *) 0)
+ {
+ fprintf (stderr,
+ "GC_sun_dlopen: couldn't find \"%s\" in _DYNAMIC link list.\n",
+ path);
+ dlclose(handle);
+ return (NULL);
+ }
+ else
+ return (handle);
+}
+#endif
+#else
+int GC_no_dynamic_loading;
+#endif
diff --git a/finalize.c b/finalize.c
new file mode 100644
index 00000000..0ab5926c
--- /dev/null
+++ b/finalize.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+# define I_HIDE_POINTERS
+# include "gc.h"
+# include "gc_private.h"
+# ifdef __STDC__
+ typedef void * void_star;
+# else
+ typedef char * void_star;
+# endif
+
+# define LOG_TSIZE 7
+# define TSIZE (1 << LOG_TSIZE)
+# define HASH(addr) \
+ ((((word)(addr) >> 3) ^ ((word)(addr) >> (3+LOG_TSIZE))) \
+ & (TSIZE - 1))
+
+static struct disappearing_link {
+ word dl_hidden_base; /* Pointer to object base */
+ word dl_offset; /* byte offset within object. */
+ struct disappearing_link * dl_next;
+} * dl_head[TSIZE] = {0};
+
+static struct finalizable_object {
+ word fo_hidden_base; /* Pointer to object base */
+ GC_finalization_proc fo_fn; /* Finalizer. */
+ ptr_t fo_client_data;
+ word fo_object_size; /* In bytes. */
+ struct finalizable_object * fo_next;
+} * fo_head[TSIZE] = {0};
+
+# define ALLOC(x, t) t *x = (t *)GC_malloc(sizeof (t))
+
+int GC_register_disappearing_link(link)
+void_star * link;
+{
+ ptr_t base;
+ unsigned long offset;
+ struct disappearing_link *curr_dl;
+ int index;
+ /* Allocate before acquiring lock */
+ ALLOC(new_dl, struct disappearing_link);
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ base = (ptr_t)GC_base((void_star)link);
+ index = HASH(base);
+ offset = (ptr_t)link - base;
+ if (base == 0 || ((word)link & (ALIGNMENT-1)))
+ ABORT("Bad arg to GC_register_disappearing_link");
+ curr_dl = dl_head[index];
+ for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = curr_dl -> dl_next) {
+ if (curr_dl -> dl_hidden_base == HIDE_POINTER(base)
+ && curr_dl -> dl_offset == offset) {
+ UNLOCK();
+ ENABLE_SIGNALS();
+ GC_free((extern_ptr_t)new_dl);
+ return(1);
+ }
+ }
+ {
+ new_dl -> dl_hidden_base = HIDE_POINTER(base);
+ new_dl -> dl_offset = offset;
+ new_dl -> dl_next = dl_head[index];
+ dl_head[index] = new_dl;
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(0);
+ }
+}
+
+
+int GC_unregister_disappearing_link(link)
+void_star * link;
+{
+ ptr_t base;
+ unsigned long offset;
+ struct disappearing_link *curr_dl, *prev_dl;
+ int index;
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ base = (ptr_t)GC_base((void_star)link);
+ index = HASH(base);
+ offset = (ptr_t)link - base;
+    if (base == 0 || ((unsigned long)link & (ALIGNMENT-1))) {
+    	UNLOCK();
+    	ENABLE_SIGNALS();
+    	return(0);
+    }
+ prev_dl = 0; curr_dl = dl_head[index];
+ while (curr_dl != 0) {
+ if (curr_dl -> dl_hidden_base == HIDE_POINTER(base)
+ && curr_dl -> dl_offset == offset) {
+ if (prev_dl == 0) {
+ dl_head[index] = curr_dl -> dl_next;
+ } else {
+ prev_dl -> dl_next = curr_dl -> dl_next;
+ }
+ UNLOCK();
+ ENABLE_SIGNALS();
+ GC_free((extern_ptr_t)curr_dl);
+ return(1);
+ }
+        prev_dl = curr_dl;
+        curr_dl = curr_dl -> dl_next;
+ }
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(0);
+}
+
+bool GC_is_marked(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ return(mark_bit_from_hdr(hhdr, word_no));
+}
+
+void GC_set_mark_bit(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ set_mark_bit_from_hdr(hhdr, word_no);
+}
+
+void GC_clear_mark_bit(p)
+ptr_t p;
+{
+ register struct hblk *h = HBLKPTR(p);
+ register hdr * hhdr = HDR(h);
+ register int word_no = (word *)p - (word *)h;
+
+ clear_mark_bit_from_hdr(hhdr, word_no);
+}
+
+void GC_register_finalizer(obj, fn, cd, ofn, ocd)
+void_star obj;
+GC_finalization_proc fn;
+void_star cd;
+GC_finalization_proc * ofn;
+void_star * ocd;
+{
+ ptr_t base;
+ struct finalizable_object * curr_fo, * prev_fo;
+ int index;
+ /* Allocate before acquiring lock */
+ ALLOC(new_fo, struct finalizable_object);
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ base = (ptr_t)GC_base((void_star)obj);
+ index = HASH(base);
+ if (base != obj)
+ ABORT("Bad arg to GC_register_finalizer");
+ prev_fo = 0; curr_fo = fo_head[index];
+ while (curr_fo != 0) {
+ if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
+ if (ofn) *ofn = curr_fo -> fo_fn;
+ if (ocd) *ocd = (void_star) curr_fo -> fo_client_data;
+ if (fn == 0) {
+ /* Delete the structure for base. */
+ if (prev_fo == 0) {
+ fo_head[index] = curr_fo -> fo_next;
+ } else {
+ prev_fo -> fo_next = curr_fo -> fo_next;
+ }
+ UNLOCK();
+ ENABLE_SIGNALS();
+ GC_free((extern_ptr_t)curr_fo);
+ } else {
+ curr_fo -> fo_fn = fn;
+ curr_fo -> fo_client_data = (ptr_t)cd;
+ UNLOCK();
+ ENABLE_SIGNALS();
+ }
+ GC_free((extern_ptr_t)new_fo);
+ return;
+ }
+      prev_fo = curr_fo;
+      curr_fo = curr_fo -> fo_next;
+ }
+ {
+ if (ofn) *ofn = 0;
+ if (ocd) *ocd = 0;
+ if (fn == 0) {
+ UNLOCK();
+ ENABLE_SIGNALS();
+ GC_free((extern_ptr_t)new_fo);
+ return;
+ }
+ new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
+ new_fo -> fo_fn = fn;
+ new_fo -> fo_client_data = (ptr_t)cd;
+ new_fo -> fo_object_size = GC_size(base);
+ new_fo -> fo_next = fo_head[index];
+ fo_head[index] = new_fo;
+ }
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
+
+/* Called with world stopped. Cause disappearing links to disappear, */
+/* and invoke finalizers. */
+void GC_finalize()
+{
+ struct disappearing_link * curr_dl, * prev_dl, * next_dl;
+ struct finalizable_object * curr_fo, * prev_fo, * next_fo;
+ ptr_t real_ptr;
+ register int i;
+
+ /* Make disappearing links disappear */
+ for (i = 0; i < TSIZE; i++) {
+ curr_dl = dl_head[i];
+ prev_dl = 0;
+ while (curr_dl != 0) {
+ real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_base);
+ if (!GC_is_marked(real_ptr)) {
+ *(word *)(real_ptr + curr_dl -> dl_offset) = 0;
+ next_dl = curr_dl -> dl_next;
+ if (prev_dl == 0) {
+ dl_head[i] = next_dl;
+ } else {
+ prev_dl -> dl_next = next_dl;
+ }
+ GC_clear_mark_bit((ptr_t)curr_dl);
+ curr_dl = next_dl;
+ } else {
+ prev_dl = curr_dl;
+ curr_dl = curr_dl -> dl_next;
+ }
+ }
+ }
+ /* Mark all objects reachable via chains of 1 or more pointers */
+ /* from finalizable objects. */
+ for (i = 0; i < TSIZE; i++) {
+ for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = curr_fo -> fo_next) {
+ real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ if (!GC_is_marked(real_ptr)) {
+ GC_mark_all(real_ptr, real_ptr + curr_fo -> fo_object_size);
+ }
+ /*
+ if (GC_is_marked(real_ptr)) {
+ --> Report finalization cycle here, if desired
+ }
+ */
+ }
+ }
+ /* Invoke finalization code for all objects that are still */
+ /* unreachable. */
+ for (i = 0; i < TSIZE; i++) {
+ curr_fo = fo_head[i];
+ prev_fo = 0;
+ while (curr_fo != 0) {
+ real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ if (!GC_is_marked(real_ptr)) {
+ (*(curr_fo -> fo_fn))(real_ptr, curr_fo -> fo_client_data);
+ GC_set_mark_bit(real_ptr);
+ next_fo = curr_fo -> fo_next;
+ if (prev_fo == 0) {
+ fo_head[i] = next_fo;
+ } else {
+ prev_fo -> fo_next = next_fo;
+ }
+ if (!GC_is_marked((ptr_t)curr_fo)) {
+ ABORT("GC_finalize: found accessible unmarked object\n");
+ }
+ GC_clear_mark_bit((ptr_t)curr_fo);
+ curr_fo = next_fo;
+ } else {
+ prev_fo = curr_fo;
+ curr_fo = curr_fo -> fo_next;
+ }
+ }
+ }
+}
+
+# ifdef __STDC__
+ void_star GC_call_with_alloc_lock(GC_fn_type fn, void_star client_data)
+# else
+ void_star GC_call_with_alloc_lock(fn, client_data)
+ GC_fn_type fn;
+ void_star client_data;
+# endif
+{
+    void_star result;
+    DCL_LOCK_STATE;
+
+    DISABLE_SIGNALS();
+    LOCK();
+    result = (void_star)(*fn)(client_data);
+    UNLOCK();
+    ENABLE_SIGNALS();
+    return(result);
+}
+
diff --git a/gc.h b/gc.h
index ba33af9b..e090522d 100644
--- a/gc.h
+++ b/gc.h
@@ -5,736 +5,279 @@
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
- * Permission is hereby granted to copy this compiler for any purpose,
+ * Permission is hereby granted to copy this garbage collector for any purpose,
* provided the above notices are retained on all copies.
*/
-/* Machine specific parts contributed by various people. See README file. */
+#ifndef GC_H
-/*********************************/
-/* */
-/* Definitions for conservative */
-/* collector */
-/* */
-/*********************************/
+# define GC_H
-/*********************************/
-/* */
-/* Easily changeable parameters */
-/* */
-/*********************************/
+# include <stddef.h>
-# if defined(sun) && defined(mc68000)
-# define M68K_SUN
-# define mach_type_known
-# endif
-# if defined(hp9000s300)
-# define M68K_HP
-# define mach_type_known
-# endif
-# if defined(vax)
-# define VAX
-# ifdef ultrix
-# define ULTRIX
-# else
-# define BSD
-# endif
-# define mach_type_known
-# endif
-# if defined(mips)
-# define MIPS
-# ifdef ultrix
-# define ULTRIX
-# else
-# define RISCOS
-# endif
-# define mach_type_known
-# endif
-# if defined(sequent) && defined(i386)
-# define I386
-# define mach_type_known
-# endif
-# if defined(ibm032)
-# define RT
-# define mach_type_known
-# endif
-# if defined(sun) && defined(sparc)
-# define SPARC
-# define mach_type_known
-# endif
-# if defined(_IBMR2)
-# define IBMRS6000
-# define mach_type_known
-# endif
-
-
-/* Feel free to add more clauses here */
-
-/* Or manually define the machine type here. A machine type is */
-/* characterized by the architecture and assembler syntax. Some */
-/* machine types are further subdivided by OS. In that case, we use */
-/* the macros ULTRIX, RISCOS, and BSD to distinguish. */
-/* The distinction in these cases is usually the stack starting address */
-# ifndef mach_type_known
-# define M68K_SUN /* Guess "Sun" */
- /* Mapping is: M68K_SUN ==> Sun3 assembler, */
- /* M68K_HP ==> HP9000/300, */
- /* I386 ==> Sequent Symmetry, */
- /* NS32K ==> Encore Multimax, */
- /* MIPS ==> R2000 or R3000 */
- /* (RISCOS, ULTRIX variants) */
- /* VAX ==> DEC VAX */
- /* (BSD, ULTRIX variants) */
-# endif
-
-#define PRINTSTATS /* Print garbage collection statistics */
- /* For less verbose output, undefine in reclaim.c */
-
-#define PRINTTIMES /* Print the amount of time consumed by each garbage */
- /* collection. */
-
-#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
- /* whether the objects are atomic or composite, and */
- /* whether or not the block was found to be empty */
- /* duing the reclaim phase. Typically generates */
- /* about one screenful per garbage collection. */
-#undef PRINTBLOCKS
-
-#ifdef SILENT
-# ifdef PRINTSTATS
-# undef PRINTSTATS
-# endif
-# ifdef PRINTTIMES
-# undef PRINTTIMES
-# endif
-# ifdef PRINTNBLOCKS
-# undef PRINTNBLOCKS
-# endif
-#endif
-
-#define HBLK_MAP /* Maintain a map of all potential heap blocks */
- /* starting at heapstart. */
- /* Normally, this performs about as well as the */
- /* standard stack of chunk pointers that is used */
- /* otherwise. It loses if a small section of the */
- /* heap consists of garbage collected objects. */
- /* It is ESSENTIAL if pointers to object interiors */
- /* are considered valid, i.e. if INTERIOR_POINTERS */
- /* is defined. */
-#undef HBLK_MAP
-
-#define MAP_SIZE 8192 /* total data size < MAP_SIZE * HBLKSIZE = 32 Meg */
-#define MAXHBLKS 4096 /* Maximum number of chunks which can be */
- /* allocated */
-#define INTERIOR_POINTERS
- /* Follow pointers to the interior of an object. */
- /* Substantially increases the probability of */
- /* unnnecessary space retention. May be necessary */
- /* with gcc -O or other C compilers that may clobber */
- /* values of dead variables prematurely. Pcc */
- /* derived compilers appear to pose no such problems. */
- /* Empirical evidence suggests that this is probably */
- /* still OK for most purposes, so long as pointers */
- /* are known to be 32 bit aligned. The combination */
- /* of INTERIOR_POINTERS and UNALIGNED (e.g. on a */
- /* Sun 3 with the standard compiler) causes easily */
- /* observable spurious retention and performance */
- /* degradation. */
-#undef INTERIOR_POINTERS
-
-#ifdef SPARC
-# define ALIGN_DOUBLE /* Align objects of size > 1 word on 2 word */
- /* boundaries. Wasteful of memory, but */
- /* apparently required by SPARC architecture. */
-
-#endif
-
-#if defined(INTERIOR_POINTERS) && !defined(HBLK_MAP)
- --> check for interior pointers requires a heap block map
-#endif
-
-#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
- /* free lists are actually maintained. This applies */
- /* only to the top level routines in misc.c, not to */
- /* user generated code that calls allocobj and */
- /* allocaobj directly. */
- /* Slows down average programs slightly. May however */
- /* substantially reduce fragmentation if allocation */
- /* request sizes are widely scattered. */
-#undef MERGE_SIZES
-
-/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
-# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
-# define MERGE_SIZES
-# endif
-
-
-/* For PRINTTIMES to tell the truth, we need to know the value of HZ for
- this system. */
-
-#if defined(M68K_HP) || defined(M68K_SUN) || defined(SPARC)
-# include <sys/param.h>
-# define FLOAT_HZ (double)HZ
-#else
-# define FLOAT_HZ 60.0 /* Guess that we're in the U.S. */
-#endif
-
-#ifdef M68K_SUN
-# define UNALIGNED /* Pointers are not longword aligned */
-# define ALIGNMENT 2 /* Pointers are aligned on 2 byte boundaries */
- /* by the Sun C compiler. */
-#else
-# ifdef VAX
-# undef UNALIGNED /* Pointers are longword aligned by 4.2 C compiler */
-# define ALIGNMENT 4
-# else
-# ifdef RT
-# undef UNALIGNED
-# define ALIGNMENT 4
-# else
-# ifdef SPARC
-# undef UNALIGNED
-# define ALIGNMENT 4
-# else
-# ifdef I386
-# undef UNALIGNED /* Sequent compiler aligns pointers */
-# define ALIGNMENT 4
-# else
-# ifdef NS32K
-# undef UNALIGNED /* Pointers are aligned on NS32K */
-# define ALIGNMENT 4
-# else
-# ifdef MIPS
-# undef UNALIGNED /* MIPS hardware requires pointer */
- /* alignment */
-# define ALIGNMENT 4
-# else
-# ifdef M68K_HP
-# define UNALIGNED
-# define ALIGNMENT 2 /* 2 byte alignment inside struct/union, */
- /* 4 bytes elsewhere */
-# else
-# ifdef IBMRS6000
-# undef UNALIGNED
-# define ALIGNMENT 4
-# else
- --> specify alignment <--
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
-
-# ifdef RT
-# define STACKTOP ((word *) 0x1fffd800)
+/* Define word and signed_word to be unsigned and signed types of the	*/
+/* same size as char * or void *.  There seems to be no way to do this	*/
+/* even semi-portably.  The following is probably no better/worse	*/
+/* than almost anything else.						*/
+/* The ANSI standard suggests that size_t and ptrdiff_t might be	*/
+/* better choices.  But those appear to have incorrect definitions	*/
+/* on many systems.  Notably "typedef int size_t" seems to be both	*/
+/* frequent and WRONG.							*/
+typedef unsigned long word;
+typedef long signed_word;
+
+/* Public read-only variables */
+
+extern word GC_heapsize; /* Heap size in bytes */
+
+extern word GC_gc_no; /* Counter incremented per collection. */
+ /* Includes empty GCs at startup. */
+
+/* Public R/W variables */
+
+extern int GC_dont_gc;	/* Don't collect unless explicitly requested, e.g. */
+			/* because it's not safe.			    */
+
+extern int GC_dont_expand;
+			/* Don't expand heap unless explicitly requested    */
+ /* or forced to. */
+
+extern word GC_non_gc_bytes;
+ /* Bytes not considered candidates for collection. */
+
+
+extern word GC_free_space_divisor;
+ /* We try to make sure that we allocate at */
+ /* least N/GC_free_space_divisor bytes between */
+ /* collections, where N is the heap size plus */
+ /* a rough estimate of the root set size. */
+ /* Initially, GC_free_space_divisor = 4. */
+ /* Increasing its value will use less space */
+ /* but more collection time. Decreasing it */
+ /* will appreciably decrease collection time */
+			/* at the expense of space.			    */
+ /* GC_free_space_divisor = 1 will effectively */
+ /* disable collections. */
+
+/* Public procedures */
+/*
+ * general purpose allocation routines, with roughly malloc calling conv.
+ * The atomic versions promise that no relevant pointers are contained
+ * in the object. The nonatomic version guarantees that the new object
+ * is cleared.
+ */
+# ifdef __STDC__
+ extern void * GC_malloc(size_t size_in_bytes);
+ extern void * GC_malloc_atomic(size_t size_in_bytes);
# else
-# ifdef I386
-# define STACKTOP ((word *) 0x3ffff000) /* For Sequent */
-# else
-# ifdef NS32K
-# define STACKTOP ((word *) 0xfffff000) /* for Encore */
-# else
-# ifdef MIPS
-# ifdef ULTRIX
-# define STACKTOP ((word *) 0x7fffc000)
-# else
-# ifdef RISCOS
-# define STACKTOP ((word *) 0x7ffff000)
- /* Could probably be slightly lower since */
- /* startup code allocates lots of junk */
-# else
- --> fix it
-# endif
-# endif
-# else
-# ifdef M68K_HP
-# define STACKTOP ((word *) 0xffeffffc)
- /* empirically determined. seems to work. */
-# else
-# ifdef IBMRS6000
-# define STACKTOP ((word *) 0x2ff80000)
-# else
-# if defined(VAX) && defined(ULTRIX)
-# define STACKTOP ((word *) 0x7fffc800)
-# else
- /* other VAXes, SPARC, and various flavors of Sun 2s and Sun 3s use */
- /* the default heuristic, which is to take the address of a local */
- /* variable in gc_init, and round it up to the next multiple */
- /* of 16 Meg. This is crucial on Suns, since various models */
- /* that are supposed to be able to share executables, do not */
- /* use the same stack base. In particular, Sun 3/80s are */
- /* different from other Sun 3s. */
- /* This probably works on some more of the above machines. */
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
+ extern char * GC_malloc(/* size_in_bytes */);
+ extern char * GC_malloc_atomic(/* size_in_bytes */);
# endif
-/* Start of data segment for each of the above systems. Note that the */
-/* default case works only for contiguous text and data, such as on a */
-/* Vax. */
-# ifdef M68K_SUN
-# define DATASTART ((char *)((((long) (&etext)) + 0x1ffff) & ~0x1ffff))
+/* Explicitly deallocate an object. Dangerous if used incorrectly. */
+/* Requires a pointer to the base of an object. */
+# ifdef __STDC__
+ extern void GC_free(void * object_addr);
# else
-# ifdef RT
-# define DATASTART ((char *) 0x10000000)
-# else
-# ifdef I386
-# define DATASTART ((char *)((((long) (&etext)) + 0xfff) & ~0xfff))
-# else
-# ifdef NS32K
- extern char **environ;
-# define DATASTART ((char *)(&environ))
- /* hideous kludge: environ is the first */
- /* word in crt0.o, and delimits the start */
- /* of the data segment, no matter which */
- /* ld options were passed through. */
-# else
-# ifdef MIPS
-# define DATASTART 0x10000000
- /* Could probably be slightly higher since */
- /* startup code allocates lots of junk */
-# else
-# ifdef M68K_HP
-# define DATASTART ((char *)((((long) (&etext)) + 0xfff) & ~0xfff))
-# else
-# ifdef IBMRS6000
-# define DATASTART ((char *)0x20000000)
-# else
-# define DATASTART (&etext)
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
+ extern void GC_free(/* object_addr */);
# endif
-# define HINCR 16 /* Initial heap increment, in blocks of 4K */
-# define MAXHINCR 512 /* Maximum heap increment, in blocks */
-# define HINCR_MULT 3 /* After each new allocation, hincr is multiplied */
-# define HINCR_DIV 2 /* by HINCR_MULT/HINCR_DIV */
-# define GC_MULT 3 /* Don't collect if the fraction of */
- /* non-collectable memory in the heap */
- /* exceeds GC_MUL/GC_DIV */
-# define GC_DIV 4
-
-# define NON_GC_HINCR 8 /* Heap increment if most of heap if collection */
- /* was suppressed because most of heap is not */
- /* collectable */
-
-/* heap address bounds. These are extreme bounds used for sanity checks. */
-/* HEAPLIM may have to be increased for machines with incredibly large */
-/* amounts of memory. */
-
-#ifdef RT
-# define HEAPSTART 0x10000000
-# define HEAPLIM 0x1fff0000
-#else
-# if defined(M68K_SUN) || defined(M68K_HP)
-# define HEAPSTART 0x00010000
-# define HEAPLIM 0x04000000
+/* Return a pointer to the base (lowest address) of an object given */
+/* a pointer to a location within the object. */
+/* Return 0 if displaced_pointer doesn't point to within a valid */
+/* object. */
+# ifdef __STDC__
+ void * GC_base(void * displaced_pointer);
# else
-# ifdef SPARC
-# define HEAPSTART 0x00010000
-# define HEAPLIM 0x10000000
-# else
-# ifdef VAX
-# define HEAPSTART 0x400
-# define HEAPLIM 0x10000000
-# else
-# ifdef I386
-# define HEAPSTART 0x1000
-# define HEAPLIM 0x10000000
-# else
-# ifdef NS32K
-# define HEAPSTART 0x2000
-# define HEAPLIM 0x10000000
-# else
-# ifdef MIPS
-# define HEAPSTART 0x10000000
-# define HEAPLIM 0x20000000
-# else
-# ifdef IBMRS6000
-# define HEAPSTART 0x20000000
-# define HEAPLIM 0x2ff70000
-# else
- --> values unknown <--
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
+ char * GC_base(/* char * displaced_pointer */);
# endif
-#endif
-
-/*********************************/
-/* */
-/* Machine-dependent defines */
-/* */
-/*********************************/
-
-#define WORDS_TO_BYTES(x) ((x)<<2)
-#define BYTES_TO_WORDS(x) ((x)>>2)
-
-#define WORDSZ 32
-#define LOGWL 5 /* log[2] of above */
-#define BYTES_PER_WORD (sizeof (word))
-#define ONES 0xffffffff
-#define MSBYTE 0xff000000
-#define SIGNB 0x80000000
-#define MAXSHORT 0x7fff
-#define modHALFWORDSZ(n) ((n) & 0xf) /* mod n by size of half word */
-#define divHALFWORDSZ(n) ((n) >> 4) /* divide n by size of half word */
-#define modWORDSZ(n) ((n) & 0x1f) /* mod n by size of word */
-#define divWORDSZ(n) ((n) >> 5) /* divide n by size of word */
-#define twice(n) ((n) << 1) /* double n */
-
-typedef unsigned long word;
-
-#define TRUE 1
-#define FALSE 0
-
-/*********************/
-/* */
-/* Size Parameters */
-/* */
-/*********************/
-
-/* heap block size, bytes */
-/* for RT see comment below */
-
-#define HBLKSIZE 0x1000
-
-
-/* max size objects supported by freelist (larger objects may be */
-/* allocated, but less efficiently) */
-/* asm(".set MAXOBJSZ,0x200") if HBLKSIZE/2 == 0x200 */
-
-#define MAXOBJSZ (HBLKSIZE/8)
- /* Should be BYTES_TO_WORDS(HBLKSIZE/2), but a cpp */
- /* misfeature prevents that. */
-#define MAXAOBJSZ (HBLKSIZE/8)
-
-# define divHBLKSZ(n) ((n) >> 12)
-
-# define modHBLKSZ(n) ((n) & 0xfff)
-
-# define HBLKPTR(objptr) ((struct hblk *)(((long) (objptr)) & ~0xfff))
-
-
-
-/********************************************/
-/* */
-/* H e a p B l o c k s */
-/* */
-/********************************************/
-/* heap block header */
-#define HBLKMASK (HBLKSIZE-1)
-
-#define BITS_PER_HBLK (HBLKSIZE * 8)
-
-#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/WORDSZ)
- /* upper bound */
- /* We allocate 1 bit/word. Only the first word */
- /* in each object is actually marked. */
-
-# ifdef ALIGN_DOUBLE
-# define MARK_BITS_SZ (((MARK_BITS_PER_HBLK + 2*WORDSZ - 1)/(2*WORDSZ))*2)
+/* Given a pointer to the base of an object, return its size in bytes. */
+/* The returned size may be slightly larger than what was originally */
+/* requested. */
+# ifdef __STDC__
+ size_t GC_size(void * object_addr);
# else
-# define MARK_BITS_SZ ((MARK_BITS_PER_HBLK + WORDSZ - 1)/WORDSZ)
+ size_t GC_size(/* char * object_addr */);
# endif
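+
+/* A minimal usage sketch (illustrative only; the names are made up):	*/
+/*									*/
+/*	char * p = (char *)GC_malloc(100);				*/
+/*	char * q = p + 17;	(an interior pointer into p's object)	*/
+/*									*/
+/* Here GC_base(q) returns p, and GC_size(p) returns at least 100	*/
+/* (the object size may have been rounded up).				*/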
- /* Upper bound on number of mark words per heap block */
-struct hblkhdr {
- long hbh_sz; /* sz > 0 ==> objects are sz-tuples of poss. pointers */
- /* sz < 0 ==> objects are sz-tuples not pointers */
- /* if free, the size in bytes of the whole block */
- /* Misc.c knows that hbh_sz comes first. */
-# ifndef HBLK_MAP
- struct hblk ** hbh_index; /* Pointer to heap block list entry */
- /* for this block */
+/* For compatibility with C library. This is occasionally faster than */
+/* a malloc followed by a bcopy. But if you rely on that, either here */
+/* or with the standard C library, your code is broken. In my */
+/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
+# ifdef __STDC__
+ extern void * GC_realloc(void * old_object, size_t new_size_in_bytes);
# else
-# ifdef ALIGN_DOUBLE
- /* Add another 1 word field to make the total even. Gross, but ... */
- long hbh_dummy;
-# endif
+ extern char * GC_realloc(/* old_object, new_size_in_bytes */);
# endif
- struct hblk * hbh_next; /* Link field for hblk free list */
- long hbh_mask; /* If hbh_mask >= 0 then: */
- /* x % (4 * hbh_sz) == x & hbh_mask */
- /* sz is a power of 2 and < the size of a heap */
- /* block. */
- /* A hack to speed up pointer validity check on */
- /* machines with slow division. */
- long hbh_marks[MARK_BITS_SZ];
- /* Bit i in the array refers to the */
- /* object starting at the ith word (header */
- /* INCLUDED) in the heap block. */
- /* For free blocks, hbh_marks[0] = 1, indicates */
- /* block is uninitialized. */
-};
-/* heap block body */
-# define BODY_SZ ((HBLKSIZE-sizeof(struct hblkhdr))/sizeof(word))
-
-struct hblk {
- struct hblkhdr hb_hdr;
- word hb_body[BODY_SZ];
-};
-
-# define hb_sz hb_hdr.hbh_sz
-# ifndef HBLK_MAP
-# define hb_index hb_hdr.hbh_index
-# endif
-# define hb_marks hb_hdr.hbh_marks
-# define hb_next hb_hdr.hbh_next
-# define hb_uninit hb_hdr.hbh_marks[0]
-# define hb_mask hb_hdr.hbh_mask
-
-/* lists of all heap blocks and free lists */
-/* These are grouped together in a struct */
-/* so that they can be easily skipped by the */
-/* mark routine. */
-/* Mach_dep.c knows about the internals */
-/* of this structure. */
-
-struct __gc_arrays {
- struct obj * _aobjfreelist[MAXAOBJSZ+1];
- /* free list for atomic objs*/
- struct obj * _objfreelist[MAXOBJSZ+1];
- /* free list for objects */
-# ifdef HBLK_MAP
- char _hblkmap[MAP_SIZE];
-# define HBLK_INVALID 0 /* Not administered by collector */
-# define HBLK_VALID 0x7f /* Beginning of a valid heap block */
- /* A value n, 0 < n < 0x7f denotes the continuation of a valid heap */
- /* block which starts at the current address - n * HBLKSIZE or earlier */
+/* Explicitly increase the heap size. */
+/* Returns 0 on failure, 1 on success. */
+extern int GC_expand_hp(/* number_of_4K_blocks */);
+
+/* Clear the set of root segments */
+extern void GC_clear_roots();
+
+/* Add a root segment */
+extern void GC_add_roots(/* low_address, high_address_plus_1 */);
+
+/* Add a displacement to the set of those considered valid by the */
+/* collector. GC_register_displacement(n) means that if p was returned */
+/* by GC_malloc, then (char *)p + n will be considered to be a valid */
+/* pointer to p.  N must be small and less than the size of p.		*/
+/* (All pointers to the interior of objects from the stack are */
+/* considered valid in any case. This applies to heap objects and */
+/* static data.) */
+/* Preferably, this should be called before any other GC procedures. */
+/* Calling it later adds to the probability of excess memory */
+/* retention. */
+void GC_register_displacement(/* n */);
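+/* For instance (an illustrative sketch, not a requirement): after	*/
+/*	GC_register_displacement(4);					*/
+/* a heap or static word containing p + 4, where p was returned by	*/
+/* GC_malloc, suffices by itself to keep p's object from being		*/
+/* collected.								*/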
+
+/* Explicitly trigger a collection. */
+void GC_gcollect();
+
+/* Debugging (annotated) allocation. GC_gcollect will check */
+/* objects allocated in this way for overwrites, etc. */
+# ifdef __STDC__
+ extern void * GC_debug_malloc(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void * GC_debug_malloc_atomic(size_t size_in_bytes,
+ char * descr_string, int descr_int);
+ extern void GC_debug_free(void * object_addr);
+ extern void * GC_debug_realloc(void * old_object,
+ size_t new_size_in_bytes,
+ char * descr_string, int descr_int);
# else
- struct hblk * _hblklist[MAXHBLKS];
+ extern char * GC_debug_malloc(/* size_in_bytes, descr_string, descr_int */);
+ extern char * GC_debug_malloc_atomic(/* size_in_bytes, descr_string,
+ descr_int */);
+ extern void GC_debug_free(/* object_addr */);
+ extern char * GC_debug_realloc(/* old_object, new_size_in_bytes,
+ descr_string, descr_int */);
# endif
-};
-
-extern struct __gc_arrays _gc_arrays;
-
-# define objfreelist _gc_arrays._objfreelist
-# define aobjfreelist _gc_arrays._aobjfreelist
-# ifdef HBLK_MAP
-# define hblkmap _gc_arrays._hblkmap
+# ifdef GC_DEBUG
+# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
+ __LINE__)
+# define GC_FREE(p) GC_debug_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(GC_base(p), GC_debug_invoke_finalizer, \
+ GC_make_closure(f,d), of, od)
# else
-# define hblklist _gc_arrays._hblklist
+# define GC_MALLOC(sz) GC_malloc(sz)
+# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
+# define GC_REALLOC(old, sz) GC_realloc(old, sz)
+# define GC_FREE(p) GC_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(p, f, d, of, od)
# endif
-# define begin_gc_arrays ((char *)(&_gc_arrays))
-# define end_gc_arrays (((char *)(&_gc_arrays)) + (sizeof _gc_arrays))
-
-struct hblk ** last_hblk; /* Pointer to one past the real end of hblklist */
-
-struct hblk * hblkfreelist;
-
-extern long heapsize; /* Heap size in bytes */
-
-long hincr; /* current heap increment, in blocks */
-
-/* Operations */
-# define update_hincr hincr = (hincr * HINCR_MULT)/HINCR_DIV; \
- if (hincr > MAXHINCR) {hincr = MAXHINCR;}
-# define HB_SIZE(p) abs((p) -> hb_sz)
-# define abs(x) ((x) < 0? (-(x)) : (x))
-
-/* procedures */
-
-extern void
-freehblk();
-
-extern struct hblk *
-allochblk();
-
-/****************************/
-/* */
-/* Objects */
-/* */
-/****************************/
-
-/* object structure */
-
-struct obj {
- union {
- struct obj *oun_link; /* --> next object in freelist */
-# define obj_link obj_un.oun_link
- word oun_component[1]; /* treats obj as list of words */
-# define obj_component obj_un.oun_component
- } obj_un;
-};
-
-/* Test whether something points to a legitimate heap object */
-
-
-extern char end;
-
-# ifdef HBLK_MAP
- char * heapstart; /* A lower bound on all heap addresses */
- /* Known to be HBLKSIZE aligned. */
-# endif
-
-char * heaplim; /* 1 + last address in heap */
-
-word * stacktop; /* 1 + highest address in stack. Set by gc_init. */
-
-/* Check whether the given HBLKSIZE aligned hblk pointer refers to the */
-/* beginning of a legitimate chunk. */
-/* Assumes that *p is addressable */
-# ifdef HBLK_MAP
-# define is_hblk(p) (hblkmap[divHBLKSZ(((long)p) - ((long)heapstart))] \
- == HBLK_VALID)
+/* Finalization. Some of these primitives are grossly unsafe. */
+/* The idea is to make them both cheap, and sufficient to build */
+/* a safer layer, closer to PCedar finalization. */
+/* The interface represents my conclusions from a long discussion */
+/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
+/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
+/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
+# ifdef __STDC__
+ typedef void (*GC_finalization_proc)(void * obj, void * client_data);
# else
-# define is_hblk(p) ( (p) -> hb_index >= hblklist \
- && (p) -> hb_index < last_hblk \
- && *((p)->hb_index) == (p))
-# endif
-# ifdef INTERIOR_POINTERS
- /* Return the hblk_map entry for the pointer p */
-# define get_map(p) (hblkmap[divHBLKSZ(((long)p) - ((long)heapstart))])
+ typedef void (*GC_finalization_proc)(/* void * obj, void * client_data */);
# endif
-
-# ifdef INTERIOR_POINTERS
- /* Return the word displacement of the beginning of the object to */
- /* which q points. q is an address inside hblk p for objects of size s */
- /* with mask m corresponding to s. */
-# define get_word_no(q,p,s,m) \
- (((long)(m)) >= 0 ? \
- (((((long)q) - ((long)p) - (sizeof (struct hblkhdr))) & ~(m)) \
- + (sizeof (struct hblkhdr)) >> 2) \
- : ((((long)q) - ((long)p) - (sizeof (struct hblkhdr)) >> 2) \
- / (s)) * (s) \
- + ((sizeof (struct hblkhdr)) >> 2))
+
+void GC_register_finalizer(/* void * obj,
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd */);
+ /* When obj is no longer accessible, invoke */
+ /* (*fn)(obj, cd). If a and b are inaccessible, and */
+ /* a points to b (after disappearing links have been */
+ /* made to disappear), then only a will be */
+ /* finalized. (If this does not create any new */
+ /* pointers to b, then b will be finalized after the */
+ /* next collection.) Any finalizable object that */
+ /* is reachable from itself by following one or more */
+ /* pointers will not be finalized (or collected). */
+ /* Thus cycles involving finalizable objects should */
+ /* be avoided, or broken by disappearing links. */
+ /* fn is invoked with the allocation lock held. It may */
+ /* not allocate. (Any storage it might need */
+ /* should be preallocated and passed as part of cd.) */
+ /* fn should terminate as quickly as possible, and */
+ /* defer extended computation. */
+ /* All but the last finalizer registered for an object */
+	/* are ignored.						*/
+ /* Finalization may be removed by passing 0 as fn. */
+ /* The old finalizer and client data are stored in */
+ /* *ofn and *ocd. */
+ /* Fn is never invoked on an accessible object, */
+ /* provided hidden pointers are converted to real */
+ /* pointers only if the allocation lock is held, and */
+ /* such conversions are not performed by finalization */
+ /* routines. */
+
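+/* A typical registration, sketched for illustration only (the type,	*/
+/* finalizer, and client data names here are invented):		*/
+/*									*/
+/*	void my_finalizer(obj, cd)					*/
+/*	void_star obj; void_star cd;					*/
+/*	{ ... release any external resources tied to obj ... }		*/
+/*									*/
+/*	GC_finalization_proc ofn; void * ocd;				*/
+/*	struct widget * w = (struct widget *)GC_malloc(sizeof *w);	*/
+/*	GC_register_finalizer((void *)w, my_finalizer, (void *)0,	*/
+/*			      &ofn, &ocd);				*/
+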
+/* The following routine may be used to break cycles between */
+/* finalizable objects, thus causing cyclic finalizable */
+/* objects to be finalized in the correct order.  Standard	*/
+/* use involves calling GC_register_disappearing_link(&p), */
+/* where p is a pointer that is not followed by finalization */
+/* code, and should not be considered in determining */
+/* finalization order. */
+int GC_register_disappearing_link(/* void ** link */);
+ /* Link should point to a field of a heap allocated */
+ /* object obj. *link will be cleared when obj is */
+ /* found to be inaccessible. This happens BEFORE any */
+ /* finalization code is invoked, and BEFORE any */
+ /* decisions about finalization order are made. */
+ /* This is useful in telling the finalizer that */
+ /* some pointers are not essential for proper */
+ /* finalization. This may avoid finalization cycles. */
+ /* Note that obj may be resurrected by another */
+ /* finalizer, and thus the clearing of *link may */
+ /* be visible to non-finalization code. */
+ /* There's an argument that an arbitrary action should */
+ /* be allowed here, instead of just clearing a pointer. */
+ /* But this causes problems if that action alters, or */
+ /* examines connectivity. */
+ /* Returns 1 if link was already registered, 0 */
+			/* otherwise.						*/
+int GC_unregister_disappearing_link(/* void ** link */);
+ /* Returns 0 if link was not actually registered. */
+
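+/* For instance (names invented for this sketch): if node objects are	*/
+/* finalizable and chained through a "next" field that should not	*/
+/* constrain finalization order:					*/
+/*									*/
+/*	struct node { struct node * next; ... };			*/
+/*	struct node * n = (struct node *)GC_malloc(sizeof(struct node));*/
+/*	GC_register_disappearing_link((void **)(&(n -> next)));	*/
+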
+/* Auxiliary fns to make finalization work correctly with displaced */
+/* pointers introduced by the debugging allocators. */
+# ifdef __STDC__
+ void * GC_make_closure(GC_finalization_proc fn, void * data);
+ void GC_debug_invoke_finalizer(void * obj, void * data);
# else
- /* Check whether q points to an object inside hblk p for objects of size s */
- /* with mask m corresponding to s. */
-# define is_proper_obj(q,p,s,m) \
- (((long)(m)) >= 0 ? \
- (((((long)(q)) - (sizeof (struct hblkhdr))) & (m)) == 0) \
- : (((long) (q)) - ((long)(p)) - (sizeof (struct hblkhdr))) \
- % ((s) << 2) == 0)
-# endif
+ char * GC_make_closure(/* GC_finalization_proc fn, char * data */);
+ void GC_debug_invoke_finalizer(/* void * obj, void * data */);
+# endif
-/* The following is a quick test whether something is an object pointer */
-/* It may err in the direction of identifying bogus pointers */
-/* Assumes heap + text + data + bss < 64 Meg. */
-#ifdef M68K_SUN
-# define TMP_POINTER_MASK 0xfc000003 /* pointer & POINTER_MASK should be 0 */
-#else
-# ifdef RT
-# define TMP_POINTER_MASK 0xc0000003
-# else
-# ifdef VAX
-# define TMP_POINTER_MASK 0xfc000003
+
+/* The following is intended to be used by a higher level */
+/* (e.g. cedar-like) finalization facility. It is expected */
+/* that finalization code will arrange for hidden pointers to */
+/* disappear. Otherwise objects can be accessed after they */
+/* have been collected. */
+# ifdef I_HIDE_POINTERS
+# ifdef __STDC__
+# define HIDE_POINTER(p) (~(size_t)(p))
+# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
# else
-# ifdef SPARC
-# define TMP_POINTER_MASK 0xfc000003
-# else
-# ifdef I386
-# define TMP_POINTER_MASK 0xfc000003
-# else
-# ifdef NS32K
-# define TMP_POINTER_MASK 0xfc000003
-# else
-# ifdef MIPS
-# define TMP_POINTER_MASK 0xc0000003
-# else
-# ifdef M68K_HP
-# define TMP_POINTER_MASK 0xfc000003
-# else
-# ifdef IBMRS6000
-# define TMP_POINTER_MASK 0xd0000003
-# else
- --> dont know <--
-# endif
-# endif
-# endif
-# endif
-# endif
-# endif
+# define HIDE_POINTER(p) (~(unsigned long)(p))
+# define REVEAL_POINTER(p) ((char *)(HIDE_POINTER(p)))
+# endif
+ /* Converting a hidden pointer to a real pointer requires verifying */
+ /* that the object still exists. This involves acquiring the */
+ /* allocator lock to avoid a race with the collector. */
+ typedef char * (*GC_fn_type)();
+# ifdef __STDC__
+ void * GC_call_with_alloc_lock(GC_fn_type fn, void * client_data);
+# else
+ char * GC_call_with_alloc_lock(/* GC_fn_type fn, char * client_data */);
# endif
# endif
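+
+/* Sketch of the intended pattern (illustrative only): client code	*/
+/* keeps hidden = HIDE_POINTER(p) somewhere the collector ignores,	*/
+/* and recovers p only under the allocation lock, after checking that	*/
+/* the entry has not been invalidated:					*/
+/*									*/
+/*	unsigned long hidden = HIDE_POINTER(p);				*/
+/*									*/
+/*	char * reveal(cd)						*/
+/*	char * cd;							*/
+/*	{ return((char *)REVEAL_POINTER(*(unsigned long *)cd)); }	*/
+/*									*/
+/*	p = GC_call_with_alloc_lock(reveal, (char *)(&hidden));	*/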
-#endif
-#ifdef INTERIOR_POINTERS
-# define POINTER_MASK (TMP_POINTER_MASK & 0xfffffff8)
- /* Don't pay attention to whether address is properly aligned */
-#else
-# define POINTER_MASK TMP_POINTER_MASK
#endif
-
-#ifdef HBLK_MAP
-# define quicktest(p) (((long)(p)) > ((long)(heapstart)) \
- && !(((unsigned long)(p)) & POINTER_MASK))
-#else
-# ifdef UNALIGNED
-# define quicktest(p) (((long)(p)) > ((long)(&end)) \
- && !(((unsigned long)(p)) & POINTER_MASK) \
- && (((long)(p)) & HBLKMASK))
- /* The last test throws out pointers to the beginning of heap */
- /* blocks. Small integers shifted by 16 bits tend to look */
- /* like these. */
-# else
-# define quicktest(p) (((long)(p)) > ((long)(&end)) \
- && !(((unsigned long)(p)) & POINTER_MASK))
-# endif
-#endif
-
-
-/* Marks are in a reserved area in */
-/* each heap block. Each word has one mark bits associated */
-/* with it. Only those corresponding to the beginning of an */
-/* object are used. */
-
-
-/* Operations */
-
-/*
- * Retrieve, set, clear the mark bit corresponding
- * to the nth word in a given heap block.
- * Note that retrieval will work, so long as *hblk is addressable.
- * In particular, the check whether hblk is a legitimate heap block
- * can be postponed until after the mark bit is examined.
- *
- * (Recall that bit n corresponds to object beginning at word n)
- */
-
-# define mark_bit(hblk,n) (((hblk)->hb_marks[divWORDSZ(n)] \
- >> (modWORDSZ(n))) & 1)
-
-/* The following assume the mark bit in question is either initially */
-/* cleared or it already has its final value */
-# define set_mark_bit(hblk,n) (hblk)->hb_marks[divWORDSZ(n)] \
- |= 1 << modWORDSZ(n)
-
-# define clear_mark_bit(hblk,n) (hblk)->hb_marks[divWORDSZ(n)] \
- &= ~(1 << modWORDSZ(n))
-
-/* procedures */
-
-/* Small object allocation routines */
-extern struct obj * allocobj();
-extern struct obj * allocaobj();
-
-/* Small object allocation routines that mark only from registers */
-/* expected to be preserved by C. */
-extern struct obj * _allocobj();
-extern struct obj * _allocaobj();
-
-/* general purpose allocation routines */
-extern struct obj * gc_malloc();
-extern struct obj * gc_malloc_atomic();
-
diff --git a/gc.man b/gc.man
new file mode 100644
index 00000000..73d2295e
--- /dev/null
+++ b/gc.man
@@ -0,0 +1,58 @@
+.TH GC_MALLOC 1L "12 November 1992"
+.SH NAME
+GC_malloc, GC_malloc_atomic, GC_free, GC_realloc, GC_register_displacement, GC_register_finalizer \- Garbage collecting malloc replacement
+.SH SYNOPSIS
+#include "gc.h"
+.br
+# define malloc(n) GC_malloc(n)
+.br
+... malloc(...) ...
+.br
+.sp
+cc ... gc.a
+.LP
+.SH DESCRIPTION
+.I GC_malloc
+and
+.I GC_free
+are plug-in replacements for standard malloc and free. However,
+.I
+GC_malloc
+will attempt to reclaim inaccessible space automatically by invoking a conservative garbage collector at appropriate points. The collector traverses all data structures accessible by following pointers from the machine's registers, stack(s), data, and bss segments. Inaccessible structures will be reclaimed. A machine word is considered to be a valid pointer if it resides on a stack or in registers and is an address inside an object allocated by
+.I
+GC_malloc
+or friends, or if it resides inside the heap and points to either the beginning of a heap object, or to an offset \fIn\fP within the object that was registered by a call to
+.I
+GC_register_displacement(n).
+.LP
+Unlike the standard implementations of malloc,
+.I
+GC_malloc
+clears the newly allocated storage.
+.I
+GC_malloc_atomic
+does not. Furthermore, it informs the collector that the resulting object will never contain any pointers, and should therefore not be scanned by the collector.
+.I
+GC_free
+can be used to deallocate objects, but its use is optional, and discouraged.
+.I
+GC_realloc
+has the standard realloc semantics. It preserves pointer-free-ness.
+.I
+GC_register_finalizer
+allows for registration of functions that are invoked when an object becomes inaccessible.
+.LP
+It is also possible to use the collector to find storage leaks in programs destined to be run with standard malloc/free. The collector can be compiled for thread-safe operation. Unlike standard malloc, it is safe to call malloc after a previous malloc call was interrupted by a signal, provided the original malloc call is not resumed.
+.LP
+Debugging versions of many of the above routines are provided as macros. Their names are identical to the above, but consist of all capital letters. If GC_DEBUG is defined before gc.h is included, these routines do additional checking, and allow the leak detecting version of the collector to produce slightly more useful output. Without GC_DEBUG defined, they behave exactly like the lower-case versions.
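+.LP
+A minimal sketch of the debugging variant (the file and code around it are the client's own):
+.br
+#define GC_DEBUG
+.br
+#include "gc.h"
+.br
+... GC_MALLOC(...) ... GC_REALLOC(...) ... GC_FREE(...) ...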
+.LP
+.SH "SEE ALSO"
+The README and gc.h files in the distribution. More detailed definitions of the functions exported by the collector are given there. (The above list is not complete.)
+.LP
+Boehm, H., and M. Weiser, "Garbage Collection in an Uncooperative Environment",
+\fISoftware Practice & Experience\fP, September 1988, pp. 807-820.
+.LP
+The malloc(3) man page.
+.LP
+.SH AUTHOR
+Hans-J. Boehm (boehm@parc.xerox.com). Some of the code was written by others, most notably Alan Demers.
diff --git a/gc_headers.h b/gc_headers.h
new file mode 100644
index 00000000..7c2b06d5
--- /dev/null
+++ b/gc_headers.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+# ifndef GC_HEADERS_H
+# define GC_HEADERS_H
+typedef struct hblkhdr hdr;
+
+# define LOG_TOP_SZ 11
+# define LOG_BOTTOM_SZ (WORDSZ - LOG_TOP_SZ - LOG_HBLKSIZE)
+# define TOP_SZ (1 << LOG_TOP_SZ)
+# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
+
+# define MAX_JUMP (HBLKSIZE - 1)
+
+extern hdr ** GC_top_index [];
+
+# define HDR(p) (GC_top_index \
+ [(unsigned long)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)] \
+ [((unsigned long)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
+
+/* Is the result a forwarding address to someplace closer to the */
+/* beginning of the block or NIL? */
+# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((unsigned long) (hhdr) <= MAX_JUMP)
+
+/* Get an HBLKSIZE aligned address closer to the beginning of the block */
+/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
+# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (unsigned long)(hhdr))
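+
+/* Illustrative lookup loop (a sketch only; the collector's own header	*/
+/* lookup lives elsewhere, and HBLKPTR comes from gc_private.h).	*/
+/* Given an address p that may fall inside a multi-block object:	*/
+/*									*/
+/*	struct hblk * h = HBLKPTR(p);					*/
+/*	hdr * hhdr = HDR(h);						*/
+/*	while (hhdr != 0 && IS_FORWARDING_ADDR_OR_NIL(hhdr)) {		*/
+/*	    h = FORWARDED_ADDR(h, hhdr);				*/
+/*	    hhdr = HDR(h);						*/
+/*	}								*/
+/* At this point hhdr is a real header, or 0 if p is not a heap ptr.	*/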
+# endif /* GC_HEADERS_H */
diff --git a/gc_inline.h b/gc_inline.h
new file mode 100644
index 00000000..fd7be474
--- /dev/null
+++ b/gc_inline.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+
+# ifndef GC_PRIVATE_H
+# include "gc_private.h"
+# endif
+
+/* Allocate n words (NOT BYTES). X is made to point to the result. */
+/* It is assumed that n < MAXOBJSZ, and */
+/* that n > 0. On machines requiring double word alignment of some */
+/* data, we also assume that n is 1 or even. This bypasses the */
+/* MERGE_SIZES mechanism. In order to minimize the number of distinct */
+/* free lists that are maintained, the caller should ensure that a */
+/* small number of distinct values of n are used. (The MERGE_SIZES */
+/* mechanism normally does this by ensuring that only the leading three */
+/* bits of n may be nonzero. See misc.c for details.) We really */
+/* recommend this only in cases in which n is a constant, and no */
+/* locking is required. */
+/* In that case it may allow the compiler to perform substantial */
+/* additional optimizations. */
+# define GC_MALLOC_WORDS(result,n) \
+{ \
+ register ptr_t op; \
+ register ptr_t *opp; \
+ DCL_LOCK_STATE; \
+ \
+ opp = &(GC_objfreelist[n]); \
+ FASTLOCK(); \
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
+ FASTUNLOCK(); \
+ (result) = GC_generic_malloc_words_small((n), NORMAL); \
+ } else { \
+ *opp = obj_link(op); \
+ obj_link(op) = 0; \
+ GC_words_allocd += (n); \
+ FASTUNLOCK(); \
+ (result) = (extern_ptr_t) op; \
+ } \
+}
+
+
+/* The same for atomic objects: */
+# define GC_MALLOC_ATOMIC_WORDS(result,n) \
+{ \
+ register ptr_t op; \
+ register ptr_t *opp; \
+ DCL_LOCK_STATE; \
+ \
+ opp = &(GC_aobjfreelist[n]); \
+ FASTLOCK(); \
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
+ FASTUNLOCK(); \
+ (result) = GC_generic_malloc_words_small((n), PTRFREE); \
+ } else { \
+ *opp = obj_link(op); \
+ obj_link(op) = 0; \
+ GC_words_allocd += (n); \
+ FASTUNLOCK(); \
+ (result) = (extern_ptr_t) op; \
+ } \
+}
+
+/* And once more for two word initialized objects: */
+# define GC_CONS(result, first, second) \
+{ \
+ register ptr_t op; \
+ register ptr_t *opp; \
+ DCL_LOCK_STATE; \
+ \
+ opp = &(GC_objfreelist[2]); \
+ FASTLOCK(); \
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
+ FASTUNLOCK(); \
+ op = GC_generic_malloc_words_small(2, NORMAL); \
+ } else { \
+ *opp = obj_link(op); \
+ GC_words_allocd += 2; \
+ FASTUNLOCK(); \
+ } \
+ ((word *)op)[0] = (word)(first); \
+ ((word *)op)[1] = (word)(second); \
+ (result) = (extern_ptr_t) op; \
+}
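+
+/* Example use, as a sketch only (the variables are invented here, and	*/
+/* the restrictions on n stated above apply):				*/
+/*									*/
+/*	extern_ptr_t vec;						*/
+/*	extern_ptr_t pair;						*/
+/*									*/
+/*	GC_MALLOC_WORDS(vec, 8);	(8 words, may contain pointers)	*/
+/*	GC_CONS(pair, vec, vec);	(2 word cell, both fields vec)	*/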
diff --git a/gc_private.h b/gc_private.h
new file mode 100644
index 00000000..2ef44503
--- /dev/null
+++ b/gc_private.h
@@ -0,0 +1,1164 @@
+# define SILENT
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+
+/* Machine specific parts contributed by various people. See README file. */
+
+# ifndef GC_PRIVATE_H
+# define GC_PRIVATE_H
+
+# ifndef GC_H
+# include "gc.h"
+# endif
+
+# ifndef HEADERS_H
+# include "gc_headers.h"
+# endif
+
+# ifndef bool
+ typedef int bool;
+# endif
+# define TRUE 1
+# define FALSE 0
+
+typedef char * ptr_t; /* A generic pointer to which we can add */
+				/* byte displacements.			*/
+
+#ifdef __STDC__
+# if !(defined( sony_news ) )
+# include <stddef.h>
+# endif
+ typedef void * extern_ptr_t;
+#else
+ typedef char * extern_ptr_t;
+#endif
+
+/*********************************/
+/* */
+/* Definitions for conservative */
+/* collector */
+/* */
+/*********************************/
+
+/*********************************/
+/* */
+/* Easily changeable parameters */
+/* */
+/*********************************/
+
+# if defined(sun) && defined(mc68000)
+# define M68K_SUN
+# define mach_type_known
+# endif
+# if defined(hp9000s300)
+# define M68K_HP
+# define mach_type_known
+# endif
+# if defined(vax)
+# define VAX
+# ifdef ultrix
+# define ULTRIX
+# else
+# define BSD
+# endif
+# define mach_type_known
+# endif
+# if defined(mips)
+# define MIPS
+# ifdef ultrix
+# define ULTRIX
+# else
+# define RISCOS
+# endif
+# define mach_type_known
+# endif
+# if defined(sequent) && defined(i386)
+# define I386
+# define SEQUENT
+# define mach_type_known
+# endif
+# if defined(__OS2__) && defined(__32BIT__)
+# define I386
+# define OS2
+# define mach_type_known
+# endif
+# if defined(ibm032)
+# define RT
+# define mach_type_known
+# endif
+# if defined(sun) && defined(sparc)
+# define SPARC
+# define mach_type_known
+# endif
+# if defined(_IBMR2)
+# define IBMRS6000
+# define mach_type_known
+# endif
+# if defined(SCO)
+# define I386
+# define SCO
+# define mach_type_known
+/* --> incompletely implemented */
+# endif
+# if defined(_AUX_SOURCE)
+# define M68K_SYSV
+# define mach_type_known
+# endif
+# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1)
+# define HP_PA
+# define mach_type_known
+# endif
+# if defined(linux) && defined(i386)
+# define I386
+# define LINUX
+# define mach_type_known
+# endif
+
+# ifndef OS2
+# include <sys/types.h>
+# endif
+
+/* Feel free to add more clauses here */
+
+/* Or manually define the machine type here. A machine type is */
+/* characterized by the architecture and assembler syntax. Some */
+/* machine types are further subdivided by OS. In that case, we use */
+/* the macros ULTRIX, RISCOS, and BSD to distinguish. */
+/* Note that SGI IRIX is treated identically to RISCOS. */
+/* The distinction in these cases is usually the stack starting address */
+# ifndef mach_type_known
+# define M68K_SUN /* Guess "Sun" */
+ /* Mapping is: M68K_SUN ==> Sun3 assembler */
+ /* M68K_HP ==> HP9000/300 */
+ /* M68K_SYSV ==> A/UX, maybe others */
+ /* I386 ==> Intel 386 */
+ /* (SEQUENT, OS2, SCO, LINUX variants) */
+ /* SCO is incomplete. */
+ /* NS32K ==> Encore Multimax */
+ /* MIPS ==> R2000 or R3000 */
+ /* (RISCOS, ULTRIX variants) */
+ /* VAX ==> DEC VAX */
+ /* (BSD, ULTRIX variants) */
+ /* RS6000 ==> IBM RS/6000 AIX3.1 */
+ /* RT ==> IBM PC/RT */
+ /* HP_PA ==> HP9000/700 & /800 */
+ /* HP/UX */
+ /* SPARC ==> SPARC under SunOS */
+# endif
+
+# ifdef SPARC
+ /* Test for SunOS 5.x */
+# include <errno.h>
+# ifdef ECHRNG
+# define SUNOS5
+# endif
+# endif
+
+#define ALL_INTERIOR_POINTERS
+ /* Forces all pointers into the interior of an */
+ /* object to be considered valid. Also causes the */
+ /* sizes of all objects to be inflated by at least */
+ /* one byte. This should suffice to guarantee */
+ /* that in the presence of a compiler that does */
+ /* not perform garbage-collector-unsafe */
+ /* optimizations, all portable, strictly ANSI */
+ /* conforming C programs should be safely usable */
+ /* with malloc replaced by GC_malloc and free */
+ /* calls removed. There are several disadvantages: */
+ /* 1. There are probably no interesting, portable, */
+ /* strictly ANSI conforming C programs. */
+ /* 2. This option makes it hard for the collector */
+ /* to allocate space that is not ``pointed to'' */
+ /* by integers, etc. Under SunOS 4.X with a */
+	/* statically linked libc, we empirically		*/
+ /* observed that it would be difficult to */
+ /* allocate individual objects larger than 100K. */
+ /* Even if only smaller objects are allocated, */
+ /* more swap space is likely to be needed. */
+ /* Fortunately, much of this will never be */
+ /* touched. */
+ /* If you can easily avoid using this option, do. */
+ /* If not, try to keep individual objects small. */
+#undef ALL_INTERIOR_POINTERS
+
+#define PRINTSTATS /* Print garbage collection statistics */
+ /* For less verbose output, undefine in reclaim.c */
+
+#define PRINTTIMES /* Print the amount of time consumed by each garbage */
+ /* collection. */
+
+#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
+ /* whether the objects are atomic or composite, and */
+ /* whether or not the block was found to be empty */
+			/* during the reclaim phase.  Typically generates  */
+ /* about one screenful per garbage collection. */
+#undef PRINTBLOCKS
+
+#define PRINTBLACKLIST /* Print black listed blocks, i.e. values that */
+ /* cause the allocator to avoid allocating certain */
+ /* blocks in order to avoid introducing "false */
+ /* hits". */
+#undef PRINTBLACKLIST
+
+#ifdef SILENT
+# ifdef PRINTSTATS
+# undef PRINTSTATS
+# endif
+# ifdef PRINTTIMES
+# undef PRINTTIMES
+# endif
+# ifdef PRINTNBLOCKS
+# undef PRINTNBLOCKS
+# endif
+#endif
+
+#if defined(PRINTSTATS) && !defined(GATHERSTATS)
+# define GATHERSTATS
+#endif
+
+
+#ifdef SPARC
+# define ALIGN_DOUBLE /* Align objects of size > 1 word on 2 word */
+ /* boundaries. Wasteful of memory, but */
+ /* apparently required by SPARC architecture. */
+#endif
+
+#if defined(SPARC) || defined(M68K_SUN)
+# if !defined(PCR) && !defined(SUNOS5)
+# define DYNAMIC_LOADING /* Search dynamic libraries for roots. */
+# endif
+#endif
+
+#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
+ /* free lists are actually maintained. This applies */
+ /* only to the top level routines in misc.c, not to */
+ /* user generated code that calls GC_allocobj and */
+ /* GC_allocaobj directly. */
+ /* Slows down average programs slightly. May however */
+ /* substantially reduce fragmentation if allocation */
+ /* request sizes are widely scattered. */
+ /* May save significant amounts of space for obj_map */
+ /* entries. */
+
+/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
+# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
+# define MERGE_SIZES
+# endif
+
+#if defined(M68K_SUN) || defined(M68K_SYSV)
+# define ALIGNMENT 2 /* Pointers are aligned on 2 byte boundaries */
+ /* by the Sun C compiler. */
+#else
+# ifdef VAX
+# define ALIGNMENT 4 /* Pointers are longword aligned by 4.2 C compiler */
+# else
+# ifdef RT
+# define ALIGNMENT 4
+# else
+# ifdef SPARC
+# define ALIGNMENT 4
+# else
+# ifdef I386
+# define ALIGNMENT 4 /* 32-bit compilers align pointers */
+# else
+# ifdef NS32K
+# define ALIGNMENT 4 /* Pointers are aligned on NS32K */
+# else
+# ifdef MIPS
+# define ALIGNMENT 4 /* MIPS hardware requires pointer */
+ /* alignment */
+# else
+# ifdef M68K_HP
+# define ALIGNMENT 2 /* 2 byte alignment inside struct/union, */
+ /* 4 bytes elsewhere */
+# else
+# ifdef IBMRS6000
+# define ALIGNMENT 4
+# else
+# ifdef HP_PA
+# define ALIGNMENT 4
+# else
+ --> specify alignment <--
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+
+/*
+ * STACKBOTTOM is the cool end of the stack, which is usually the
+ * highest address in the stack.
+ * Under PCR or OS/2, we have other ways of finding thread stacks.
+ * For each machine, the following should:
+ * 1) define STACK_GROWS_UP if the stack grows toward higher addresses, and
+ * 2) define exactly one of
+ * STACKBOTTOM (should be defined to be an expression)
+ * HEURISTIC1
+ * HEURISTIC2
+ * If either of the last two macros is defined, then STACKBOTTOM is computed
+ * during collector startup using one of the following two heuristics:
+ * HEURISTIC1: Take an address inside GC_init's frame, and round it up to
+ * the next multiple of 16 MB.
+ * HEURISTIC2: Take an address inside GC_init's frame, increment it repeatedly
+ * in small steps (decrement if STACK_GROWS_UP), and read the value
+ * at each location. Remember the value when the first
+ * Segmentation violation or Bus error is signalled. Round that
+ * to the nearest plausible page boundary, and use that instead
+ * of STACKBOTTOM.
+ *
+ * If no expression for STACKBOTTOM can be found, and neither of the above
+ * heuristics are usable, the collector can still be used with all of the above
+ * undefined, provided one of the following is done:
+ * 1) GC_mark_roots can be changed to somehow mark from the correct stack(s)
+ * without reference to STACKBOTTOM. This is appropriate for use in
+ * conjunction with thread packages, since there will be multiple stacks.
+ * (Allocating thread stacks in the heap, and treating them as ordinary
+ * heap data objects is also possible as a last resort. However, this is
+ * likely to introduce significant amounts of excess storage retention
+ * unless the dead parts of the thread stacks are periodically cleared.)
+ * 2) Client code may set GC_stackbottom before calling any GC_ routines.
+ * If the author of the client code controls the main program, this is
+ * easily accomplished by introducing a new main program, setting
+ * GC_stackbottom to the address of a local variable, and then calling
+ * the original main program. The new main program would read something
+ * like:
+ *
+ * # include "gc_private.h"
+ *
+ * main(argc, argv, envp)
+ * int argc;
+ * char **argv, **envp;
+ * {
+ * int dummy;
+ *
+ * GC_stackbottom = (ptr_t)(&dummy);
+ * return(real_main(argc, argv, envp));
+ * }
+ */
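+/* For reference, HEURISTIC1 amounts to roughly the following		*/
+/* computation during startup (a sketch, not the actual code):		*/
+/*									*/
+/*	word dummy;							*/
+/*	GC_stackbottom = (ptr_t)(((word)(&dummy) + 0xffffff)		*/
+/*				 & ~((word)0xffffff));			*/
+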
+#ifndef PCR
+# ifdef RT
+# define STACKBOTTOM ((ptr_t) 0x1fffd800)
+# else
+# ifdef I386
+# ifdef SEQUENT
+# define STACKBOTTOM ((ptr_t) 0x3ffff000) /* For Sequent */
+# else
+# ifdef SCO
+# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
+# else
+# ifdef LINUX
+# define STACKBOTTOM ((ptr_t)0xc0000000)
+# else
+# ifdef OS2
+ /* This is handled specially in GC_init_inner. */
+ /* OS2 actually has the right system call! */
+# else
+			  --> Your OS isn't supported yet
+# endif
+# endif
+# endif
+# endif
+# else
+# ifdef NS32K
+# define STACKBOTTOM ((ptr_t) 0xfffff000) /* for Encore */
+# else
+# ifdef M68K_SYSV
+# define STACKBOTTOM ((ptr_t)0xFFFFFFFE)
+ /* The stack starts at the top of memory, but */
+			/* 0x0 cannot be used as setjmp_test complains	*/
+ /* that the stack direction is incorrect. Two */
+ /* bytes down from 0x0 should be safe enough. */
+ /* --Parag */
+# else
+# ifdef M68K_HP
+# define STACKBOTTOM ((ptr_t) 0xffeffffc)
+ /* empirically determined. seems to work. */
+# else
+# ifdef IBMRS6000
+# define STACKBOTTOM ((ptr_t) 0x2ff80000)
+# else
+#	    ifdef VAX
+# ifdef ULTRIX
+# define STACKBOTTOM ((ptr_t) 0x7fffc800)
+# else
+# define HEURISTIC1
+ /* HEURISTIC2 may be OK, but it's hard to test. */
+# endif
+# else
+ /* Sun systems, HP PA systems, and DEC MIPS systems have */
+ /* STACKBOTTOM values that differ between machines that */
+ /* are intended to be object code compatible. */
+# if defined(SPARC) || defined(M68K_SUN)
+# define HEURISTIC1
+# else
+# ifdef HP_PA
+# define STACK_GROWS_UP
+# endif
+# define HEURISTIC2
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+#endif /* PCR */
+
+
+# ifndef STACK_GROWS_UP
+# define STACK_GROWS_DOWN
+# endif
+
+/* Start of data segment for each of the above systems. Note that the */
+/* default case works only for contiguous text and data, such as on a */
+/* Vax. */
+# ifdef M68K_SUN
+ extern char etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x1ffff) & ~0x1ffff))
+# else
+# ifdef RT
+# define DATASTART ((ptr_t) 0x10000000)
+# else
+# if (defined(I386) && (defined(SEQUENT)||defined(LINUX))) || defined(SPARC)
+ extern int etext;
+# ifdef SUNOS5
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x10003) & ~0x3))
+ /* Experimentally determined. */
+ /* Inconsistent with man a.out, which appears */
+ /* to be wrong. */
+# else
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+ /* On very old SPARCs this is too conservative. */
+# endif
+# else
+# ifdef NS32K
+ extern char **environ;
+# define DATASTART ((ptr_t)(&environ))
+ /* hideous kludge: environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+# else
+# ifdef MIPS
+# define DATASTART 0x10000000
+ /* Could probably be slightly higher since */
+ /* startup code allocates lots of junk */
+# else
+# ifdef M68K_HP
+ extern char etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# else
+# ifdef IBMRS6000
+# define DATASTART ((ptr_t)0x20000000)
+# else
+# ifdef I386
+# ifdef SCO
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)&etext & 0xfff))
+# else
+# ifdef OS2
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)&etext & 0xfff))
+# else
+	  --> Your OS is not supported yet
+# endif
+# endif
+# else
+# ifdef M68K_SYSV
+ /* This only works for shared-text binaries with magic number 0413.
+ The other sorts of SysV binaries put the data at the end of the text,
+ in which case the default of &etext would work. Unfortunately,
+ handling both would require having the magic-number available.
+ -- Parag
+ */
+ extern etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)&etext & 0x1fff))
+# else
+# ifdef HP_PA
+ extern int __data_start;
+# define DATASTART ((ptr_t)(&__data_start))
+# else
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# define HINCR 16 /* Initial heap increment, in blocks of 4K */
+# define MAXHINCR 512 /* Maximum heap increment, in blocks */
+# define HINCR_MULT 3 /* After each new allocation, GC_hincr is multiplied */
+# define HINCR_DIV 2 /* by HINCR_MULT/HINCR_DIV */
+# define GC_MULT 3 /* Don't collect if the fraction of */
+ /* non-collectable memory in the heap */
+			/* exceeds GC_MULT/GC_DIV		*/
+# define GC_DIV 4
+
+# define NON_GC_HINCR ((word)8)
+	/* Heap increment used if collection was	*/
+	/* suppressed because most of the heap is	*/
+	/* not collectable				*/
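+/* For illustration (the numbers follow from the defaults above):	*/
+/* starting at HINCR = 16, the update rule defined further below	*/
+/* yields increments of 16, 24, 36, 54, 81, 121, ... blocks, capped	*/
+/* at MAXHINCR = 512 blocks, i.e. 2 MB per expansion with 4K blocks.	*/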
+
+/*********************************/
+/* */
+/* OS interface routines */
+/* */
+/*********************************/
+
+#include <time.h>
+#if !defined(CLOCKS_PER_SEC)
+# define CLOCKS_PER_SEC 1000000
+/*
+ * This is technically a bug in the implementation. ANSI requires that
+ * CLOCKS_PER_SEC be defined. But at least under SunOS4.1.1, it isn't.
+ * Also note that the combination of ANSI C and POSIX is incredibly gross
+ * here. The type clock_t is used by both clock() and times(). But on
+ * some machines these use different notions of a clock tick; CLOCKS_PER_SEC
+ * seems to apply only to clock().  Hence we use clock() here.  On many machines,
+ * including SunOS, clock actually uses units of microseconds (which are
+ * not really clock ticks).
+ */
+#endif
+#define CLOCK_TYPE clock_t
+#define GET_TIME(x) x = clock()
+#define MS_TIME_DIFF(a,b) ((unsigned long) \
+ (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
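+
+/* Usage sketch for the timing macros above (illustration only;	*/
+/* GC_example_time_phase is a made-up name, not used elsewhere):	*/
+# if 0
+    static void GC_example_time_phase()
+    {
+        CLOCK_TYPE start_time;
+        CLOCK_TYPE done_time;
+
+        GET_TIME(start_time);
+        /* ... the phase being timed ... */
+        GET_TIME(done_time);
+        GC_printf("Phase took %lu msecs\n",
+                  MS_TIME_DIFF(done_time, start_time));
+    }
+# endif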
+
+/* We use bzero and bcopy internally. They may not be available. */
+# ifdef OS2
+# include <string.h>
+# define bcopy(x,y,n) memcpy(y,x,n)
+# define bzero(x,n) memset(x, 0, n)
+# endif
+
+/* HBLKSIZE aligned allocation.  0 is taken to mean failure.	*/
+/* Space is assumed to be cleared.				*/
+# ifdef PCR
+ char * real_malloc();
+# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
+# define THREADS
+# else
+# ifdef OS2
+ void * os2_alloc(size_t bytes);
+# define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
+# else
+ caddr_t sbrk();
+# ifdef __STDC__
+# define GET_MEM(bytes) HBLKPTR(sbrk((size_t)(bytes + HBLKSIZE)) \
+ + HBLKSIZE-1)
+# else
+# define GET_MEM(bytes) HBLKPTR(sbrk((int)(bytes + HBLKSIZE)) \
+ + HBLKSIZE-1)
+# endif
+# endif
+# endif
+
+/*
+ * Mutual exclusion between allocator/collector routines.
+ * Needed if there is more than one allocator thread.
+ * FASTLOCK() is assumed to try to acquire the lock in a cheap and
+ * dirty way that is acceptable for a few instructions, e.g. by
+ * inhibiting preemption. This is assumed to have succeeded only
+ * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
+ * If signals cannot be tolerated with the FASTLOCK held, then
+ * FASTLOCK should disable signals. The code executed under
+ * FASTLOCK is otherwise immune to interruption, provided it is
+ * not restarted.
+ * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
+ * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
+ * (There is currently no equivalent for FASTLOCK.)
+ */
+# ifdef PCR
+# include "pcr/th/PCR_Th.h"
+# include "pcr/th/PCR_ThCrSec.h"
+ extern struct PCR_Th_MLRep GC_allocate_ml;
+# define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
+# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
+# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
+# define FASTLOCK() PCR_ThCrSec_EnterSys()
+ /* Here we cheat (a lot): */
+# define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
+ /* TRUE if nobody currently holds the lock */
+# define FASTUNLOCK() PCR_ThCrSec_ExitSys()
+# else
+# define DCL_LOCK_STATE
+# define LOCK()
+# define UNLOCK()
+# define FASTLOCK() LOCK()
+# define FASTLOCK_SUCCEEDED() TRUE
+# define FASTUNLOCK() UNLOCK()
+# endif
+
+/* Delay any interrupts or signals that may abort this thread. Data */
+/* structures are in a consistent state outside this pair of calls. */
+/* ANSI C allows both to be empty (though the standard isn't very */
+/* clear on that point). Standard malloc implementations are usually */
+/* neither interruptable nor thread-safe, and thus correspond to */
+/* empty definitions. */
+# ifdef PCR
+# define DISABLE_SIGNALS() \
+ PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
+# define ENABLE_SIGNALS() \
+ PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
+# else
+# if 0 /* Useful for debugging, and unusually */
+ /* correct client code. */
+# define DISABLE_SIGNALS()
+# define ENABLE_SIGNALS()
+# else
+# define DISABLE_SIGNALS() GC_disable_signals()
+ void GC_disable_signals();
+# define ENABLE_SIGNALS() GC_enable_signals()
+ void GC_enable_signals();
+# endif
+# endif
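+
+/* Sketch of the intended locking idiom (illustration only; the real	*/
+/* fast paths live in the allocation routines, not here):		*/
+# if 0
+    {
+        DCL_LOCK_STATE;
+
+        FASTLOCK();
+        if (!FASTLOCK_SUCCEEDED()) {
+            /* Contention: fall back to the full lock, with signals */
+            /* disabled so the update cannot be interrupted.        */
+            FASTUNLOCK();
+            DISABLE_SIGNALS();
+            LOCK();
+            /* ... slow path ... */
+            UNLOCK();
+            ENABLE_SIGNALS();
+        } else {
+            /* ... a few instructions on the fast path ... */
+            FASTUNLOCK();
+        }
+    }
+# endif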
+
+/*
+ * Stop and restart mutator threads.
+ */
+# ifdef PCR
+# include "pcr/th/PCR_ThCtl.h"
+# define STOP_WORLD() \
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever)
+# define START_WORLD() \
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever);
+# else
+# define STOP_WORLD()
+# define START_WORLD()
+# endif
+
+/* Abandon ship */
+# ifdef PCR
+ void PCR_Base_Panic(const char *fmt, ...);
+# define ABORT(s) PCR_Base_Panic(s)
+# else
+# define ABORT(s) abort(s)
+# endif
+
+/* Exit abnormally, but without making a mess (e.g. out of memory) */
+# ifdef PCR
+ void PCR_Base_Exit(int status);
+# define EXIT() PCR_Base_Exit(1)
+# else
+# define EXIT() (void)exit(1)
+# endif
+
+/* Print warning message, e.g. almost out of memory. */
+# define WARN(s) GC_printf(s)
+
+/*********************************/
+/* */
+/* Word-size-dependent defines */
+/* */
+/*********************************/
+
+#define WORDS_TO_BYTES(x) ((x)<<2)
+#define BYTES_TO_WORDS(x) ((x)>>2)
+
+#define CPP_WORDSZ 32
+#define WORDSZ ((word)CPP_WORDSZ)
+#define LOGWL ((word)5) /* log[2] of above */
+#define BYTES_PER_WORD ((word)(sizeof (word)))
+#define ONES 0xffffffff
+#define MSBYTE 0xff000000
+#define SIGNB 0x80000000
+#define MAXSHORT 0x7fff
+#define modHALFWORDSZ(n) ((n) & 0xf) /* mod n by size of half word */
+#define divHALFWORDSZ(n) ((n) >> 4) /* divide n by size of half word */
+#define modWORDSZ(n) ((n) & 0x1f) /* mod n by size of word */
+#define divWORDSZ(n) ((n) >> 5) /* divide n by size of word */
+#define twice(n) ((n) << 1) /* double n */
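+/* Examples (32-bit words, as assumed above): WORDS_TO_BYTES(3) == 12,	*/
+/* BYTES_TO_WORDS(12) == 3, divWORDSZ(37) == 1, modWORDSZ(37) == 5.	*/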
+
+/*********************/
+/* */
+/* Size Parameters */
+/* */
+/*********************/
+
+/* heap block size, bytes. Should be power of 2 */
+
+#define CPP_LOG_HBLKSIZE 12
+#define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
+#define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
+#define HBLKSIZE ((word)CPP_HBLKSIZE)
+
+
+/* max size objects supported by freelist (larger objects may be */
+/* allocated, but less efficiently) */
+
+#define CPP_MAXOBJSZ BYTES_TO_WORDS(CPP_HBLKSIZE/2)
+#define MAXOBJSZ ((word)CPP_MAXOBJSZ)
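+/* With the 4K blocks and 32-bit words defined here, MAXOBJSZ is	*/
+/* 512 words (2 KB); larger objects are handled a heap block at a	*/
+/* time rather than from these free lists.				*/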
+
+# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
+
+# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
+
+# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))
+
+# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))
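+/* Example, with 4K blocks: HBLKPTR(0x2342c) == (struct hblk *)0x23000	*/
+/* and HBLKDISPL(0x2342c) == 0x42c.					*/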
+
+
+/********************************************/
+/* */
+/* H e a p B l o c k s */
+/* */
+/********************************************/
+
+/* heap block header */
+#define HBLKMASK (HBLKSIZE-1)
+
+#define BITS_PER_HBLK (HBLKSIZE * 8)
+
+#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
+ /* upper bound */
+ /* We allocate 1 bit/word. Only the first word */
+ /* in each object is actually marked. */
+
+# ifdef ALIGN_DOUBLE
+# define MARK_BITS_SZ (((MARK_BITS_PER_HBLK + 2*CPP_WORDSZ - 1) \
+ / (2*CPP_WORDSZ))*2)
+# else
+# define MARK_BITS_SZ ((MARK_BITS_PER_HBLK + CPP_WORDSZ - 1)/CPP_WORDSZ)
+# endif
+ /* Upper bound on number of mark words per heap block */
+
+/* Mark stack entries. */
+typedef struct ms_entry {
+ word * mse_start; /* inclusive */
+ word * mse_end; /* exclusive */
+} mse;
+
+typedef mse * (*mark_proc)(/* word * addr, hdr * hhdr, mse * msp, mse * msl */);
+ /* Procedure to arrange for the descendents of the object at */
+ /* addr to be marked. Msp points at the top entry on the */
+ /* mark stack. Msl delimits the hot end of the mark stack. */
+ /* hhdr is the hdr structure corresponding to addr. */
+ /* Returns the new mark stack pointer. */
+
+struct hblkhdr {
+ word hb_sz; /* If in use, size in words, of objects in the block. */
+ /* if free, the size in bytes of the whole block */
+ struct hblk * hb_next; /* Link field for hblk free list */
+ /* and for lists of chunks waiting to be */
+ /* reclaimed. */
+ mark_proc hb_mark_proc; /* Procedure to mark objects. Can */
+    				/* also be retrieved through obj_kind.	*/
+ /* But one level of indirection matters */
+ /* here. */
+ char* hb_map; /* A pointer to a pointer validity map of the block. */
+ /* See GC_obj_map. */
+ /* Valid for all blocks with headers. */
+ /* Free blocks point to GC_invalid_map. */
+ int hb_obj_kind; /* Kind of objects in the block. Each kind */
+ /* identifies a mark procedure and a set of */
+    			 /* list headers.  Sometimes called regions.	*/
+
+ word hb_marks[MARK_BITS_SZ];
+ /* Bit i in the array refers to the */
+ /* object starting at the ith word (header */
+ /* INCLUDED) in the heap block. */
+};
+
+/* heap block body */
+
+# define DISCARD_WORDS 0
+ /* Number of words to be dropped at the beginning of each block */
+ /* Must be a multiple of 32. May reasonably be nonzero */
+	/* on machines that don't guarantee longword alignment of	*/
+ /* pointers, so that the number of false hits is minimized. */
+ /* 0 and 32 are probably the only reasonable values. */
+
+# define BODY_SZ ((HBLKSIZE-WORDS_TO_BYTES(DISCARD_WORDS))/sizeof(word))
+
+struct hblk {
+# if (DISCARD_WORDS != 0)
+ word garbage[DISCARD_WORDS];
+# endif
+ word hb_body[BODY_SZ];
+};
+
+# define HDR_WORDS ((word)DISCARD_WORDS)
+# define HDR_BYTES ((word)WORDS_TO_BYTES(DISCARD_WORDS))
+
+/* Object free list link */
+# define obj_link(p) (*(ptr_t *)(p))
+
+/* lists of all heap blocks and free lists */
+/* These are grouped together in a struct */
+/* so that they can be easily skipped by the */
+/* GC_mark routine. */
+/* The ordering is weird to make GC_malloc */
+/* faster by keeping the important fields */
+/* sufficiently close together that a */
+/* single load of a base register will do. */
+/* Scalars that could easily appear to */
+/* be pointers are also put here. */
+
+struct _GC_arrays {
+ word _heapsize;
+ ptr_t _last_heap_addr;
+ ptr_t _prev_heap_addr;
+ word _words_allocd_before_gc;
+ /* Number of words allocated before this */
+ /* collection cycle. */
+# ifdef GATHERSTATS
+ word _composite_in_use;
+ /* Number of words in accessible composite */
+ /* objects. */
+ word _atomic_in_use;
+ /* Number of words in accessible atomic */
+ /* objects. */
+# endif
+ word _words_allocd;
+ /* Number of words allocated during this collection cycle */
+ ptr_t _objfreelist[MAXOBJSZ+1];
+ /* free list for objects */
+# ifdef MERGE_SIZES
+ unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
+    	/* Number of words to allocate for a given allocation request,	  */
+    	/* indexed by the request size in bytes.			  */
+# endif
+ ptr_t _aobjfreelist[MAXOBJSZ+1];
+ /* free list for atomic objs*/
+ ptr_t _obj_map[MAXOBJSZ+1];
+ /* If not NIL, then a pointer to a map of valid */
+ /* object addresses. hbh_map[sz][i] is j if the */
+ /* address block_start+i is a valid pointer */
+ /* to an object at */
+ /* block_start+i&~3 - WORDS_TO_BYTES(j). */
+ /* (If ALL_INTERIOR_POINTERS is defined, then */
+ /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* block_start+WORDS_TO_BYTES(i) is in the */
+ /* interior of an object starting at */
+ /* block_start+WORDS_TO_BYTES(i-j)). */
+ /* It is OBJ_INVALID if */
+ /* block_start+WORDS_TO_BYTES(i) is not */
+ /* valid as a pointer to an object. */
+ /* We assume that all values of j <= OBJ_INVALID */
+ /* The zeroth entry corresponds to large objects.*/
+# ifdef ALL_INTERIOR_POINTERS
+# define map_entry_type short
+# define OBJ_INVALID 0x7fff
+# define MAP_ENTRY(map, bytes) \
+ (((map_entry_type *)(map))[BYTES_TO_WORDS(bytes)])
+# define MAP_ENTRIES BYTES_TO_WORDS(HBLKSIZE)
+# define MAP_SIZE (MAP_ENTRIES * sizeof(map_entry_type))
+# define OFFSET_VALID(displ) TRUE
+# define CPP_MAX_OFFSET (HBLKSIZE - HDR_BYTES - 1)
+# define MAX_OFFSET ((word)CPP_MAX_OFFSET)
+# else
+# define map_entry_type char
+# define OBJ_INVALID 0x7f
+# define MAP_ENTRY(map, bytes) \
+ (map)[bytes]
+# define MAP_ENTRIES HBLKSIZE
+# define MAP_SIZE MAP_ENTRIES
+# define CPP_MAX_OFFSET (WORDS_TO_BYTES(OBJ_INVALID) - 1)
+# define MAX_OFFSET ((word)CPP_MAX_OFFSET)
+# define VALID_OFFSET_SZ \
+ (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
+ CPP_MAX_OFFSET+1 \
+ : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
+ char _valid_offsets[VALID_OFFSET_SZ];
+ /* GC_valid_offsets[i] == TRUE ==> i */
+ /* is registered as a displacement. */
+# define OFFSET_VALID(displ) GC_valid_offsets[displ]
+ char _modws_valid_offsets[sizeof(word)];
+ /* GC_valid_offsets[i] ==> */
+ /* GC_modws_valid_offsets[i%sizeof(word)] */
+# endif
+ struct hblk * _reclaim_list[MAXOBJSZ+1];
+ struct hblk * _areclaim_list[MAXOBJSZ+1];
+};
+
+extern struct _GC_arrays GC_arrays;
+
+# define GC_objfreelist GC_arrays._objfreelist
+# define GC_aobjfreelist GC_arrays._aobjfreelist
+# define GC_valid_offsets GC_arrays._valid_offsets
+# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
+# define GC_reclaim_list GC_arrays._reclaim_list
+# define GC_areclaim_list GC_arrays._areclaim_list
+# define GC_obj_map GC_arrays._obj_map
+# define GC_last_heap_addr GC_arrays._last_heap_addr
+# define GC_prev_heap_addr GC_arrays._prev_heap_addr
+# define GC_words_allocd GC_arrays._words_allocd
+# define GC_heapsize GC_arrays._heapsize
+# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
+# ifdef GATHERSTATS
+# define GC_composite_in_use GC_arrays._composite_in_use
+# define GC_atomic_in_use GC_arrays._atomic_in_use
+# endif
+# ifdef MERGE_SIZES
+# define GC_size_map GC_arrays._size_map
+# endif
+
+# define beginGC_arrays ((ptr_t)(&GC_arrays))
+# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
+
+
+# define MAXOBJKINDS 16
+
+/* Object kinds: */
+extern struct obj_kind {
+    ptr_t *ok_freelist;	/* Array of free list headers for this kind of object */
+    			/* Points either to GC_arrays or to storage allocated */
+ /* with GC_scratch_alloc. */
+ struct hblk **ok_reclaim_list;
+ /* List headers for lists of blocks waiting to be */
+ /* swept. */
+ mark_proc ok_mark_proc; /* Procedure to either mark referenced objects, */
+ /* or push them on the mark stack. */
+ bool ok_init; /* Clear objects before putting them on the free list. */
+} GC_obj_kinds[MAXOBJKINDS];
+/* Predefined kinds: */
+# define PTRFREE 0
+# define NORMAL 1
+
+extern int GC_n_kinds;
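+
+/* Sketch (illustration only) of how client code might register an	*/
+/* additional kind, as suggested by the GC_generic_malloc comment	*/
+/* further below.  GC_example_new_kind is a made-up name; real code	*/
+/* would also need to supply cleared free list and reclaim list	*/
+/* headers, and hold the allocation lock while doing this.		*/
+# if 0
+    int GC_example_new_kind(fl, rl, mp, init)
+    ptr_t * fl;		/* MAXOBJSZ+1 cleared free list headers	*/
+    struct hblk ** rl;	/* MAXOBJSZ+1 cleared reclaim list headers	*/
+    mark_proc mp;
+    bool init;
+    {
+        register int result = GC_n_kinds++;
+
+        GC_obj_kinds[result].ok_freelist = fl;
+        GC_obj_kinds[result].ok_reclaim_list = rl;
+        GC_obj_kinds[result].ok_mark_proc = mp;
+        GC_obj_kinds[result].ok_init = init;
+        return(result);
+    }
+# endif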
+
+extern char * GC_invalid_map;
+ /* Pointer to the nowhere valid hblk map */
+ /* Blocks pointing to this map are free. */
+
+extern struct hblk * GC_hblkfreelist;
+ /* List of completely empty heap blocks */
+ /* Linked through hb_next field of */
+ /* header structure associated with */
+ /* block. */
+
+extern bool GC_is_initialized; /* GC_init() has been run. */
+
+# ifndef PCR
+ extern ptr_t GC_stackbottom; /* Cool end of user stack */
+# endif
+
+extern word GC_hincr; /* current heap increment, in blocks */
+
+extern word GC_root_size; /* Total size of registered root sections */
+
+extern bool GC_debugging_started; /* GC_debug_malloc has been called. */
+
+extern ptr_t GC_least_plausible_heap_addr;
+extern ptr_t GC_greatest_plausible_heap_addr;
+ /* Bounds on the heap. Guaranteed valid */
+ /* Likely to include future heap expansion. */
+
+/* Operations */
+# define update_GC_hincr GC_hincr = (GC_hincr * HINCR_MULT)/HINCR_DIV; \
+ if (GC_hincr > MAXHINCR) {GC_hincr = MAXHINCR;}
+# ifndef abs
+# define abs(x) ((x) < 0? (-(x)) : (x))
+# endif
+
+/****************************/
+/* */
+/* Objects */
+/* */
+/****************************/
+
+
+/* Marks are in a reserved area in */
+/* each heap block. Each word has one mark bit associated */
+/* with it. Only those corresponding to the beginning of an */
+/* object are used. */
+
+
+/* Operations */
+
+/*
+ * Retrieve, set, clear the mark bit corresponding
+ * to the nth word in a given heap block.
+ *
+ * (Recall that bit n corresponds to object beginning at word n
+ * relative to the beginning of the block, including unused words)
+ */
+
+# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
+ >> (modWORDSZ(n))) & 1)
+# define set_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+ |= 1 << modWORDSZ(n)
+
+# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+ &= ~(1 << modWORDSZ(n))
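+
+/* Usage sketch (illustration only; GC_example_mark_first is a		*/
+/* made-up name): test and set the mark bit of the first object in	*/
+/* a block, i.e. the one starting at word HDR_WORDS.			*/
+# if 0
+    static void GC_example_mark_first(hhdr)
+    hdr * hhdr;
+    {
+        word n = HDR_WORDS;
+
+        if (!mark_bit_from_hdr(hhdr, n)) {
+            set_mark_bit_from_hdr(hhdr, n);
+        }
+    }
+# endif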
+
+/* Important internal collector routines */
+
+void GC_apply_to_all_blocks(/*fn, client_data*/);
+ /* Invoke fn(hbp, client_data) for each */
+ /* allocated heap block. */
+mse * GC_no_mark_proc(/*addr,hhdr,msp,msl*/);
+ /* Mark procedure for PTRFREE objects. */
+mse * GC_normal_mark_proc(/*addr,hhdr,msp,msl*/);
+ /* Mark procedure for NORMAL objects. */
+void GC_mark_init();
+void GC_mark(); /* Mark from everything on the mark stack. */
+void GC_mark_reliable(); /* as above, but fix things up after */
+ /* a mark stack overflow. */
+void GC_mark_all(/*b,t*/); /* Mark from everything in a range. */
+void GC_mark_all_stack(/*b,t*/); /* Mark from everything in a range, */
+ /* consider interior pointers as valid */
+void GC_remark(); /* Mark from all marked objects. Used */
+ /* only if we had to drop something. */
+void GC_tl_mark(/*p*/); /* Mark from a single root. */
+void GC_add_roots_inner();
+
+/* Machine dependent startup routines */
+ptr_t GC_get_stack_base();
+void GC_register_data_segments();
+
+# ifndef ALL_INTERIOR_POINTERS
+ void GC_add_to_black_list_normal(/* bits */);
+ /* Register bits as a possible future false */
+ /* reference from the heap or static data */
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_normal(bits)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_stack(bits)
+# endif
+
+void GC_add_to_black_list_stack(/* bits */);
+struct hblk * GC_is_black_listed(/* h, len */);
+ /* If there are likely to be false references */
+ /* to a block starting at h of the indicated */
+ /* length, then return the next plausible */
+ /* starting location for h that might avoid */
+ /* these false references. */
+void GC_promote_black_lists();
+ /* Declare an end to a black listing phase. */
+
+ptr_t GC_scratch_alloc(/*bytes*/);
+ /* GC internal memory allocation for */
+ /* small objects. Deallocation is not */
+ /* possible. */
+
+void GC_invalidate_map(/* hdr */);
+ /* Remove the object map associated */
+ /* with the block. This identifies */
+ /* the block as invalid to the mark */
+ /* routines. */
+void GC_add_map_entry(/*sz*/);
+ /* Add a heap block map for objects of */
+ /* size sz to obj_map. */
+void GC_register_displacement_inner(/*offset*/);
+ /* Version of GC_register_displacement */
+ /* that assumes lock is already held */
+ /* and signals are already disabled. */
+
+void GC_init_inner();
+
+void GC_new_hblk(/*size_in_words, kind*/);
+ /* Allocate a new heap block, and build */
+ /* a free list in it. */
+struct hblk * GC_allochblk(/*size_in_words, kind*/);
+ /* Allocate a heap block, clear it if */
+ /* for composite objects, inform */
+ /* the marker that block is valid */
+ /* for objects of indicated size. */
+ /* sz < 0 ==> atomic. */
+void GC_freehblk(); /* Deallocate a heap block and mark it */
+ /* as invalid. */
+
+void GC_start_reclaim(/*abort_if_found*/);
+ /* Restore unmarked objects to free */
+ /* lists, or (if abort_if_found is */
+ /* TRUE) report them. */
+ /* Sweeping of small object pages is */
+ /* largely deferred. */
+void GC_continue_reclaim(/*size, kind*/);
+ /* Sweep pages of the given size and */
+ /* kind, as long as possible, and */
+ /* as long as the corr. free list is */
+ /* empty. */
+void GC_gcollect_inner(); /* Collect; caller must have acquired */
+ /* lock and disabled signals. */
+void GC_init(); /* Initialize collector. */
+
+ptr_t GC_generic_malloc(/* bytes, kind */);
+ /* Allocate an object of the given */
+ /* kind. By default, there are only */
+ /* two kinds: composite, and atomic. */
+ /* We claim it's possible for clever */
+ /* client code that understands GC */
+ /* internals to add more, e.g. to */
+ /* communicate object layout info */
+ /* to the collector. */
+ptr_t GC_generic_malloc_words_small(/*words, kind*/);
+ /* As above, but size in units of words */
+ /* Bypasses MERGE_SIZES. Assumes */
+ /* words <= MAXOBJSZ. */
+ptr_t GC_allocobj(/* sz_in_words, kind */);
+ /* Make the indicated */
+ /* free list nonempty, and return its */
+ /* head. */
+
+void GC_install_header(/*h*/);
+ /* Install a header for block h. */
+void GC_install_counts(/*h, sz*/);
+ /* Set up forwarding counts for block */
+ /* h of size sz. */
+void GC_remove_header(/*h*/);
+ /* Remove the header for block h. */
+void GC_remove_counts(/*h, sz*/);
+ /* Remove forwarding counts for h. */
+hdr * GC_find_header(/*p*/); /* Debugging only. */
+
+void GC_finalize(); /* Perform all indicated finalization actions */
+ /* on unmarked objects. */
+
+void GC_add_to_heap(/*p, bytes*/);
+ /* Add a HBLKSIZE aligned chunk to the heap. */
+
+void GC_print_obj(/* ptr_t p */);
+ /* P points to somewhere inside an object with */
+ /* debugging info. Print a human readable */
+ /* description of the object to stderr. */
+void GC_check_heap();
+ /* Check that all objects in the heap with */
+ /* debugging info are intact. Print */
+ /* descriptions of any that are not. */
+
+void GC_printf(/* format, ... */);
+ /* A version of printf that doesn't allocate, */
+ /* is restricted to long arguments, and */
+ /* (unfortunately) doesn't use varargs for */
+ /* portability. Restricted to 6 args and */
+ /* 1K total output length. */
+ /* (We use sprintf. Hopefully that doesn't */
+  			/* allocate for long arguments.)		*/
+void GC_err_printf(/* format, ... */);
+ /* Ditto, writes to stderr. */
+void GC_err_puts(/* char *s */);
+ /* Write s to stderr, don't buffer, don't add */
+ /* newlines, don't ... */
+
+# endif /* GC_PRIVATE_H */
diff --git a/headers.c b/headers.c
new file mode 100644
index 00000000..3873ee8f
--- /dev/null
+++ b/headers.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+
+/*
+ * This implements:
+ * 1. allocation of heap block headers
+ * 2. A map from addresses, via heap block addresses, to heap block headers
+ *
+ * Access speed is crucial. We implement an index structure based on a 2
+ * level tree.
+ * For 64 bit machines this will have to be rewritten. We expect that the
+ * winning strategy there is to use a hash table as a cache, with
+ * collisions resolved through a 4 or 5 level tree.
+ */
+
+# include "gc_private.h"
+
+# if CPP_WORDSZ != 32
+# if CPP_WORDSZ > 32
+ --> This needs to be reimplemented. See above.
+# else
+ --> Get a real machine.
+# endif
+# endif
+
+hdr ** GC_top_index [TOP_SZ];
+
+typedef hdr * bottom_index[BOTTOM_SZ];
+
+/*
+ * The bottom level index contains one of three kinds of values:
+ * 0 means we're not responsible for this block.
+ *	0 < (long)X <= MAX_JUMP means the block starts at least
+ * X * HBLKSIZE bytes before the current address.
+ * A valid pointer points to a hdr structure. (The above can't be
+ * valid pointers due to the GET_MEM return convention.)
+ */
+
+static bottom_index all_nils = { 0 };
+
+/* Non-macro version of header location routine */
+hdr * GC_find_header(h)
+ptr_t h;
+{
+ return(HDR(h));
+}
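+
+/* For illustration: ignoring forwarding counts, the HDR() lookup	*/
+/* (defined in gc_headers.h) amounts to a two level access along the	*/
+/* following lines.  GC_example_raw_lookup is a made-up name, and the	*/
+/* sketch assumes the usual power-of-two BOTTOM_SZ.			*/
+# if 0
+static hdr * GC_example_raw_lookup(p)
+word p;
+{
+    register hdr ** bi = GC_top_index[p >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)];
+
+    return(bi[(p >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)]);
+}
+# endif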
+
+/* Routines to dynamically allocate collector data structures that will */
+/* never be freed. */
+
+static char * scratch_free_ptr = 0;
+
+static char * scratch_end_ptr = 0;
+
+ptr_t GC_scratch_alloc(bytes)
+register word bytes;
+{
+ register char * result = scratch_free_ptr;
+ scratch_free_ptr += bytes;
+ if (scratch_free_ptr <= scratch_end_ptr) {
+ return(result);
+ }
+ {
+ long bytes_to_get = ((HINCR+1) * HBLKSIZE + bytes) & ~(HBLKSIZE - 1);
+
+ scratch_free_ptr = (char *)GET_MEM(bytes_to_get);
+ if (scratch_free_ptr == 0) {
+ GC_printf("Out of memory - trying to allocate less\n");
+ result = (char *)GET_MEM(bytes);
+ if (result == 0) {
+ GC_printf("Out of memory - giving up\n");
+ } else {
+ scratch_free_ptr -= bytes;
+ return(result);
+ }
+ }
+ scratch_end_ptr = scratch_free_ptr + bytes_to_get;
+ return(GC_scratch_alloc(bytes));
+ }
+}
+
+static hdr * hdr_free_list = 0;
+
+/* Return an uninitialized header */
+static hdr * alloc_hdr()
+{
+ register hdr * result;
+
+ if (hdr_free_list == 0) {
+ result = (hdr *) GC_scratch_alloc((word)(sizeof(hdr)));
+ } else {
+ result = hdr_free_list;
+ hdr_free_list = (hdr *) (result -> hb_next);
+ }
+ return(result);
+}
+
+static void free_hdr(hhdr)
+hdr * hhdr;
+{
+ hhdr -> hb_next = (struct hblk *) hdr_free_list;
+ hdr_free_list = hhdr;
+}
+
+GC_init_headers()
+{
+ register int i;
+
+ for (i = 0; i < TOP_SZ; i++) {
+ GC_top_index[i] = all_nils;
+ }
+}
+
+/* Make sure that there is a bottom level index block for address addr */
+static void get_index(addr)
+register word addr;
+{
+ register word indx =
+ (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+
+ if (GC_top_index[indx] == all_nils) {
+ GC_top_index[indx] = (hdr **)
+ GC_scratch_alloc((word)(sizeof (bottom_index)));
+ bzero((char *)(GC_top_index[indx]), (int)(sizeof (bottom_index)));
+ }
+}
+
+/* Install a header for block h. */
+/* The header is uninitialized. */
+void GC_install_header(h)
+register struct hblk * h;
+{
+ get_index((word) h);
+ HDR(h) = alloc_hdr();
+}
+
+/* Set up forwarding counts for block h of size sz */
+void GC_install_counts(h, sz)
+register struct hblk * h;
+register word sz; /* bytes */
+{
+ register struct hblk * hbp;
+ register int i;
+
+ for (hbp = h; (char *)hbp < (char *)h + sz; hbp += BOTTOM_SZ) {
+ get_index((word) hbp);
+ }
+ get_index((word)h + sz - 1);
+ for (hbp = h + 1; (char *)hbp < (char *)h + sz; hbp += 1) {
+ i = hbp - h;
+ HDR(hbp) = (hdr *)(i > MAX_JUMP? MAX_JUMP : i);
+ }
+}
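+
+/* Example: after GC_install_counts(h, 5*HBLKSIZE), HDR(h+1) through	*/
+/* HDR(h+4) hold the forwarding counts 1 through 4 (capped at		*/
+/* MAX_JUMP), so a scan landing in h+3 can step back 3 blocks to the	*/
+/* real header at h.							*/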
+
+/* Remove the header for block h */
+void GC_remove_header(h)
+register struct hblk * h;
+{
+ free_hdr(HDR(h));
+ HDR(h) = 0;
+}
+
+/* Remove forwarding counts for h */
+void GC_remove_counts(h, sz)
+register struct hblk * h;
+register word sz; /* bytes */
+{
+ register struct hblk * hbp;
+
+ for (hbp = h+1; (char *)hbp < (char *)h + sz; hbp += 1) {
+ HDR(hbp) = 0;
+ }
+}
+
+/* Apply fn to all allocated blocks */
+/*VARARGS1*/
+void GC_apply_to_all_blocks(fn, client_data)
+void (*fn)(/* struct hblk *h, word client_data */);
+word client_data;
+{
+ register int i, j;
+ register hdr ** index_p;
+
+ for (i = 0; i < TOP_SZ; i++) {
+ index_p = GC_top_index[i];
+ if (index_p != all_nils) {
+ for (j = BOTTOM_SZ-1; j >= 0;) {
+ if (!IS_FORWARDING_ADDR_OR_NIL(index_p[j])) {
+ if (index_p[j]->hb_map != GC_invalid_map) {
+ (*fn)(((struct hblk *)
+ (((i << LOG_BOTTOM_SZ) + j) << LOG_HBLKSIZE)),
+ client_data);
+ }
+ j--;
+ } else if (index_p[j] == 0) {
+ j--;
+ } else {
+ j -= (int)(index_p[j]);
+ }
+ }
+ }
+ }
+}
diff --git a/interface.c b/interface.c
index 29e35f0b..66a1771f 100644
--- a/interface.c
+++ b/interface.c
@@ -1,44 +1,26 @@
-#include "gc.h"
-/* These are some additional routines to interface the collector to C */
-/* They were contributed by David Chase (chase@orc.olivetti.com) */
-/* They illustrates the use of non_gc_bytes, and provide an interface to */
-/* the storage allocator's size information. Note that there is a */
-/* check to guard against 0 length allocations. */
-/* Hacked by H. Boehm (11/16/89) to accomodate gc_realloc. */
-
-initialize_allocator() {
- non_gc_bytes = 0;
- gc_init();
-}
-
-
-/* Use of gc_gasp to report errors reduces risk of bizarre
- interactions with I/O system in desperate situations. */
-
-gc_gasp(s) char * s;
-{
- write(2,s,strlen(s));
-}
-
+#include "gc_private.h"
+#include <stddef.h>
-/* This reports how many bytes are actually available to an object.
- It is a fatal error to request the size of memory addressed by a
- pointer not obtained from the storage allocator. */
-
-size_of_obj_in_bytes(p)
- struct obj * p;
-{
- register struct hblk * h;
- register int size;
-
- h = HBLKPTR(p);
-
- if (is_hblk(h)) {
- return (HB_SIZE(h))<<2;
- }
- gc_gasp("GC/size_of_obj_in_bytes: requested byte size of non-pointer!\n");
- exit(1);
-}
+/* These are some additional routines to interface the collector to C */
+/* This is a rather special purpose interface that tries to keep down the */
+/* number of collections in the presence of explicit deallocations. */
+/* A call to this malloc effectively declares that the resulting object */
+/* will be explicitly deallocated with very high probability. */
+/* The reduced collection frequency may interfere with object */
+/* coalescing. */
+/* If you just want to rename GC_malloc and friends, this is NOT */
+/* the right way to do it. */
+
+/* This was contributed by David Chase (chase@eng.sun.com) a long time	*/
+/* ago.  Much of its original functionality has since been absorbed	*/
+/* elsewhere.								*/
+/* It illustrates the use of GC_non_gc_bytes.				*/
+/* Hacked by H. Boehm (11/16/89) to accommodate GC_realloc.		*/
+/* Further updated (2/20/92) to reflect changes in interfaces and data */
+/* structures. */
+/* Further updated (8/25/92) to correct previously introduced bugs and */
+/* make it compile with semi-modern compilers. */
+/* Note that extern_ptr_t is either void * or char *, as appropriate. */
/* This free routine is merely advisory -- it reduces the estimate of
@@ -46,19 +28,23 @@ size_of_obj_in_bytes(p)
making it more likely that the collector will run next time more
memory is needed. */
-void free(p) {
- int inc = size_of_obj_in_bytes(p);
- non_gc_bytes -= inc;
+void free(p)
+extern_ptr_t p;
+{
+ size_t inc = GC_size(p);
+ GC_non_gc_bytes -= inc;
}
/* This free routine adjusts the collector estimates of space in use,
but also actually releases the memory for reuse. It is thus "unsafe"
if the programmer "frees" memory that is actually still in use. */
-void unsafe_free(p) {
- int inc = size_of_obj_in_bytes(p);
- non_gc_bytes -= inc;
- gc_free(p);
+void unsafe_free(p)
+extern_ptr_t p;
+{
+ size_t inc = GC_size(p);
+ GC_non_gc_bytes -= inc;
+ GC_free(p);
}
@@ -68,26 +54,33 @@ void unsafe_free(p) {
its size is added to non_gc_bytes.
*/
-word malloc(bytesize) {
-word result;
-if (bytesize == 0) bytesize = 4;
-result = (word) gc_malloc (bytesize);
-non_gc_bytes += (bytesize + 3) & ~3;
-return result;
+extern_ptr_t malloc(bytesize)
+size_t bytesize;
+{
+ extern_ptr_t result;
+
+ result = (extern_ptr_t) GC_malloc (bytesize);
+ GC_non_gc_bytes += (bytesize + 3) & ~3;
+ return result;
}
-word malloc_atomic(bytesize) {
-word result;
-if (bytesize == 0) bytesize = 4;
-result = (word) gc_malloc_atomic (bytesize);
-non_gc_bytes += (bytesize + 3) & ~3;
-return result;
+extern_ptr_t malloc_atomic(bytesize)
+size_t bytesize;
+{
+ extern_ptr_t result;
+
+ result = (extern_ptr_t) GC_malloc_atomic (bytesize);
+ GC_non_gc_bytes += (bytesize + 3) & ~3;
+ return result;
}
-word realloc(old,size) word old,size; {
- int inc = size_of_obj_in_bytes(old);
+extern_ptr_t realloc(old,size)
+extern_ptr_t old;
+size_t size;
+{
+ int inc = GC_size(old);
- non_gc_bytes += ((size + 3) & ~3) - inc;
- return(gc_realloc(old, size);
- }
+ GC_non_gc_bytes += ((size + 3) & ~3) - inc;
+ return(GC_realloc(old, size));
+}
diff --git a/mach_dep.c b/mach_dep.c
index 1acfe111..670579d8 100644
--- a/mach_dep.c
+++ b/mach_dep.c
@@ -1,228 +1,9 @@
-# include "gc.h"
+# include "gc_private.h"
+# include <stdio.h>
# include <setjmp.h>
-
-
-/* If no assembly calls are anticipated, it is only necessary to port */
-/* the mark_regs routine near the end of the file to your machine. */
-/* The allocobj and allocaobj routines are designed only as an assembly */
-/* language interface. The definitions of objfreelist and aobjfreelist */
-/* are useful only if in-line allocation code is generated. */
-
-/* Definitions similar to the following make it easier to access the free */
-/* lists from an assembly lnguage, or in-line C interface. */
-/* They should be added for other architectures. */
-
-
-struct __gc_arrays _gc_arrays = { 0 };
- /* The purpose of the initialization is to force _gc_arrays */
- /* into the data segment. The Fortran-based object file */
- /* format used by many versions of UNIX otherwise makes the */
- /* following impossible. (Note that some assemblers and */
- /* linkers, notably those for Sun-3s, don't realize that */
- /* this is impossible, and simply generate garbage.) */
-
-# ifdef M68K_SUN
- asm(".globl _aobjfreelist");
- asm(".globl _objfreelist");
- asm("_aobjfreelist = __gc_arrays");
- asm("_objfreelist = __gc_arrays+0x804");
-# endif
-# ifdef SPARC
- asm(".global _aobjfreelist");
- asm(".global _objfreelist");
- asm("_aobjfreelist = __gc_arrays");
- asm("_objfreelist = __gc_arrays+0x804");
-# endif
-# ifdef VAX
- asm(".globl _aobjfreelist");
- asm(".globl _objfreelist");
- asm(".set _aobjfreelist,__gc_arrays");
- asm(".set _objfreelist,__gc_arrays+0x804");
-# endif
-# ifdef RT
- asm(".globl _aobjfreelist");
- asm(".globl _objfreelist");
- asm(".set _aobjfreelist,__gc_arrays");
- asm(".set _objfreelist,__gc_arrays+0x804");
-# endif
-
-/* Call allocobj or allocaobj after first saving at least those registers */
-/* not preserved by the C compiler. The register used for return values */
-/* is not saved, since it will be clobbered anyway. */
-# ifdef RT
- /* This is done in rt_allocobj.s */
-# else
-# ifdef M68K_HP
- /* Optimizer is not safe, we want these suckers stored. */
-/* # pragma OPTIMIZE OFF - we claim this is unnecessary if -O flag */
-/* is not used. It breaks the collector */
-/* on other machines. */
- asm(" text"); /* HP/Motorola assembler syntax */
- asm(" global __allocobj");
- asm(" global __allocaobj");
- asm(" global _allocobj");
- asm(" global _allocaobj");
-# else
- asm(" .text"); /* Default (PDP-11 Unix syntax) */
- asm(" .globl __allocobj");
- asm(" .globl __allocaobj");
- asm(" .globl _allocobj");
- asm(" .globl _allocaobj");
-# endif
-
-# ifdef M68K_SUN
- asm("_allocobj:");
- asm(" link a6,#0");
- asm(" movl d1,sp@-");
- asm(" movl a0,sp@-");
- asm(" movl a1,sp@-");
- asm(" movl sp@(20),sp@-");
- asm(" jbsr __allocobj");
- asm(" addl #4,sp");
- asm(" movl sp@+,a1");
- asm(" movl sp@+,a0");
- asm(" movl sp@+,d1");
- asm(" unlk a6");
- asm(" rts");
-
- asm("_allocaobj:");
- asm(" link a6,#0");
- asm(" movl d1,sp@-");
- asm(" movl a0,sp@-");
- asm(" movl a1,sp@-");
- asm(" movl sp@(20),sp@-");
- asm(" jbsr __allocaobj");
- asm(" addl #4,sp");
- asm(" movl sp@+,a1");
- asm(" movl sp@+,a0");
- asm(" movl sp@+,d1");
- asm(" unlk a6");
- asm(" rts");
-# endif
-
-# ifdef M68K_HP
- asm("_allocobj:");
- asm(" link %a6,&0");
- asm(" mov.l %d1,-(%sp)");
- asm(" mov.l %a0,-(%sp)");
- asm(" mov.l %a1,-(%sp)");
- asm(" mov.l 20(%sp),-(%sp)");
- asm(" jsr __allocobj");
- asm(" add.l &4,%sp");
- asm(" mov.l (%sp)+,%a1");
- asm(" mov.l (%sp)+,%a0");
- asm(" mov.l (%sp)+,%d1");
- asm(" unlk %a6");
- asm(" rts");
-
- asm("_allocaobj:");
- asm(" link %a6,&0");
- asm(" mov.l %d1,-(%sp)");
- asm(" mov.l %a0,-(%sp)");
- asm(" mov.l %a1,-(%sp)");
- asm(" mov.l 20(%sp),-(%sp)");
- asm(" jsr __allocaobj");
- asm(" add.l &4,%sp");
- asm(" mov.l (%sp)+,%a1");
- asm(" mov.l (%sp)+,%a0");
- asm(" mov.l (%sp)+,%d1");
- asm(" unlk %a6");
- asm(" rts");
-# endif /* M68K_HP */
-
-# ifdef I386
- asm(".data");
- asm("gc_ret_value: .word 0");
- asm(".word 0");
- asm(".text");
-
- asm("_allocaobj:");
- asm("pushl %ebp");
- asm("movl %esp,%ebp");
- asm("pushal");
- asm("pushl 8(%ebp)"); /* Push orignal argument */
- asm("call __allocaobj");
- asm("popl %ecx");
- asm("movl %eax,gc_ret_value"); /* Save return value */
- asm("popal");
- asm("movl gc_ret_value,%eax");
- asm("leave");
- asm("ret");
-
- asm("_allocobj:");
- asm("pushl %ebp");
- asm("movl %esp,%ebp");
- asm("pushal");
- asm("pushl 8(%ebp)"); /* Push orignal argument */
- asm("call __allocobj");
- asm("popl %ecx");
- asm("movl %eax,gc_ret_value"); /* Save return value */
- asm("popal");
- asm("movl gc_ret_value,%eax");
- asm("leave");
- asm("ret");
-# endif
-
-# ifdef SPARC
- asm("_allocaobj:");
- asm(" ba __allocaobj");
- asm(" nop");
- asm("_allocobj:");
- asm(" ba __allocobj");
- asm(" nop");
-
-# include <sun4/trap.h>
- asm(" .globl _save_regs_in_stack");
- asm("_save_regs_in_stack:");
- asm(" t 0x3 ! ST_FLUSH_WINDOWS");
- asm(" mov %sp,%o0");
- asm(" retl");
- asm(" nop");
-# endif
-
-# ifdef VAX
- asm("_allocobj:");
- asm(".word 0x3e");
- asm("pushl 4(ap)");
- asm("calls $1,__allocobj");
- asm("ret");
- asm("_allocaobj:");
- asm(".word 0x3e");
- asm("pushl 4(ap)");
- asm("calls $1,__allocaobj");
- asm("ret");
-# endif
-
-# ifdef NS32K
- asm("_allocobj:");
- asm("enter [],$0");
- asm("movd r1,tos");
- asm("movd r2,tos");
- asm("movd 8(fp),tos");
- asm("bsr ?__allocobj");
- asm("adjspb $-4");
- asm("movd tos,r2");
- asm("movd tos,r1");
- asm("exit []");
- asm("ret $0");
- asm("_allocaobj:");
- asm("enter [],$0");
- asm("movd r1,tos");
- asm("movd r2,tos");
- asm("movd 8(fp),tos");
- asm("bsr ?__allocaobj");
- asm("adjspb $-4");
- asm("movd tos,r2");
- asm("movd tos,r1");
- asm("exit []");
- asm("ret $0");
-# endif
-
-
-# if !defined(VAX) && !defined(M68K_SUN) && !defined(M68K_HP)&& !defined(SPARC) && !defined(I386) && !defined(NS32K)
- /* Assembly language interface routines undefined */
-# endif
-
+# ifdef OS2
+# define _setjmp(b) setjmp(b)
+# define _longjmp(b,v) longjmp(b,v)
# endif
/* Routine to mark from registers that are preserved by the C compiler. */
@@ -230,7 +11,7 @@ struct __gc_arrays _gc_arrays = { 0 };
/* version at the end, that is likely, but not guaranteed to work */
/* on your architecture. Run the test_setjmp program to see whether */
/* there is any chance it will work. */
-mark_regs()
+GC_mark_regs()
{
# ifdef RT
register long TMP_SP; /* must be bound to r11 */
@@ -239,12 +20,12 @@ mark_regs()
/* VAX - generic code below does not work under 4.2 */
/* r1 through r5 are caller save, and therefore */
/* on the stack or dead. */
- asm("pushl r11"); asm("calls $1,_tl_mark");
- asm("pushl r10"); asm("calls $1,_tl_mark");
- asm("pushl r9"); asm("calls $1,_tl_mark");
- asm("pushl r8"); asm("calls $1,_tl_mark");
- asm("pushl r7"); asm("calls $1,_tl_mark");
- asm("pushl r6"); asm("calls $1,_tl_mark");
+ asm("pushl r11"); asm("calls $1,_GC_tl_mark");
+ asm("pushl r10"); asm("calls $1,_GC_tl_mark");
+ asm("pushl r9"); asm("calls $1,_GC_tl_mark");
+ asm("pushl r8"); asm("calls $1,_GC_tl_mark");
+ asm("pushl r7"); asm("calls $1,_GC_tl_mark");
+ asm("pushl r6"); asm("calls $1,_GC_tl_mark");
# endif
# ifdef M68K_SUN
/* M68K_SUN - could be replaced by generic code */
@@ -253,18 +34,18 @@ mark_regs()
asm("subqw #0x4,sp"); /* allocate word on top of stack */
- asm("movl a2,sp@"); asm("jbsr _tl_mark");
- asm("movl a3,sp@"); asm("jbsr _tl_mark");
- asm("movl a4,sp@"); asm("jbsr _tl_mark");
- asm("movl a5,sp@"); asm("jbsr _tl_mark");
+ asm("movl a2,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl a3,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl a4,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl a5,sp@"); asm("jbsr _GC_tl_mark");
/* Skip frame pointer and stack pointer */
- asm("movl d1,sp@"); asm("jbsr _tl_mark");
- asm("movl d2,sp@"); asm("jbsr _tl_mark");
- asm("movl d3,sp@"); asm("jbsr _tl_mark");
- asm("movl d4,sp@"); asm("jbsr _tl_mark");
- asm("movl d5,sp@"); asm("jbsr _tl_mark");
- asm("movl d6,sp@"); asm("jbsr _tl_mark");
- asm("movl d7,sp@"); asm("jbsr _tl_mark");
+ asm("movl d1,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl d2,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl d3,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl d4,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl d5,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl d6,sp@"); asm("jbsr _GC_tl_mark");
+ asm("movl d7,sp@"); asm("jbsr _GC_tl_mark");
asm("addqw #0x4,sp"); /* put stack back where it was */
# endif
@@ -275,61 +56,104 @@ mark_regs()
asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
- asm("mov.l %a2,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %a3,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %a4,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %a5,(%sp)"); asm("jsr _tl_mark");
+ asm("mov.l %a2,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %a3,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %a4,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %a5,(%sp)"); asm("jsr _GC_tl_mark");
/* Skip frame pointer and stack pointer */
- asm("mov.l %d1,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %d2,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %d3,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %d4,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %d5,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %d6,(%sp)"); asm("jsr _tl_mark");
- asm("mov.l %d7,(%sp)"); asm("jsr _tl_mark");
+ asm("mov.l %d1,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %d2,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %d3,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %d4,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %d5,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %d6,(%sp)"); asm("jsr _GC_tl_mark");
+ asm("mov.l %d7,(%sp)"); asm("jsr _GC_tl_mark");
asm("addq.w &0x4,%sp"); /* put stack back where it was */
# endif /* M68K_HP */
-# ifdef I386
+# if defined(I386) && !defined(OS2)
/* I386 code, generic code does not appear to work */
- asm("pushl %eax"); asm("call _tl_mark"); asm("addl $4,%esp");
- asm("pushl %ecx"); asm("call _tl_mark"); asm("addl $4,%esp");
- asm("pushl %edx"); asm("call _tl_mark"); asm("addl $4,%esp");
- asm("pushl %esi"); asm("call _tl_mark"); asm("addl $4,%esp");
- asm("pushl %edi"); asm("call _tl_mark"); asm("addl $4,%esp");
- asm("pushl %ebx"); asm("call _tl_mark"); asm("addl $4,%esp");
+      /* It does appear to work under OS2, and asm()s don't.	*/
+ asm("pushl %eax"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
+ asm("pushl %ecx"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
+ asm("pushl %edx"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
+ asm("pushl %esi"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
+ asm("pushl %edi"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
+ asm("pushl %ebx"); asm("call _GC_tl_mark"); asm("addl $4,%esp");
# endif
# ifdef NS32K
- asm ("movd r3, tos"); asm ("bsr ?_tl_mark"); asm ("adjspb $-4");
- asm ("movd r4, tos"); asm ("bsr ?_tl_mark"); asm ("adjspb $-4");
- asm ("movd r5, tos"); asm ("bsr ?_tl_mark"); asm ("adjspb $-4");
- asm ("movd r6, tos"); asm ("bsr ?_tl_mark"); asm ("adjspb $-4");
- asm ("movd r7, tos"); asm ("bsr ?_tl_mark"); asm ("adjspb $-4");
+ asm ("movd r3, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
+ asm ("movd r4, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
+ asm ("movd r5, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
+ asm ("movd r6, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
+ asm ("movd r7, tos"); asm ("bsr ?_GC_tl_mark"); asm ("adjspb $-4");
# endif
# ifdef SPARC
/* generic code will not work */
- save_regs_in_stack();
+ GC_save_regs_in_stack();
# endif
# ifdef RT
- tl_mark(TMP_SP); /* tl_mark from r11 */
-
- asm("cas r11, r6, r0"); tl_mark(TMP_SP); /* r6 */
- asm("cas r11, r7, r0"); tl_mark(TMP_SP); /* through */
- asm("cas r11, r8, r0"); tl_mark(TMP_SP); /* r10 */
- asm("cas r11, r9, r0"); tl_mark(TMP_SP);
- asm("cas r11, r10, r0"); tl_mark(TMP_SP);
-
- asm("cas r11, r12, r0"); tl_mark(TMP_SP); /* r12 */
- asm("cas r11, r13, r0"); tl_mark(TMP_SP); /* through */
- asm("cas r11, r14, r0"); tl_mark(TMP_SP); /* r15 */
- asm("cas r11, r15, r0"); tl_mark(TMP_SP);
+ GC_tl_mark(TMP_SP); /* GC_tl_mark from r11 */
+
+ asm("cas r11, r6, r0"); GC_tl_mark(TMP_SP); /* r6 */
+ asm("cas r11, r7, r0"); GC_tl_mark(TMP_SP); /* through */
+ asm("cas r11, r8, r0"); GC_tl_mark(TMP_SP); /* r10 */
+ asm("cas r11, r9, r0"); GC_tl_mark(TMP_SP);
+ asm("cas r11, r10, r0"); GC_tl_mark(TMP_SP);
+
+ asm("cas r11, r12, r0"); GC_tl_mark(TMP_SP); /* r12 */
+ asm("cas r11, r13, r0"); GC_tl_mark(TMP_SP); /* through */
+ asm("cas r11, r14, r0"); GC_tl_mark(TMP_SP); /* r15 */
+ asm("cas r11, r15, r0"); GC_tl_mark(TMP_SP);
# endif
-# if 0
+# ifdef M68K_SYSV
+ /* Once again similar to SUN and HP, though setjmp appears to work.
+ --Parag
+ */
+# ifdef __GNUC__
+ asm("subqw #0x4,%sp"); /* allocate word on top of stack */
+
+ asm("movl %a2,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %a3,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %a4,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %a5,%sp@"); asm("jbsr GC_tl_mark");
+ /* Skip frame pointer and stack pointer */
+ asm("movl %d1,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %d2,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %d3,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %d4,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %d5,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %d6,%sp@"); asm("jbsr GC_tl_mark");
+ asm("movl %d7,%sp@"); asm("jbsr GC_tl_mark");
+
+ asm("addqw #0x4,%sp"); /* put stack back where it was */
+# else /* !__GNUC__*/
+ asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
+
+ asm("mov.l %a2,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %a3,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %a4,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %a5,(%sp)"); asm("jsr GC_tl_mark");
+ /* Skip frame pointer and stack pointer */
+ asm("mov.l %d1,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %d2,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %d3,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %d4,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %d5,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %d6,(%sp)"); asm("jsr GC_tl_mark");
+ asm("mov.l %d7,(%sp)"); asm("jsr GC_tl_mark");
+
+ asm("addq.w &0x4,%sp"); /* put stack back where it was */
+# endif /* !__GNUC__ */
+# endif /* M68K_SYSV */
+
+
+# if defined(HP_PA) || (defined(I386) && defined(OS2))
/* Generic code */
/* The idea is due to Parag Patel at HP. */
/* We're not sure whether he would like */
@@ -337,20 +161,37 @@ mark_regs()
{
jmp_buf regs;
register word * i = (word *) regs;
- register word * lim = (word *) (((char *)(regs)) + (sizeof regs));
+ register ptr_t lim = (ptr_t)(regs) + (sizeof regs);
/* Setjmp on Sun 3s doesn't clear all of the buffer. */
/* That tends to preserve garbage. Clear it. */
- for (; i < lim; i++) {
+ for (; (char *)i < lim; i++) {
*i = 0;
}
(void) _setjmp(regs);
- tl_mark_all(regs, lim);
+ GC_mark_all_stack((ptr_t)regs, lim);
}
# endif
/* other machines... */
-# if !(defined M68K_SUN) && !defined(M68K_HP) && !(defined VAX) && !(defined RT) && !(defined SPARC) && !(defined I386) &&!(defined NS32K)
+# if !(defined M68K_SUN) && !defined(M68K_HP) && !(defined VAX) && !(defined RT) && !(defined SPARC) && !(defined I386) &&!(defined NS32K) &&!defined(HP_PA) && !defined(M68K_SYSV)
--> bad news <--
# endif
}
+
+/* On register window machines, we need a way to force registers into */
+/* the stack. */
+# ifdef SPARC
+# ifdef SUNOS5
+ asm(" .globl GC_save_regs_in_stack");
+ asm("GC_save_regs_in_stack:");
+# else
+ asm(" .globl _GC_save_regs_in_stack");
+ asm("_GC_save_regs_in_stack:");
+# endif
+ asm(" ta 0x3 ! ST_FLUSH_WINDOWS");
+ asm(" mov %sp,%o0");
+ asm(" retl");
+ asm(" nop");
+# endif
+
diff --git a/mark.c b/mark.c
new file mode 100644
index 00000000..aab8aa44
--- /dev/null
+++ b/mark.c
@@ -0,0 +1,361 @@
+
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991,1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ *
+ * This file contains the functions:
+ * GC_mark() - Mark from the mark stack
+ * GC_mark_reliable() - as above, but fix things up after
+ * a mark stack overflow.
+ * GC_mark_all(b,t) - Mark from everything in a range
+ * GC_mark_all_stack(b,t) - Mark from everything in a range,
+ * consider interior pointers as valid
+ * GC_remark() - Mark from all marked objects. Used
+ * only if we had to drop something.
+ */
+
+
+# include <stdio.h>
+# include "gc_private.h"
+
+# define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
+ /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a */
+ /* multiple of HBLKSIZE. */
+
+/*
+ * Limits of the stack used by the GC_mark routine.  Set by the caller of
+ * GC_mark.  All items between GC_mark_stack and GC_mark_stack_top, inclusive,
+ * still need to be marked.  All items on the stack satisfy quicktest.  They do
+ * not necessarily reference real objects.
+ */
+
+mse * GC_mark_stack;
+
+word GC_mark_stack_size = 0;
+
+mse * GC_mark_stack_top;
+
+static bool dropped_some = FALSE;
+ /* We ran out of space and were forced */
+ /* to drop some pointers during marking */
+
+/* Mark procedure for objects that may contain arbitrary pointers. */
+/* Msp is the current mark stack pointer. Msl limits the stack. */
+/* We return the new stack pointer value. */
+/* The object at addr has already been marked. Our job is to make */
+/* sure that its descendents are marked. */
+mse * GC_normal_mark_proc(addr, hhdr, msp, msl)
+register word * addr;
+register hdr * hhdr;
+register mse * msp, * msl;
+{
+ register word sz = hhdr -> hb_sz;
+
+ msp++;
+ /* Push the contents of the object on the mark stack. */
+ if (msp >= msl) {
+ dropped_some = TRUE;
+ return(msp-1);
+ }
+ msp -> mse_start = addr;
+ msp -> mse_end = addr + sz;
+# ifdef GATHERSTATS
+ GC_composite_in_use += sz;
+# endif
+ return(msp);
+}
+
+/* Mark procedure for objects that are known to contain no pointers. */
+/*ARGSUSED*/
+mse * GC_no_mark_proc(addr, hhdr, msp, msl)
+register word * addr;
+register hdr * hhdr;
+register mse * msp, * msl;
+{
+# ifdef GATHERSTATS
+ GC_atomic_in_use += hhdr -> hb_sz;
+# endif
+ return(msp);
+}
+
+
+/*
+ * Mark all objects pointed to by the regions described by
+ * mark stack entries between GC_mark_stack and GC_mark_stack_top,
+ * inclusive. We assume and preserve the invariant
+ * that everything on the mark stack points into a hblk that has an
+ * allocated header. Assumes the upper limit of a mark stack entry
+ * is never 0.
+ */
+void GC_mark()
+{
+ mse * GC_mark_stack_reg = GC_mark_stack;
+ mse * GC_mark_stack_top_reg = GC_mark_stack_top;
+ mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
+ register word * current_p; /* Pointer to current candidate ptr. */
+ register word current; /* Candidate pointer. */
+ register word * limit; /* (Incl) limit of current candidate */
+ /* range */
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+# define SPLIT_RANGE_WORDS 128
+
+ while (GC_mark_stack_top_reg >= GC_mark_stack_reg) {
+ register int displ; /* Displacement in block; first bytes, then words */
+ register hdr * hhdr;
+ register map_entry_type map_entry;
+
+ current_p = GC_mark_stack_top_reg -> mse_start;
+ limit = GC_mark_stack_top_reg -> mse_end;
+ if (limit - current_p > SPLIT_RANGE_WORDS) {
+ /* Process part of the range to avoid pushing too much on the */
+ /* stack. */
+ GC_mark_stack_top_reg -> mse_start =
+ limit = current_p + SPLIT_RANGE_WORDS;
+ /* Make sure that pointers overlapping the two ranges are */
+ /* considered. */
+ limit += sizeof(word) - ALIGNMENT;
+ } else {
+ GC_mark_stack_top_reg--;
+ }
+ limit -= 1;
+
+ while (current_p <= limit) {
+ current = *current_p;
+ current_p = (word *)((char *)current_p + ALIGNMENT);
+
+ if ((ptr_t)current < least_ha) continue;
+ if ((ptr_t)current >= greatest_ha) continue;
+ hhdr = HDR(current);
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+# ifdef ALL_INTERIOR_POINTERS
+ if (hhdr != 0) {
+ register word orig = current;
+
+ current = (word)HBLKPTR(current) + HDR_BYTES;
+ do {
+ current = current - HBLKSIZE*(int)hhdr;
+ hhdr = HDR(current);
+ } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
+ /* current points to the start of the large object */
+ if ((word *)orig - (word *)current
+ >= hhdr->hb_sz) {
+ /* Pointer past the end of the block */
+ GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ continue;
+ }
+ } else {
+ GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ continue;
+ }
+# else
+ GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ continue;
+# endif
+ }
+ displ = HBLKDISPL(current);
+ map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
+ if (map_entry == OBJ_INVALID) {
+ GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ continue;
+ }
+ displ = BYTES_TO_WORDS(displ);
+ displ -= map_entry;
+
+ {
+ register word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(displ);
+ register word mark_word = *mark_word_addr;
+ register word mark_bit = 1 << modWORDSZ(displ);
+
+ if (mark_word & mark_bit) {
+ /* Mark bit is already set */
+ continue;
+ }
+
+ *mark_word_addr = mark_word | mark_bit;
+ }
+
+ GC_mark_stack_top_reg =
+ (* (hhdr -> hb_mark_proc))((word *)(HBLKPTR(current)) + displ, hhdr,
+ GC_mark_stack_top_reg, mark_stack_limit);
+ }
+ }
+ GC_mark_stack_top = GC_mark_stack_top_reg;
+}
+
+/* Allocate or reallocate space for mark stack of size s words */
+/* May silently fail. */
+static void alloc_mark_stack(n)
+word n;
+{
+ mse * new_stack = (mse *)GET_MEM(n * sizeof(struct ms_entry));
+
+ if (GC_mark_stack_size != 0) {
+ if (new_stack != 0) {
+ /* Recycle old space */
+ GC_add_to_heap((struct hblk *)GC_mark_stack,
+ GC_mark_stack_size * sizeof(struct ms_entry));
+ GC_mark_stack = new_stack;
+ GC_mark_stack_size = n;
+ }
+ } else {
+ if (new_stack == 0) {
+ GC_printf("No space for mark stack\n");
+ exit(1);
+ }
+ GC_mark_stack = new_stack;
+ GC_mark_stack_size = n;
+ }
+ GC_mark_stack_top = GC_mark_stack-1;
+}
+
+void GC_mark_init()
+{
+ alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
+}
+
+/* Identical to GC_mark, but guarantee that dropped_some is false */
+/* when we finish. */
+void GC_mark_reliable()
+{
+ dropped_some = FALSE;
+ GC_mark();
+ while (dropped_some) {
+ dropped_some = FALSE;
+# ifdef PRINTSTATS
+ GC_printf("Mark stack overflow; current size = %lu entries\n",
+ GC_mark_stack_size);
+# endif
+ alloc_mark_stack(2*GC_mark_stack_size);
+ GC_remark();
+ }
+}
+
+/*********************************************************************/
+/* Mark all locations reachable via pointers located between b and t */
+/* b is the first location to be checked. t is one past the last */
+/* location to be checked. */
+/*********************************************************************/
+void GC_mark_all(bottom, top)
+ptr_t bottom;
+ptr_t top;
+{
+ word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
+ word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
+
+ if (GC_mark_stack_top != GC_mark_stack-1) {
+ ABORT("GC_mark_all: bad mark stack\n");
+ }
+ if (top == 0) return;
+ GC_mark_stack_top++;
+ GC_mark_stack_top -> mse_start = b;
+ GC_mark_stack_top -> mse_end = t;
+ GC_mark_reliable();
+}
+
+word * GC_buffer; /* Buffer for stack marking */
+# define BUFSIZE 1024
+
+/*
+ * A version of GC_mark_all that treats all interior pointers as valid
+ */
+void GC_mark_all_stack(bottom, top)
+ptr_t bottom;
+ptr_t top;
+{
+# ifdef ALL_INTERIOR_POINTERS
+ GC_mark_all(bottom, top);
+# else
+ word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
+ word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
+ register word *p;
+ register word q;
+ register word r;
+ register word *lim;
+ word * bufptr;
+ word * limit;
+ register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
+ register ptr_t least_ha = GC_least_plausible_heap_addr;
+
+ if (top == 0) return;
+ /* Allocate GC_buffer someplace where collector won't accidentally */
+ /* see old sections. */
+ if (GC_buffer == 0) {
+ GC_buffer = (word *)GC_scratch_alloc((word)(BUFSIZE * sizeof(word)));
+ }
+ bufptr = GC_buffer;
+ limit = GC_buffer+BUFSIZE;
+ /* check all pointers in range and put in buffer if they appear */
+ /* to be valid. */
+ lim = t - 1 /* longword */;
+ for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
+ q = *p;
+ if ((ptr_t)q < least_ha
+ || (ptr_t)q >= greatest_ha) {
+ continue;
+ }
+# ifdef __STDC__
+ r = (word)GC_base((void *)q);
+# else
+ r = (word)GC_base((char *)q);
+# endif
+ if (r == 0) {
+ GC_add_to_black_list_stack(*p);
+ } else {
+ *(bufptr++) = r;
+ if (bufptr == limit) {
+ GC_mark_all((ptr_t)GC_buffer, (ptr_t)limit);
+ bufptr = GC_buffer;
+ }
+ }
+ }
+ if (bufptr != GC_buffer) GC_mark_all((ptr_t)GC_buffer, (ptr_t)bufptr);
+# endif
+}
+
+/* Mark all objects reachable from marked objects in the given block */
+/*ARGSUSED*/
+static void remark_block(h, dummy)
+struct hblk *h;
+word dummy;
+{
+ register hdr * hhdr = HDR(h);
+ register int sz = hhdr -> hb_sz;
+ register word * p;
+ register int word_no;
+ register word * lim;
+ register mse * GC_mark_stack_top_reg = GC_mark_stack_top;
+
+ if (sz < 0) return;
+ if (sz > MAXOBJSZ) {
+ lim = (word *)(h + 1);
+ } else {
+ lim = (word *)(h + 1) - sz;
+ }
+
+ for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
+ p += sz, word_no += sz) {
+ if (mark_bit_from_hdr(hhdr, word_no)) {
+ /* Mark from fields inside the object */
+ GC_mark_stack_top_reg++;
+ GC_mark_stack_top_reg -> mse_start = p;
+ GC_mark_stack_top_reg -> mse_end = p + sz;
+ }
+ }
+ GC_mark();
+}
+
+/*
+ * Traverse the heap. Mark all objects reachable from marked objects.
+ */
+void GC_remark()
+{
+ GC_apply_to_all_blocks(remark_block, 0);
+}
+
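The interplay of GC_mark_reliable, alloc_mark_stack and GC_remark above is easiest to see in miniature. The standalone sketch below (not part of the patch) marks a small invented object graph with a deliberately tiny stack, notes when it had to drop work, then doubles the stack and rescans from already-marked objects until nothing is dropped. All names, sizes and the graph itself are made up for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    #define NOBJ 64

    static int edge[NOBJ][2];          /* two successors per object          */
    static unsigned char marked[NOBJ];
    static int dropped_some;           /* plays the role of the flag above   */

    static int *stack;
    static int cap, top;

    static void push(int o)
    {
        if (top == cap) { dropped_some = 1; return; }   /* overflow: drop it */
        stack[top++] = o;
    }

    static void drain(void)            /* one round of ordinary marking      */
    {
        while (top > 0) {
            int o = stack[--top];
            if (marked[o]) continue;
            marked[o] = 1;
            push(edge[o][0]);
            push(edge[o][1]);
        }
    }

    int main(void)
    {
        int i, passes = 0;

        for (i = 0; i < NOBJ; i++) {   /* an invented, fully reachable graph */
            edge[i][0] = (2 * i + 1) % NOBJ;
            edge[i][1] = (2 * i + 2) % NOBJ;
        }
        cap = 1;                       /* deliberately too small             */
        stack = malloc(cap * sizeof(int));
        if (stack == 0) return 1;
        push(0);                       /* object 0 is the only root          */
        drain();
        while (dropped_some) {         /* the GC_mark_reliable loop          */
            int *bigger;
            dropped_some = 0;
            cap *= 2;                  /* alloc_mark_stack(2*size)           */
            bigger = realloc(stack, cap * sizeof(int));
            if (bigger == 0) { free(stack); return 1; }
            stack = bigger;
            for (i = 0; i < NOBJ; i++) {        /* GC_remark: rescan marked  */
                if (marked[i]) {
                    push(edge[i][0]);
                    push(edge[i][1]);
                    drain();
                }
            }
            passes++;
        }
        for (i = 0; i < NOBJ; i++) {
            if (!marked[i]) { printf("missed %d\n", i); return 1; }
        }
        printf("all %d objects marked; final stack size %d after %d retries\n",
               NOBJ, cap, passes);
        free(stack);
        return 0;
    }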
diff --git a/mark_roots.c b/mark_roots.c
index a2607b58..5cc55649 100644
--- a/mark_roots.c
+++ b/mark_roots.c
@@ -1,60 +1,203 @@
# include <stdio.h>
-# include "gc.h"
+# include "gc_private.h"
+# define MAX_ROOT_SETS 50
+# ifdef PCR
+# include "pcr/il/PCR_IL.h"
+# include "pcr/th/PCR_ThCtl.h"
+# include "pcr/mm/PCR_MM.h"
+# endif
-/* Call the mark routines (tl_mark for a single pointer, mark_all */
-/* on groups of pointers) on every top level accessible pointer. */
-/* This is source language specific. The following works for C. */
+struct roots {
+ ptr_t r_start;
+ ptr_t r_end;
+};
-mark_roots()
+static struct roots static_roots[MAX_ROOT_SETS];
+static n_root_sets = 0;
+
+word GC_root_size = 0;
+
+void GC_add_roots(b, e)
+char * b; char * e;
{
- int * dummy = 0;
- long sp_approx = 0;
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_add_roots_inner(b, e);
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
- /*
- * mark from registers - i.e., call tl_mark(i) for each
- * register i
- */
- mark_regs(ALIGNMENT); /* usually defined in machine_dep.c */
-# ifdef DEBUG
- gc_printf("done marking from regs - calling mark_all\n");
-# endif
- /* put stack pointer into sp_approx */
- /* and mark everything on the stack. */
- /* A hack */
- sp_approx = ((long)(&dummy));
- mark_all( sp_approx, stacktop, ALIGNMENT );
+void GC_add_roots_inner(b, e)
+char * b; char * e;
+{
+ /* We exclude GC data structures from root sets. It's usually safe */
+ /* to mark from those, but it is a waste of time. */
+ if ( (ptr_t)b < beginGC_arrays && (ptr_t)e > beginGC_arrays) {
+ if ((ptr_t)e <= endGC_arrays) {
+ e = (char *)beginGC_arrays;
+ } else {
+ GC_add_roots_inner(b, (char *)beginGC_arrays);
+ GC_add_roots_inner((char *)endGC_arrays, e);
+ return;
+ }
+ } else if ((ptr_t)b < endGC_arrays && (ptr_t)e > endGC_arrays) {
+ b = (char *)endGC_arrays;
+ }
+ if (n_root_sets == MAX_ROOT_SETS) {
+ ABORT("Too many root sets\n");
+ }
+ static_roots[n_root_sets].r_start = (ptr_t)b;
+ static_roots[n_root_sets].r_end = (ptr_t)e;
+ GC_root_size += (ptr_t)e - (ptr_t)b;
+ n_root_sets++;
+}
+void GC_clear_roots()
+{
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ n_root_sets = 0;
+ GC_root_size = 0;
+ UNLOCK();
+ ENABLE_SIGNALS();
+}
- /* Mark everything in data and bss segments. */
- /* Skip gc data structures. (It's OK to mark these, but it wastes time.) */
- {
- extern char etext, end;
+# ifdef PCR
+PCR_ERes GC_mark_thread_stack(PCR_Th_T *t, PCR_Any dummy)
+{
+ struct PCR_ThCtl_TInfoRep info;
+ PCR_ERes result;
+
+ info.ti_stkLow = info.ti_stkHi = 0;
+ result = PCR_ThCtl_GetInfo(t, &info);
+ GC_mark_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
+ return(result);
+}
- mark_all(DATASTART, begin_gc_arrays, ALIGNMENT);
- mark_all(end_gc_arrays, &end, ALIGNMENT);
- }
+PCR_ERes GC_mark_old_obj(void *p, size_t size, PCR_Any data)
+{
+ GC_mark_all((ptr_t)p, (ptr_t)p + size);
+ return(PCR_ERes_okay);
}
+# else
+ptr_t GC_approx_sp()
+{
+ word dummy;
+
+ return((ptr_t)(&dummy));
+}
+# endif
+/*
+ * Call the mark routines (GC_tl_mark for a single pointer, GC_mark_all
+ * on groups of pointers) on every top level accessible pointer.
+ * This is source language specific. The following works for C.
+ */
-/* Top level mark routine. Mark from the object pointed to by p. */
-/* This is defined here, since alignment is not an explicit parameter. */
-/* Thus the routine is language specific. */
-/* Tl_mark is normally called by mark_regs, and thus must be defined. */
-void tl_mark(p)
-word * p;
+GC_mark_roots()
{
- word * q;
+ register int i;
- q = p;
- mark_all(&q, (&q)+1, ALIGNMENT);
+ /*
+ * mark from registers - i.e., call GC_tl_mark(i) for each
+ * register i
+ */
+ GC_mark_regs(); /* usually defined in machine_dep.c */
+
+
+# ifdef PCR
+ /* Traverse data allocated by previous memory managers. */
+ {
+ extern struct PCR_MM_ProcsRep * GC_old_allocator;
+
+ if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
+ GC_mark_old_obj, 0)
+ != PCR_ERes_okay) {
+ ABORT("Old object enumeration failed");
+ }
+ }
+
+ /* Add new static data areas of dynamically loaded modules. */
+ {
+ PCR_IL_LoadedFile * p = PCR_IL_GetLoadedFiles();
+ static PCR_IL_LoadedFile * last_already_added = NIL;
+ /* Last file that was already added to the list of roots. */
+ PCR_IL_LoadedFile * last_committed;
+ PCR_IL_LoadedSegment * q;
+
+ if (p != NIL && last_already_added == NIL) {
+ /* Switch to obtaining roots from the dynamic loader. */
+ /* Make sure the loader is properly initialized and that */
+ /* it has a correct description of PCR static data. */
+ PCR_IL_Lock(PCR_Bool_false,
+ PCR_allSigsBlocked, PCR_waitForever);
+ PCR_IL_Unlock();
+ /* Discard old root sets. */
+ n_root_sets = 0;
+ GC_root_size = 0;
+ /* We claim there are no dynamic libraries, or they */
+ /* don't contain roots, since they allocate using the */
+ /* system malloc, and they can't retain our pointers. */
+ }
+ /* Skip uncommitted files */
+ while (p != NIL && !(p -> lf_commitPoint)) {
+ /* The loading of this file has not yet been committed */
+ /* Hence its description could be inconsistent. */
+ /* Furthermore, it hasn't yet been run. Hence its data */
+ /* segments can possibly reference heap allocated objects.*/
+ p = p -> lf_prev;
+ }
+ last_committed = p;
+ for (; p != last_already_added; p = p -> lf_prev) {
+ for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
+ if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
+ == PCR_IL_SegFlags_Traced_on) {
+ GC_add_roots_inner
+ ((char *)(q -> ls_addr),
+ (char *)(q -> ls_addr) + q -> ls_bytes);
+ }
+ }
+ }
+ last_already_added = last_committed;
+ }
+
+
+ /* Traverse all thread stacks. */
+ if (PCR_ERes_IsErr(
+ PCR_ThCtl_ApplyToAllOtherThreads(GC_mark_thread_stack,0))
+ || PCR_ERes_IsErr(GC_mark_thread_stack(PCR_Th_CurrThread(), 0))) {
+ ABORT("Thread stack marking failed\n");
+ }
+# else
+ /* Mark everything on the stack. */
+# ifdef STACK_GROWS_DOWN
+ GC_mark_all_stack( GC_approx_sp(), GC_stackbottom );
+# else
+ GC_mark_all_stack( GC_stackbottom, GC_approx_sp() );
+# endif
+# endif
+
+ /* Mark everything in static data areas */
+ for (i = 0; i < n_root_sets; i++) {
+ GC_mark_all(static_roots[i].r_start, static_roots[i].r_end);
+ }
}
-/* Interface to mark_all that does not require alignment parameter. */
-/* Defined here to keep mach_dep.c programming language independent. */
-void tl_mark_all(b,t)
-word *b, *t;
+/*
+ * Top level GC_mark routine. Mark from the object pointed to by p.
+ * GC_tl_mark is normally called by GC_mark_regs, and thus must be defined.
+ */
+void GC_tl_mark(p)
+word p;
{
- mark_all(b, t, ALIGNMENT);
+ word q;
+
+ q = p;
+ GC_mark_all_stack((ptr_t)(&q), (ptr_t)((&q)+1));
}
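GC_add_roots_inner above clips each requested root range against the collector's own data area (beginGC_arrays..endGC_arrays) so the collector never wastes time scanning itself. The following standalone sketch shows just that interval arithmetic; the numeric bounds are invented stand-ins for the real addresses.

    #include <stdio.h>

    static unsigned long excl_lo = 400, excl_hi = 500;  /* invented bounds    */

    static void add_root(unsigned long b, unsigned long e)
    {
        if (b < excl_lo && e > excl_lo) {
            if (e <= excl_hi) {
                e = excl_lo;                 /* clip the tail                 */
            } else {
                add_root(b, excl_lo);        /* piece below the excluded area */
                add_root(excl_hi, e);        /* piece above it                */
                return;
            }
        } else if (b < excl_hi && e > excl_hi) {
            b = excl_hi;                     /* clip the head                 */
        }
        printf("root set [%lu, %lu)\n", b, e);
    }

    int main(void)
    {
        add_root(100, 1000);    /* expect [100,400) and [500,1000)            */
        add_root(450, 1000);    /* expect [500,1000)                          */
        return 0;
    }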
diff --git a/mips_mach_dep.s b/mips_mach_dep.s
index 13bab310..319c24e0 100644
--- a/mips_mach_dep.s
+++ b/mips_mach_dep.s
@@ -1,140 +1,26 @@
-# define call_mark(x) move $4,x; jal tl_mark
-
- # Set up _gc_arrays with labels in the middle
- .data
- .globl _gc_arrays
- .globl aobjfreelist
- .globl objfreelist
- .align 2
-_gc_arrays:
-aobjfreelist:
- .word 0 : 513
-objfreelist:
- .word 0 : 513
- # either hblkmap or hblklist. Reserve space for HBLK_MAP, which is bigger.
- .word 0 : 8192
+# define call_GC_mark(x) move $4,x; jal GC_tl_mark
.text
# Mark from machine registers that are saved by C compiler
- .globl mark_regs
- .ent mark_regs
-mark_regs:
+ .globl GC_mark_regs
+ .ent GC_mark_regs
+GC_mark_regs:
subu $sp,4 ## Need to save only return address
sw $31,4($sp)
.mask 0x80000000,0
.frame $sp,4,$31
- call_mark($2)
- call_mark($3)
- call_mark($16)
- call_mark($17)
- call_mark($18)
- call_mark($19)
- call_mark($20)
- call_mark($21)
- call_mark($22)
- call_mark($23)
- call_mark($30)
+ call_GC_mark($2)
+ call_GC_mark($3)
+ call_GC_mark($16)
+ call_GC_mark($17)
+ call_GC_mark($18)
+ call_GC_mark($19)
+ call_GC_mark($20)
+ call_GC_mark($21)
+ call_GC_mark($22)
+ call_GC_mark($23)
+ call_GC_mark($30)
lw $31,4($sp)
addu $sp,4
j $31
- .end mark_regs
-
- .globl allocobj
- .ent allocobj
-allocobj:
- subu $sp,68
- sw $31,68($sp)
- sw $25,64($sp)
- sw $24,60($sp)
- sw $15,56($sp)
- sw $14,52($sp)
- sw $13,48($sp)
- sw $12,44($sp)
- sw $11,40($sp)
- sw $10,36($sp)
- sw $9,32($sp)
- sw $8,28($sp)
- sw $7,24($sp)
- sw $6,20($sp)
- sw $5,16($sp)
- sw $4,12($sp)
- sw $3,8($sp)
- .set noat
- sw $at,4($sp)
- .set at
- .mask 0x8300fffa,0
- .frame $sp,68,$31
- jal _allocobj
- lw $31,68($sp)
- lw $25,64($sp)
- lw $24,60($sp)
- lw $15,56($sp)
- lw $14,52($sp)
- lw $13,48($sp)
- lw $12,44($sp)
- lw $11,40($sp)
- lw $10,36($sp)
- lw $9,32($sp)
- lw $8,28($sp)
- lw $7,24($sp)
- lw $6,20($sp)
- lw $5,16($sp)
- lw $4,12($sp)
- lw $3,8($sp)
- # don't restore $2, since it's the return value
- .set noat
- lw $at,4($sp)
- .set at
- addu $sp,68
- j $31
- .end allocobj
-
- .globl allocaobj
- .ent allocaobj
-allocaobj:
- subu $sp,68
- sw $31,68($sp)
- sw $25,64($sp)
- sw $24,60($sp)
- sw $15,56($sp)
- sw $14,52($sp)
- sw $13,48($sp)
- sw $12,44($sp)
- sw $11,40($sp)
- sw $10,36($sp)
- sw $9,32($sp)
- sw $8,28($sp)
- sw $7,24($sp)
- sw $6,20($sp)
- sw $5,16($sp)
- sw $4,12($sp)
- sw $3,8($sp)
- .set noat
- sw $at,4($sp)
- .set at
- .mask 0x8300fffa,0
- .frame $sp,68,$31
- jal _allocaobj
- lw $31,68($sp)
- lw $25,64($sp)
- lw $24,60($sp)
- lw $15,56($sp)
- lw $14,52($sp)
- lw $13,48($sp)
- lw $12,44($sp)
- lw $11,40($sp)
- lw $10,36($sp)
- lw $9,32($sp)
- lw $8,28($sp)
- lw $7,24($sp)
- lw $6,20($sp)
- lw $5,16($sp)
- lw $4,12($sp)
- lw $3,8($sp)
- # don't restore $2, since it's the return value
- .set noat
- lw $at,4($sp)
- .set at
- addu $sp,68
- j $31
- .end allocaobj
+ .end GC_mark_regs
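The assembly above simply hands each callee-saved register to GC_tl_mark. On machines without such a file, a common portable approximation (not what this patch does) is to spill the registers with setjmp and scan the jmp_buf like any other memory. In the sketch below, examine_word is a hypothetical stand-in for GC_tl_mark, and the layout of jmp_buf is of course implementation-defined, so this only approximates real register scanning.

    #include <setjmp.h>
    #include <stdio.h>

    static void examine_word(unsigned long w)     /* stands in for GC_tl_mark */
    {
        printf("candidate: 0x%lx\n", w);
    }

    static void mark_regs_sketch(void)
    {
        jmp_buf regs;
        unsigned long *p   = (unsigned long *)regs;
        unsigned long *lim = p + sizeof(regs) / sizeof(unsigned long);

        if (setjmp(regs) == 0) {      /* spills the registers into memory     */
            for (; p < lim; p++) {
                examine_word(*p);     /* the collector would mark from *p     */
            }
        }
    }

    int main(void)
    {
        mark_regs_sketch();
        return 0;
    }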
diff --git a/misc.c b/misc.c
index 31264e92..a19e0dca 100644
--- a/misc.c
+++ b/misc.c
@@ -1,11 +1,11 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991,1992 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
- * Permission is hereby granted to copy this compiler for any purpose,
+ * Permission is hereby granted to copy this garbage collector for any purpose,
* provided the above notices are retained on all copies.
*/
@@ -16,329 +16,670 @@
#include <stdio.h>
#include <signal.h>
-#include "gc.h"
+#define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */
+#include "gc_private.h"
-int dont_gc = 0;
-extern long mem_found;
+# ifdef THREADS
+# ifdef PCR
+# include "pcr/il/PCR_IL.h"
+ struct PCR_Th_MLRep GC_allocate_ml;
+# else
+ --> declare allocator lock here
+# endif
+# endif
+
+struct _GC_arrays GC_arrays = { 0 };
+
+/* Initialize GC_obj_kinds properly and standard free lists properly. */
+/* This must be done statically since they may be accessed before */
+/* GC_init is called. */
+struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
+/* PTRFREE */ { GC_aobjfreelist, GC_areclaim_list, GC_no_mark_proc, FALSE },
+/* NORMAL */ { GC_objfreelist, GC_reclaim_list, GC_normal_mark_proc, TRUE },
+};
+
+ptr_t GC_stackbottom = 0;
+
+word GC_hincr;
+
+int GC_n_kinds = 2;
+
+bool GC_dont_gc = 0;
+
+extern signed_word GC_mem_found;
+
+# ifdef ALL_INTERIOR_POINTERS
+# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1))
+# else
+# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
+# endif
# ifdef MERGE_SIZES
-# if MAXOBJSZ == MAXAOBJSZ
-# define MAXSZ MAXOBJSZ
-# else
- --> causes problems here, since we cant map any size to a
- size that doesnt have a free list. Either initialization
- needs to be cleverer, or we need separate maps for atomic
- and composite objects.
+ /* Set things up so that GC_size_map[i] >= words(i), */
+ /* but not too much bigger */
+ /* and so that size_map contains relatively few distinct entries */
+ /* This is stolen from Russ Atkinson's Cedar quantization */
+ /* algorithm (but we precompute it). */
+
+# if (CPP_WORDSZ != 32)
+ --> fix the following code
# endif
- long size_map[MAXSZ+1];
- /* Set things up so that size_map[i] >= i, but not too much bigger */
- /* and so that size_map contains relatively few distinct entries */
- void init_size_map()
+
+
+ void GC_init_size_map()
{
- register int i;
- register int i_rounded_up = 0;
+ register unsigned i;
+ register unsigned sz_rounded_up = 0;
- for (i = 1; i < 8; i++) {
+ /* Map size 0 to 1. This avoids problems at lower levels. */
+ GC_size_map[0] = 1;
+ /* One word objects don't have to be 2 word aligned. */
+ GC_size_map[1] = 1;
+ GC_size_map[2] = 1;
+ GC_size_map[3] = 1;
+ GC_size_map[4] = ROUNDED_UP_WORDS(4);
+ for (i = 5; i <= 32; i++) {
# ifdef ALIGN_DOUBLE
- size_map[i] = (i + 1) & (~1);
+ GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 1) & (~1);
# else
- size_map[i] = i;
+ GC_size_map[i] = ROUNDED_UP_WORDS(i);
# endif
}
- for (i = 8; i <= MAXSZ; i++) {
- if (i_rounded_up < i) {
-# ifdef ALIGN_DOUBLE
- i_rounded_up = (i + (i >> 1) + 1) & (~1);
-# else
- i_rounded_up = i + (i >> 1);
-# endif
- if (i_rounded_up > MAXSZ) {
- i_rounded_up = MAXSZ;
+
+ for (i = 33; i <= WORDS_TO_BYTES(MAXOBJSZ); i++) {
+ if (sz_rounded_up < ROUNDED_UP_WORDS(i)) {
+ register int size = ROUNDED_UP_WORDS(i);
+ register unsigned m = 0;
+
+ while (size > 7) {
+ m += 1;
+ size += 1;
+ size >>= 1;
+ }
+ sz_rounded_up = size << m;
+ if (sz_rounded_up > MAXOBJSZ) {
+ sz_rounded_up = MAXOBJSZ;
}
}
- size_map[i] = i_rounded_up;
+ GC_size_map[i] = sz_rounded_up;
}
}
# endif
+# ifdef ALL_INTERIOR_POINTERS
+# define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ))
+# define ADD_SLOP(bytes) ((bytes)+1)
+# else
+# define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ))
+# define ADD_SLOP(bytes) (bytes)
+# endif
-/* allocate lb bytes of atomic data */
-struct obj * gc_malloc_atomic(lb)
-int lb;
-{
-register struct obj *op;
-register struct obj **opp;
-register int lw = BYTES_TO_WORDS(lb + (sizeof (word)) -1);
+/*
+ * The following is a gross hack to deal with a problem that can occur
+ * on machines that are sloppy about stack frame sizes, notably SPARC.
+ * Bogus pointers may be written to the stack and not cleared for
+ * a LONG time, because they always fall into holes in stack frames
+ * that are not written. We partially address this by randomly clearing
+ * sections of the stack whenever we get control.
+ */
+word GC_stack_last_cleared = 0; /* GC_no when we last did this */
+# define CLEAR_SIZE 213
+# define CLEAR_THRESHOLD 10000
+# define DEGRADE_RATE 50
+
+ptr_t GC_min_sp; /* Coolest stack pointer value from which we've */
+ /* already cleared the stack. */
+
+# ifdef STACK_GROWS_DOWN
+# define COOLER_THAN >
+# define HOTTER_THAN <
+# define MAKE_COOLER(x,y) if ((word)(x)+(y) > (word)(x)) {(x) += (y);} \
+ else {(x) = (ptr_t)ONES;}
+# define MAKE_HOTTER(x,y) (x) -= (y)
+# else
+# define COOLER_THAN <
+# define HOTTER_THAN >
+# define MAKE_COOLER(x,y) if ((word)(x)-(y) < (word)(x)) {(x) -= (y);} else {(x) = 0;}
+# define MAKE_HOTTER(x,y) (x) += (y)
+# endif
-# ifdef VERBOSE
- gc_printf("Here we are in gc_malloc_atomic(%d)\n",lw);
+ptr_t GC_high_water;
+ /* "hottest" stack pointer value we have seen */
+ /* recently. Degrades over time. */
+/*ARGSUSED*/
+void GC_clear_stack_inner(d)
+word *d;
+{
+ word dummy[CLEAR_SIZE];
+
+ bzero((char *)dummy, (int)(CLEAR_SIZE*sizeof(word)));
+# ifdef THREADS
+ GC_noop(dummy);
+# else
+ if ((ptr_t)(dummy) COOLER_THAN GC_min_sp) {
+ GC_clear_stack_inner(dummy);
+ }
# endif
- if( lw <= MAXAOBJSZ ) {
+}
+
+void GC_clear_stack()
+{
+ word dummy;
+
+
+# ifdef THREADS
+ GC_clear_stack_inner(&dummy);
+# else
+ if (GC_gc_no > GC_stack_last_cleared) {
+ /* Start things over, so we clear the entire stack again */
+ if (GC_stack_last_cleared == 0) GC_high_water = GC_stackbottom;
+ GC_min_sp = GC_high_water;
+ GC_stack_last_cleared = GC_gc_no;
+ }
+ /* Adjust GC_high_water */
+ MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE));
+ if ((word)(&dummy) HOTTER_THAN (word)GC_high_water) {
+ GC_high_water = (ptr_t)(&dummy);
+ }
+ if ((word)(&dummy) COOLER_THAN (word)GC_min_sp) {
+ GC_clear_stack_inner(&dummy);
+ GC_min_sp = (ptr_t)(&dummy);
+ }
+# endif
+}
+
+/* allocate lb bytes for an object of kind k */
+ptr_t GC_generic_malloc(lb, k)
+register word lb;
+register int k;
+{
+register word lw;
+register ptr_t op;
+register ptr_t *opp;
+DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ if( SMALL_OBJ(lb) ) {
# ifdef MERGE_SIZES
- lw = size_map[lw];
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+ if (lw == 0) lw = 1;
# endif
- opp = &(aobjfreelist[lw]);
- if( (op = *opp) == ((struct obj *)0) ) {
- op = _allocaobj(lw);
- }
-# ifdef DEBUG
- if ((op -> obj_link != ((struct obj *) 0)
- && (((unsigned)(op -> obj_link)) > ((unsigned) HEAPLIM)
- || ((unsigned)(op -> obj_link)) < ((unsigned) HEAPSTART)))) {
- fprintf(stderr, "Bad free list in gc_malloc_atomic\n");
- abort(op);
+ opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ if( (op = *opp) == 0 ) {
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ ENABLE_SIGNALS();
+ /* This may have fixed GC_size_map */
+ UNLOCK();
+ return(GC_generic_malloc(lb, k));
}
-# endif
- *opp = op->obj_link;
- op->obj_link = (struct obj *)0;
+ GC_clear_stack();
+ op = GC_allocobj(lw, k);
+ if (op == 0) goto out;
+ }
+ /* Here everything is in a consistent state. */
+ /* We assume the following assignment is */
+ /* atomic. If we get aborted */
+ /* after the assignment, we lose an object, */
+ /* but that's benign. */
+ /* Volatile declarations may need to be added */
+ /* to prevent the compiler from breaking things.*/
+ *opp = obj_link(op);
+ obj_link(op) = 0;
} else {
register struct hblk * h;
- if (!sufficient_hb(-lw) && !dont_gc) {
- gcollect();
+
+ if (!GC_is_initialized) GC_init_inner();
+ lw = ROUNDED_UP_WORDS(lb);
+ if (!GC_sufficient_hb(lw, k) && !GC_dont_gc) {
+ GC_gcollect_inner(FALSE);
+ }
+ h = GC_allochblk(lw, k);
+ if (h == 0) {
+ op = 0;
+ } else {
+ op = (ptr_t) (h -> hb_body);
}
-# ifdef VERBOSE
- gc_printf("gc_malloc_atomic calling allochblk(%x)\n",lw);
-# endif
- h = allochblk(-lw);
- add_hblklist(h);
- op = (struct obj *) (h -> hb_body);
}
- return(op);
+ GC_words_allocd += lw;
+
+out:
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return((ptr_t)op);
}
-/* allocate lb bytes of possibly composite data */
-struct obj * gc_malloc(lb)
-int lb;
+/* Analogous to the above, but assumes a small object size, and */
+/* bypasses MERGE_SIZES mechanism. Used by gc_inline.h. */
+ptr_t GC_generic_malloc_words_small(lw, k)
+register word lw;
+register int k;
{
-register struct obj *op;
-register struct obj **opp;
-register int lw = BYTES_TO_WORDS(lb + (sizeof (word)) -1);
+register ptr_t op;
+register ptr_t *opp;
+DCL_LOCK_STATE;
- if( lw <= MAXOBJSZ ) {
+ LOCK();
+ DISABLE_SIGNALS();
+ opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ if( (op = *opp) == 0 ) {
+ if (!GC_is_initialized) {
+ GC_init_inner();
+ }
+ GC_clear_stack();
+ op = GC_allocobj(lw, k);
+ if (op == 0) goto out;
+ }
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+
+out:
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return((ptr_t)op);
+}
+
+/* Allocate lb bytes of atomic (pointerfree) data */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc_atomic(size_t lb)
+# else
+ extern_ptr_t GC_malloc_atomic(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t * opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
# ifdef MERGE_SIZES
- lw = size_map[lw];
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
# endif
- opp = &(objfreelist[lw]);
- if( (op = *opp) == ((struct obj *)0) ) {
- op = _allocobj(lw);
+ opp = &(GC_aobjfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ return(GC_generic_malloc((word)lb, PTRFREE));
}
-# ifdef DEBUG
- if ((op -> obj_link != ((struct obj *) 0)
- && (((unsigned)(op -> obj_link)) > ((unsigned) HEAPLIM)
- || ((unsigned)(op -> obj_link)) < ((unsigned) HEAPSTART)))) {
- fprintf(stderr, "Bad free list in gc_malloc\n");
- abort(op);
- }
-# endif
- *opp = op->obj_link;
- op->obj_link = (struct obj *)0;
- } else {
- register struct hblk * h;
-
- if (!sufficient_hb(lw) && !dont_gc) {
- gcollect();
- }
-# ifdef VERBOSE
- gc_printf("gc_malloc calling allochblk(%x)\n",lw);
-# endif
- h = allochblk(lw);
- add_hblklist(h);
- op = (struct obj *) (h -> hb_body);
- }
- return(op);
+ /* See above comment on signals. */
+ *opp = obj_link(op);
+ GC_words_allocd += lw;
+ FASTUNLOCK();
+ return((extern_ptr_t) op);
+ } else {
+ return((extern_ptr_t)
+ GC_generic_malloc((word)lb, PTRFREE));
+ }
}
-void gc_free();
+/* Allocate lb bytes of composite (pointerful) data */
+# ifdef __STDC__
+ extern_ptr_t GC_malloc(size_t lb)
+# else
+ extern_ptr_t GC_malloc(lb)
+ size_t lb;
+# endif
+{
+register ptr_t op;
+register ptr_t *opp;
+register word lw;
+DCL_LOCK_STATE;
+
+ if( SMALL_OBJ(lb) ) {
+# ifdef MERGE_SIZES
+ lw = GC_size_map[lb];
+# else
+ lw = ROUNDED_UP_WORDS(lb);
+# endif
+ opp = &(GC_objfreelist[lw]);
+ FASTLOCK();
+ if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
+ FASTUNLOCK();
+ return(GC_generic_malloc((word)lb, NORMAL));
+ }
+ /* See above comment on signals. */
+ *opp = obj_link(op);
+ obj_link(op) = 0;
+ GC_words_allocd += lw;
+ FASTUNLOCK();
+ return((extern_ptr_t) op);
+ } else {
+ return((extern_ptr_t)
+ GC_generic_malloc((word)lb, NORMAL));
+ }
+}
/* Change the size of the block pointed to by p to contain at least */
/* lb bytes. The object may be (and quite likely will be) moved. */
-/* The new object is assumed to be atomic if the original object was. */
+/* The kind (e.g. atomic) is the same as that of the old. */
/* Shrinking of large blocks is not implemented well. */
-struct obj * gc_realloc(p,lb)
-struct obj * p;
-int lb;
+# ifdef __STDC__
+ extern_ptr_t GC_realloc(extern_ptr_t p, size_t lb)
+# else
+ extern_ptr_t GC_realloc(p,lb)
+ extern_ptr_t p;
+ size_t lb;
+# endif
{
-register struct obj *op;
-register struct obj **opp;
register struct hblk * h;
-register int sz; /* Current size in bytes */
-register int orig_sz; /* Original sz in bytes */
-int is_atomic;
+register hdr * hhdr;
+register signed_word sz; /* Current size in bytes */
+register word orig_sz; /* Original sz in bytes */
+int obj_kind;
+ if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
h = HBLKPTR(p);
- sz = h -> hb_sz;
- if (sz < 0) {
- sz = -sz;
- is_atomic = TRUE;
- } else {
- is_atomic = FALSE;
- }
+ hhdr = HDR(h);
+ sz = hhdr -> hb_sz;
+ obj_kind = hhdr -> hb_obj_kind;
sz = WORDS_TO_BYTES(sz);
orig_sz = sz;
- if (is_atomic) {
- if (sz > WORDS_TO_BYTES(MAXAOBJSZ)) {
- /* Round it up to the next whole heap block */
- sz = (sz+sizeof(struct hblkhdr)+HBLKSIZE-1)
- & (~HBLKMASK);
- sz -= sizeof(struct hblkhdr);
- h -> hb_sz = BYTES_TO_WORDS(sz);
- }
- if (lb <= sz) {
- if (lb >= (sz >> 1)) {
- /* Already big enough, but not too much bigger than object. */
- /* Ignore the request. */
- /* If sz is big enough, we should probably deallocate */
- /* part of the heap block here, but ... */
- return(p);
- } else {
- /* shrink */
- struct obj * result = gc_malloc_atomic(lb);
-
- bcopy(p, result, lb);
- gc_free(p);
- return(result);
- }
- } else {
- /* grow */
- struct obj * result = gc_malloc_atomic(lb);
-
- bcopy(p, result, sz);
- gc_free(p);
- return(result);
- }
- } else /* composite */ {
- if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
+ if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
/* Round it up to the next whole heap block */
- sz = (sz+sizeof(struct hblkhdr)+HBLKSIZE-1)
+
+ sz = (sz+HDR_BYTES+HBLKSIZE-1)
& (~HBLKMASK);
- sz -= sizeof(struct hblkhdr);
- h -> hb_sz = BYTES_TO_WORDS(sz);
+ sz -= HDR_BYTES;
+ hhdr -> hb_sz = BYTES_TO_WORDS(sz);
/* Extra area is already cleared by allochblk. */
- }
- if (lb <= sz) {
+ }
+ if (ADD_SLOP(lb) <= sz) {
if (lb >= (sz >> 1)) {
if (orig_sz > lb) {
/* Clear unneeded part of object to avoid bogus pointer */
/* tracing. */
- bzero(((char *)p) + lb, orig_sz - lb);
+ bzero(((char *)p) + lb, (int)(orig_sz - lb));
}
return(p);
} else {
/* shrink */
- struct obj * result = gc_malloc(lb);
+ extern_ptr_t result = GC_generic_malloc((word)lb, obj_kind);
- bcopy(p, result, lb);
- gc_free(p);
+ if (result == 0) return(0);
+ /* Could also return original object. But this */
+ /* gives the client warning of imminent disaster. */
+ bcopy(p, result, (int)lb);
+ GC_free(p);
return(result);
}
- } else {
+ } else {
/* grow */
- struct obj * result = gc_malloc(lb);
+ extern_ptr_t result = GC_generic_malloc((word)lb, obj_kind);
- bcopy(p, result, sz);
- gc_free(p);
+ if (result == 0) return(0);
+ bcopy(p, result, (int)sz);
+ GC_free(p);
return(result);
- }
}
}
-/* Explicitly deallocate an object p */
-void gc_free(p)
-struct obj *p;
+/* Return a pointer to the base address of p, given a pointer to */
+/* an address within an object. Return 0 otherwise. */
+# ifdef __STDC__
+ extern_ptr_t GC_base(extern_ptr_t p)
+# else
+ extern_ptr_t GC_base(p)
+ extern_ptr_t p;
+# endif
{
+ register word r;
register struct hblk *h;
+ register hdr *candidate_hdr;
+
+ r = (word)p;
+ h = HBLKPTR(r);
+ candidate_hdr = HDR(r);
+ if (candidate_hdr == 0) return(0);
+ /* If it's a pointer to the middle of a large object, move it */
+ /* to the beginning. */
+ while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
+ h = h - (int)candidate_hdr;
+ r = (word)h + HDR_BYTES;
+ candidate_hdr = HDR(h);
+ }
+ if (candidate_hdr -> hb_map == GC_invalid_map) return(0);
+ /* Make sure r points to the beginning of the object */
+ r &= ~(WORDS_TO_BYTES(1) - 1);
+ {
+ register int offset =
+ (word *)r - (word *)(HBLKPTR(r)) - HDR_WORDS;
+ register signed_word sz = candidate_hdr -> hb_sz;
+ register int correction;
+
+ correction = offset % sz;
+ r -= (WORDS_TO_BYTES(correction));
+ if (((word *)r + sz) > (word *)(h + 1)
+ && sz <= MAXOBJSZ) {
+ return(0);
+ }
+ }
+ return((extern_ptr_t)r);
+}
+
+/* Return the size of an object, given a pointer to its base. */
+/* (For small obects this also happens to work from interior pointers, */
+/* but that shouldn't be relied upon.) */
+# ifdef __STDC__
+ size_t GC_size(extern_ptr_t p)
+# else
+ size_t GC_size(p)
+ extern_ptr_t p;
+# endif
+{
register int sz;
- register word * i;
- register word * limit;
+ register hdr * hhdr = HDR(p);
+
+ sz = WORDS_TO_BYTES(hhdr -> hb_sz);
+ if (sz < 0) {
+ return(-sz);
+ } else {
+ return(sz);
+ }
+}
+/* Explicitly deallocate an object p. */
+# ifdef __STDC__
+ void GC_free(extern_ptr_t p)
+# else
+ void GC_free(p)
+ extern_ptr_t p;
+# endif
+{
+ register struct hblk *h;
+ register hdr *hhdr;
+ register signed_word sz;
+ register ptr_t * flh;
+ register struct obj_kind * ok;
+
+ if (p == 0) return;
+ /* Required by ANSI. It's not my fault ... */
h = HBLKPTR(p);
- sz = h -> hb_sz;
- if (sz < 0) {
- sz = -sz;
- if (sz > MAXAOBJSZ) {
- h -> hb_uninit = 1;
- del_hblklist(h);
- freehblk(h);
- } else {
- p -> obj_link = aobjfreelist[sz];
- aobjfreelist[sz] = p;
- }
+ hhdr = HDR(h);
+ sz = hhdr -> hb_sz;
+ ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
+
+ if (sz > MAXOBJSZ) {
+ GC_freehblk(h);
} else {
- /* Clear the object, other than link field */
- limit = &(p -> obj_component[sz]);
- for (i = &(p -> obj_component[1]); i < limit; i++) {
- *i = 0;
- }
- if (sz > MAXOBJSZ) {
- p -> obj_link = 0;
- h -> hb_uninit = 0;
- del_hblklist(h);
- freehblk(h);
- } else {
- p -> obj_link = objfreelist[sz];
- objfreelist[sz] = p;
+ ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
+ if (ok -> ok_init) {
+ bzero((char *)((word *)p + 1), (int)(WORDS_TO_BYTES(sz-1)));
}
+ flh = &(ok -> ok_freelist[sz]);
+ obj_link(p) = *flh;
+ *flh = (ptr_t)p;
}
- /* Add it to mem_found to prevent anomalous heap expansion */
- /* in the event of repeated explicit frees of objects of */
- /* varying sizes. */
- mem_found += sz;
}
+bool GC_is_initialized = FALSE;
-/*
- * Disable non-urgent signals
- */
-int holdsigs()
+void GC_init()
{
- unsigned mask = 0xffffffff;
-
- mask &= ~(1<<(SIGSEGV-1));
- mask &= ~(1<<(SIGILL-1));
- mask &= ~(1<<(SIGBUS-1));
- mask &= ~(1<<(SIGIOT-1));
- mask &= ~(1<<(SIGEMT-1));
- mask &= ~(1<<(SIGTRAP-1));
- mask &= ~(1<<(SIGQUIT-1));
- return(sigsetmask(mask));
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_init_inner();
+ UNLOCK();
+ ENABLE_SIGNALS();
+
}
-void gc_init()
+void GC_init_inner()
{
word dummy;
-# define STACKTOP_ALIGNMENT_M1 0xffffff
-
- heaplim = (char *) (sbrk(0));
-# ifdef HBLK_MAP
- heapstart = (char *) (HBLKPTR(((unsigned)sbrk(0))+HBLKSIZE-1 ));
+
+ if (GC_is_initialized) return;
+ GC_is_initialized = TRUE;
+# ifndef THREADS
+ if (GC_stackbottom == 0) {
+ GC_stackbottom = GC_get_stack_base();
+ }
# endif
-# ifdef STACKTOP
- stacktop = STACKTOP;
-# else
- stacktop = (word *)((((long)(&dummy)) + STACKTOP_ALIGNMENT_M1)
- & ~STACKTOP_ALIGNMENT_M1);
+ if (sizeof (ptr_t) != sizeof(word)) {
+ GC_printf("sizeof (ptr_t) != sizeof(word)\n");
+ ABORT("sizeof (ptr_t) != sizeof(word)\n");
+ }
+ if (sizeof (signed_word) != sizeof(word)) {
+ GC_printf("sizeof (signed_word) != sizeof(word)\n");
+ ABORT("sizeof (signed_word) != sizeof(word)\n");
+ }
+ if (sizeof (struct hblk) != HBLKSIZE) {
+ GC_printf("sizeof (struct hblk) != HBLKSIZE\n");
+ ABORT("sizeof (struct hblk) != HBLKSIZE\n");
+ }
+# ifndef THREADS
+# if defined(STACK_GROWS_UP) && defined(STACK_GROWS_DOWN)
+ GC_printf(
+ "Only one of STACK_GROWS_UP and STACK_GROWS_DOWN should be defd\n");
+ ABORT("stack direction 1\n");
+# endif
+# if !defined(STACK_GROWS_UP) && !defined(STACK_GROWS_DOWN)
+ GC_printf(
+ "One of STACK_GROWS_UP and STACK_GROWS_DOWN should be defd\n");
+ ABORT("stack direction 2\n");
+# endif
+# ifdef STACK_GROWS_DOWN
+ if ((word)(&dummy) > (word)GC_stackbottom) {
+ GC_printf("STACK_GROWS_DOWN is defd, but stack appears to grow up\n");
+ GC_printf("sp = 0x%lx, GC_stackbottom = 0x%lx\n",
+ (unsigned long) (&dummy),
+ (unsigned long) GC_stackbottom);
+ ABORT("stack direction 3\n");
+ }
+# else
+ if ((word)(&dummy) < (word)GC_stackbottom) {
+ GC_printf("STACK_GROWS_UP is defd, but stack appears to grow down\n");
+ GC_printf("sp = 0x%lx, GC_stackbottom = 0x%lx\n",
+ (unsigned long) (&dummy),
+ (unsigned long) GC_stackbottom);
+ ABORT("stack direction 4");
+ }
+# endif
+# endif
+# if !defined(_AUX_SOURCE) || defined(__GNUC__)
+ if ((word)(-1) < (word)0) {
+ GC_printf("The type word should be an unsigned integer type\n");
+ GC_printf("It appears to be signed\n");
+ ABORT("word");
+ }
# endif
- hincr = HINCR;
- expand_hp(hincr);
- init_hblklist();
+ if ((signed_word)(-1) >= (signed_word)0) {
+ GC_printf("The type signed_word should be a signed integer type\n");
+ GC_printf("It appears to be unsigned\n");
+ ABORT("signed_word");
+ }
+
+ GC_hincr = HINCR;
+ GC_init_headers();
+ GC_bl_init();
+ GC_mark_init();
+ if (!GC_expand_hp_inner(GC_hincr)) {
+ GC_printf("Can't start up: no memory\n");
+ EXIT();
+ }
+ GC_register_displacement_inner(0L);
# ifdef MERGE_SIZES
- init_size_map();
+ GC_init_size_map();
+# endif
+ /* Add initial guess of root sets */
+ GC_register_data_segments();
+# ifdef PCR
+ GC_pcr_install();
# endif
+ /* Get black list set up */
+ GC_gcollect_inner(TRUE);
+ GC_gcollect_inner(TRUE);
+ /* Convince lint that some things are used */
+ {
+ extern char * GC_copyright[];
+ GC_noop(GC_copyright, GC_find_header,
+ GC_tl_mark, GC_call_with_alloc_lock);
+ }
}
/* A version of printf that is unlikely to call malloc, and is thus safer */
-/* to call from the collector in case malloc has been bound to gc_malloc. */
+/* to call from the collector in case malloc has been bound to GC_malloc. */
/* Assumes that no more than 1023 characters are written at once. */
-gc_printf(format, a, b, c, d, e, f)
+/* Assumes that all arguments have been converted to something of the */
+/* same size as long, and that the format conversions expect something */
+/* of that size. */
+/*VARARGS1*/
+void GC_printf(format, a, b, c, d, e, f)
+char * format;
+long a, b, c, d, e, f;
+{
+ char buf[1025];
+
+ buf[1024] = 0x15;
+ (void) sprintf(buf, format, a, b, c, d, e, f);
+ if (buf[1024] != 0x15) ABORT("GC_printf clobbered stack");
+# ifdef OS2
+ /* We hope this doesn't allocate */
+ if (fwrite(buf, 1, strlen(buf), stdout) != strlen(buf))
+ ABORT("write to stdout failed");
+# else
+ if (write(1, buf, strlen(buf)) < 0) ABORT("write to stdout failed");
+# endif
+}
+
+/*VARARGS1*/
+void GC_err_printf(format, a, b, c, d, e, f)
char * format;
-int a, b, c, d, e, f;
+long a, b, c, d, e, f;
{
char buf[1025];
- buf[1025] = 0x15;
- sprintf(buf, format, a, b, c, d, e, f);
- if (buf[1025] != 0x15) abort("gc_printf clobbered stack");
- if (write(1, buf, strlen(buf)) < 0) abort("write to stdout failed");
-}
\ No newline at end of file
+ buf[1024] = 0x15;
+ (void) sprintf(buf, format, a, b, c, d, e, f);
+ if (buf[1024] != 0x15) ABORT("GC_err_printf clobbered stack");
+# ifdef OS2
+ /* We hope this doesn't allocate */
+ if (fwrite(buf, 1, strlen(buf), stderr) != strlen(buf))
+ ABORT("write to stderr failed");
+# else
+ if (write(2, buf, strlen(buf)) < 0) ABORT("write to stderr failed");
+# endif
+}
+
+void GC_err_puts(s)
+char *s;
+{
+# ifdef OS2
+ /* We hope this doesn't allocate */
+ if (fwrite(s, 1, strlen(s), stderr) != strlen(s))
+ ABORT("write to stderr failed");
+# else
+ if (write(2, s, strlen(s)) < 0) ABORT("write to stderr failed");
+# endif
+}
+
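The GC_init_size_map loop above quantizes request sizes so that only a few distinct free lists are ever populated: it repeatedly rounds up and halves until at most three significant bits remain, then shifts back. The standalone sketch below (not the collector's code) applies just that rounding rule and prints the resulting table, so the small number of distinct values is easy to see.

    #include <stdio.h>

    static unsigned round_up_words(unsigned words)
    {
        unsigned size = words;
        unsigned shift = 0;

        while (size > 7) {          /* keep only ~3 significant bits          */
            shift += 1;
            size += 1;              /* round up before halving                */
            size >>= 1;
        }
        return size << shift;       /* always >= words, few distinct values   */
    }

    int main(void)
    {
        unsigned i;

        for (i = 1; i <= 64; i++) {
            printf("%2u words -> %2u words\n", i, round_up_words(i));
        }
        return 0;
    }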
diff --git a/new_hblk.c b/new_hblk.c
new file mode 100644
index 00000000..7f7bf054
--- /dev/null
+++ b/new_hblk.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991,1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ *
+ * This file contains the functions:
+ * ptr_t GC_build_flXXX(h, old_fl)
+ * void GC_new_hblk(n)
+ */
+
+
+# include <stdio.h>
+# include "gc_private.h"
+
+/*
+ * Build a free list for size 1 objects inside hblk h. Set the last link to
+ * be ofl. Return a pointer tpo the first free list entry.
+ */
+ptr_t GC_build_fl1(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[1] = (word)(p);
+ p[2] = (word)(p+1);
+ p[3] = (word)(p+2);
+ p += 4;
+ for (; p < lim; p += 4) {
+ p[0] = (word)(p-1);
+ p[1] = (word)(p);
+ p[2] = (word)(p+1);
+ p[3] = (word)(p+2);
+ };
+ return((ptr_t)(p-1));
+}
+
+/* The same for size 2 cleared objects */
+ptr_t GC_build_fl_clear2(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[1] = 0;
+ p[2] = (word)p;
+ p[3] = 0;
+ p += 4;
+ for (; p < lim; p += 4) {
+ p[0] = (word)(p-2);
+ p[1] = 0;
+ p[2] = (word)p;
+ p[3] = 0;
+ };
+ return((ptr_t)(p-2));
+}
+
+/* The same for size 3 cleared objects */
+ptr_t GC_build_fl_clear3(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1) - 2;
+
+ p[0] = (word)ofl;
+ p[1] = 0;
+ p[2] = 0;
+ p += 3;
+ for (; p < lim; p += 3) {
+ p[0] = (word)(p-3);
+ p[1] = 0;
+ p[2] = 0;
+ };
+ return((ptr_t)(p-3));
+}
+
+/* The same for size 4 cleared objects */
+ptr_t GC_build_fl_clear4(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0;
+ p += 4;
+ for (; p < lim; p += 4) {
+ p[0] = (word)(p-4);
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0;
+ };
+ return((ptr_t)(p-4));
+}
+
+/* The same for size 2 uncleared objects */
+ptr_t GC_build_fl2(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[2] = (word)p;
+ p += 4;
+ for (; p < lim; p += 4) {
+ p[0] = (word)(p-2);
+ p[2] = (word)p;
+ };
+ return((ptr_t)(p-2));
+}
+
+/* The same for size 4 uncleared objects */
+ptr_t GC_build_fl4(h, ofl)
+struct hblk *h;
+ptr_t ofl;
+{
+ register word * p = (word *)h;
+ register word * lim = (word *)(h + 1);
+
+ p[0] = (word)ofl;
+ p[4] = (word)p;
+ p += 8;
+ for (; p < lim; p += 8) {
+ p[0] = (word)(p-4);
+ p[4] = (word)p;
+ };
+ return((ptr_t)(p-4));
+}
+
+
+/*
+ * Allocate a new heapblock for small objects of size n.
+ * Add all of the heapblock's objects to the free list for objects
+ * of that size. Will fail to do anything if we are out of memory.
+ */
+void GC_new_hblk(sz, kind)
+register word sz;
+int kind;
+{
+ register word *p,
+ *prev;
+ word *last_object; /* points to last object in new hblk */
+ register struct hblk *h; /* the new heap block */
+ register bool clear = GC_obj_kinds[kind].ok_init;
+
+# ifdef PRINTSTATS
+ if ((sizeof (struct hblk)) > HBLKSIZE) {
+ abort("HBLK SZ inconsistency");
+ }
+# endif
+
+ /* Allocate a new heap block */
+ h = GC_allochblk(sz, kind);
+ if (h == 0) return;
+
+ /* Handle small object sizes more efficiently. For larger objects */
+ /* the difference is less significant. */
+ switch (sz) {
+ case 1: GC_obj_kinds[kind].ok_freelist[1] =
+ GC_build_fl1(h, GC_obj_kinds[kind].ok_freelist[1]);
+ return;
+ case 2: if (clear) {
+ GC_obj_kinds[kind].ok_freelist[2] =
+ GC_build_fl_clear2(h, GC_obj_kinds[kind].ok_freelist[2]);
+ } else {
+ GC_obj_kinds[kind].ok_freelist[2] =
+ GC_build_fl2(h, GC_obj_kinds[kind].ok_freelist[2]);
+ }
+ return;
+ case 3: if (clear) {
+ GC_obj_kinds[kind].ok_freelist[3] =
+ GC_build_fl_clear3(h, GC_obj_kinds[kind].ok_freelist[3]);
+ return;
+ } else {
+ /* It's messy to do better than the default here. */
+ break;
+ }
+ case 4: if (clear) {
+ GC_obj_kinds[kind].ok_freelist[4] =
+ GC_build_fl_clear4(h, GC_obj_kinds[kind].ok_freelist[4]);
+ } else {
+ GC_obj_kinds[kind].ok_freelist[4] =
+ GC_build_fl4(h, GC_obj_kinds[kind].ok_freelist[4]);
+ }
+ return;
+ default:
+ break;
+ }
+
+ /* Clear the page if necessary. */
+ if (clear) bzero((char *)h, (int)HBLKSIZE);
+
+ /* Add objects to free list */
+ p = &(h -> hb_body[sz]); /* second object in *h */
+ prev = &(h -> hb_body[0]); /* One object behind p */
+ last_object = ((word *)((char *)h + HBLKSIZE)) - sz;
+ /* Last place for last object to start */
+
+ /* make a list of all objects in *h with head as last object */
+ while (p <= last_object) {
+ /* current object's link points to last object */
+ obj_link(p) = (ptr_t)prev;
+ prev = p;
+ p += sz;
+ }
+ p -= sz; /* p now points to last object */
+
+ /*
+ * put p (which is now head of list of objects in *h) as first
+ * pointer in the appropriate free list for this size.
+ */
+ obj_link(h -> hb_body) = GC_obj_kinds[kind].ok_freelist[sz];
+ GC_obj_kinds[kind].ok_freelist[sz] = ((ptr_t)p);
+}
+
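The generic case at the end of GC_new_hblk threads every object in the new block onto a free list by storing, in each object's first word, a link to the previously visited object; the block's last object becomes the new list head, and the first object links to the old list. A standalone sketch of that threading with invented block and object sizes follows; it assumes, as the collector's word type effectively does, that a pointer value fits in an unsigned long.

    #include <stdio.h>

    #define BLK_WORDS 64                /* invented block size, in words      */
    #define OBJ_WORDS 4                 /* invented object size, in words     */

    typedef unsigned long word_t;       /* assumes a pointer fits in a long   */

    static word_t block[BLK_WORDS];

    static word_t *thread_block(word_t *old_head)
    {
        word_t *p    = block + OBJ_WORDS;             /* second object        */
        word_t *prev = block;                         /* first object         */
        word_t *last = block + BLK_WORDS - OBJ_WORDS; /* last possible start  */

        block[0] = (word_t)old_head;    /* first object links to the old list */
        while (p <= last) {
            p[0] = (word_t)prev;        /* link to the previously seen object */
            prev = p;
            p += OBJ_WORDS;
        }
        return prev;                    /* the last object heads the new list */
    }

    int main(void)
    {
        word_t *fl = thread_block(0);
        int n = 0;

        while (fl != 0) {               /* walk the list back to the front    */
            fl = (word_t *)fl[0];
            n++;
        }
        printf("%d objects threaded\n", n);   /* expect BLK_WORDS/OBJ_WORDS   */
        return 0;
    }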
diff --git a/obj_map.c b/obj_map.c
new file mode 100644
index 00000000..6ea00763
--- /dev/null
+++ b/obj_map.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+
+/* Routines for maintaining maps describing heap block
+ * layouts for various object sizes. Allows fast pointer validity checks
+ * and fast location of object start locations on machines (such as SPARC)
+ * with slow division.
+ *
+ * Boehm, February 6, 1992 1:00:09 pm PST
+ */
+
+# include "gc_private.h"
+
+char * GC_invalid_map = 0;
+
+/* Invalidate the object map associated with a block. Free blocks */
+/* are identified by invalid maps. */
+void GC_invalidate_map(hhdr)
+hdr *hhdr;
+{
+ register int displ;
+
+ if (GC_invalid_map == 0) {
+ GC_invalid_map = GC_scratch_alloc(MAP_SIZE);
+ for (displ = 0; displ < HBLKSIZE; displ++) {
+ MAP_ENTRY(GC_invalid_map, displ) = OBJ_INVALID;
+ }
+ }
+ hhdr -> hb_map = GC_invalid_map;
+}
+
+/* Consider pointers that are offset bytes displaced from the beginning */
+/* of an object to be valid. */
+void GC_register_displacement(offset)
+word offset;
+{
+# ifndef ALL_INTERIOR_POINTERS
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_register_displacement_inner(offset);
+ UNLOCK();
+ ENABLE_SIGNALS();
+# endif
+}
+
+void GC_register_displacement_inner(offset)
+word offset;
+{
+# ifndef ALL_INTERIOR_POINTERS
+ register int i;
+
+ if (offset > MAX_OFFSET) {
+ ABORT("Bad argument to GC_register_displacement");
+ }
+ if (!GC_valid_offsets[offset]) {
+ GC_valid_offsets[offset] = TRUE;
+ GC_modws_valid_offsets[offset % sizeof(word)] = TRUE;
+ for (i = 0; i <= MAXOBJSZ; i++) {
+ if (GC_obj_map[i] != 0) {
+ if (i == 0) {
+ GC_obj_map[i][offset + HDR_BYTES] = offset >> 2;
+ } else {
+ register int j;
+ register int lb = WORDS_TO_BYTES(i);
+
+ if (offset < lb) {
+ for (j = offset + HDR_BYTES; j < HBLKSIZE; j += lb) {
+ GC_obj_map[i][j] = offset >> 2;
+ }
+ }
+ }
+ }
+ }
+ }
+# endif
+}
+
+
+/* Add a heap block map for objects of size sz to obj_map. */
+void GC_add_map_entry(sz)
+word sz;
+{
+ register int obj_start;
+ register int displ;
+ register char * new_map;
+
+ if (sz > MAXOBJSZ) sz = 0;
+ if (GC_obj_map[sz] != 0) {
+ return;
+ }
+ new_map = GC_scratch_alloc(MAP_SIZE);
+# ifdef PRINTSTATS
+ GC_printf("Adding block map for size %lu\n", (unsigned long)sz);
+# endif
+ for (displ = 0; displ < HBLKSIZE; displ++) {
+ MAP_ENTRY(new_map,displ) = OBJ_INVALID;
+ }
+ if (sz == 0) {
+ for(displ = 0; displ <= MAX_OFFSET; displ++) {
+ if (OFFSET_VALID(displ)) {
+ MAP_ENTRY(new_map,displ+HDR_BYTES) = BYTES_TO_WORDS(displ);
+ }
+ }
+ } else {
+ for (obj_start = HDR_BYTES;
+ obj_start + WORDS_TO_BYTES(sz) <= HBLKSIZE;
+ obj_start += WORDS_TO_BYTES(sz)) {
+ for (displ = 0; displ < WORDS_TO_BYTES(sz); displ++) {
+ if (OFFSET_VALID(displ)) {
+ MAP_ENTRY(new_map, obj_start + displ) =
+ BYTES_TO_WORDS(displ);
+ }
+ }
+ }
+ }
+ GC_obj_map[sz] = new_map;
+}
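The maps built by GC_add_map_entry let the marker find an object's start without dividing: the map entry for a byte displacement holds the word offset back to the object's beginning, so subtracting it from the displacement (in words) yields the base. The standalone sketch below uses invented block, header and word sizes, and for simplicity treats every interior offset as valid, whereas the real maps only record registered displacements.

    #include <stdio.h>

    #define BLK_BYTES 4096              /* invented block size                */
    #define HDR_BYTES_ 8                /* invented space reserved up front   */
    #define WORD_BYTES 4                /* 32-bit words, as in the 1992 code  */

    static signed char map[BLK_BYTES];  /* -1 marks an invalid displacement   */
                                        /* signed char suffices for the small */
                                        /* object sizes used here             */

    static void build_map(unsigned sz_words)
    {
        unsigned obj_bytes = sz_words * WORD_BYTES;
        unsigned start, d;

        for (d = 0; d < BLK_BYTES; d++) map[d] = -1;
        for (start = HDR_BYTES_; start + obj_bytes <= BLK_BYTES;
             start += obj_bytes) {
            for (d = 0; d < obj_bytes; d++) {
                map[start + d] = (signed char)(d / WORD_BYTES);
            }
        }
    }

    int main(void)
    {
        unsigned displ = 100;           /* arbitrary byte offset into a block */

        build_map(4);                   /* blocks holding 4-word objects      */
        if (map[displ] >= 0) {
            unsigned base_word = displ / WORD_BYTES - map[displ];
            printf("byte %u lies in the object starting at word %u (byte %u)\n",
                   displ, base_word, base_word * WORD_BYTES);
        }
        return 0;
    }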
diff --git a/os_dep.c b/os_dep.c
new file mode 100644
index 00000000..c1726d4c
--- /dev/null
+++ b/os_dep.c
@@ -0,0 +1,295 @@
+# include "gc_private.h"
+# include <stdio.h>
+# include <signal.h>
+
+/* Disable and enable signals during nontrivial allocations */
+
+# ifdef OS2
+
+# define INCL_DOSEXCEPTIONS
+# define INCL_DOSPROCESS
+# define INCL_DOSERRORS
+# define INCL_DOSMODULEMGR
+# include <os2.h>
+
+/* A kludge to get around what appears to be a header file bug */
+# ifndef WORD
+# define WORD unsigned short
+# endif
+# ifndef DWORD
+# define DWORD unsigned long
+# endif
+
+# define EXE386 1
+# include <newexe.h>
+# include <exe386.h>
+
+void GC_disable_signals(void)
+{
+ ULONG nest;
+
+ DosEnterMustComplete(&nest);
+ if (nest != 1) ABORT("nested GC_disable_signals");
+}
+
+void GC_enable_signals(void)
+{
+ ULONG nest;
+
+ DosExitMustComplete(&nest);
+ if (nest != 0) ABORT("GC_enable_signals");
+}
+
+
+# else
+
+static int old_mask;
+
+void GC_disable_signals()
+{
+ int mask = 0x7fffffff;
+ /* Setting the leading bit appears to provoke a bug in some */
+ /* longjmp implementations. Most systems appear not to have */
+ /* a signal 32. */
+
+ mask &= ~(1<<(SIGSEGV-1));
+ mask &= ~(1<<(SIGILL-1));
+ mask &= ~(1<<(SIGQUIT-1));
+# ifdef SIGBUS
+ mask &= ~(1<<(SIGBUS-1));
+# endif
+# ifdef SIGIOT
+ mask &= ~(1<<(SIGIOT-1));
+# endif
+# ifdef SIGEMT
+ mask &= ~(1<<(SIGEMT-1));
+# endif
+# ifdef SIGTRAP
+ mask &= ~(1<<(SIGTRAP-1));
+# endif
+ old_mask = sigsetmask(mask);
+}
+
+void GC_enable_signals()
+{
+ (void)sigsetmask(old_mask);
+}
+
+
+# endif
+
+/*
+ * Find the base of the stack.
+ * Used only in single-threaded environment.
+ * With threads, GC_mark_roots needs to know how to do this.
+ * Called with allocator lock held.
+ */
+
+# ifdef OS2
+
+ptr_t GC_get_stack_base()
+{
+ PTIB ptib;
+ PPIB ppib;
+
+ if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
+ fprintf(stderr, "DosGetInfoBlocks failed\n");
+ ABORT("DosGetInfoBlocks failed\n");
+ }
+ return((ptr_t)(ptib -> tib_pstacklimit));
+}
+
+# else
+
+# if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
+ /* Some tools to implement HEURISTIC2 */
+# define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
+# include <setjmp.h>
+ /* static */ jmp_buf GC_jmp_buf;
+
+ /*ARGSUSED*/
+ void GC_fault_handler(sig)
+ int sig;
+ {
+ longjmp(GC_jmp_buf, 1);
+ }
+# endif
+
+ptr_t GC_get_stack_base()
+{
+ word dummy;
+ static ptr_t result;
+ /* Needs to be static, since otherwise it may not be */
+ /* preserved across the longjmp. Can safely be */
+ /* static since it's only called once, with the */
+ /* allocation lock held. */
+# ifdef __STDC__
+ typedef void (*handler)(int);
+# else
+ typedef void (*handler)();
+# endif
+# ifdef HEURISTIC2
+ static handler old_segv_handler, old_bus_handler;
+ /* See above for static declaration. */
+# endif
+# define STACKBOTTOM_ALIGNMENT_M1 0xffffff
+
+# ifdef STACKBOTTOM
+ return(STACKBOTTOM);
+# else
+# ifdef HEURISTIC1
+# ifdef STACK_GROWS_DOWN
+ result = (ptr_t)((((word)(&dummy))
+ + STACKBOTTOM_ALIGNMENT_M1)
+ & ~STACKBOTTOM_ALIGNMENT_M1);
+# else
+ result = (ptr_t)(((word)(&dummy))
+ & ~STACKBOTTOM_ALIGNMENT_M1);
+# endif
+# endif /* HEURISTIC1 */
+# ifdef HEURISTIC2
+ old_segv_handler = signal(SIGSEGV, GC_fault_handler);
+# ifdef SIGBUS
+ old_bus_handler = signal(SIGBUS, GC_fault_handler);
+# endif
+ if (setjmp(GC_jmp_buf) == 0) {
+ result = (ptr_t)(((word)(&dummy))
+ & ~(MIN_PAGE_SIZE-1));
+ for (;;) {
+# ifdef STACK_GROWS_DOWN
+ result += MIN_PAGE_SIZE;
+# else
+ result -= MIN_PAGE_SIZE;
+# endif
+ GC_noop(*result);
+ }
+ }
+ (void) signal(SIGSEGV, old_segv_handler);
+# ifdef SIGBUS
+ (void) signal(SIGBUS, old_bus_handler);
+# endif
+# ifdef STACK_GROWS_UP
+ result += MIN_PAGE_SIZE;
+# endif
+# endif /* HEURISTIC2 */
+# endif /* STACKBOTTOM */
+ return(result);
+}
+
+# endif /* ! OS2 */
+
+/*
+ * Register static data segment(s) as roots.
+ * If more data segments are added later, then they need to be registered
+ * at that point (as we do with SunOS dynamic loading),
+ * or GC_mark_roots needs to check for them (as we do with PCR).
+ * Called with allocator lock held.
+ */
+
+# ifdef OS2
+
+void GC_register_data_segments()
+{
+ PTIB ptib;
+ PPIB ppib;
+ HMODULE module_handle;
+# define PBUFSIZ 512
+ UCHAR path[PBUFSIZ];
+ FILE * myexefile;
+ struct exe_hdr hdrdos; /* MSDOS header. */
+ struct e32_exe hdr386; /* Real header for my executable */
+ struct o32_obj seg; /* Current segment */
+ int nsegs;
+
+
+ if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
+ fprintf(stderr, "DosGetInfoBlocks failed\n");
+ ABORT("DosGetInfoBlocks failed\n");
+ }
+ module_handle = ppib -> pib_hmte;
+ if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
+ fprintf(stderr, "DosQueryModuleName failed\n");
+ ABORT("DosGetInfoBlocks failed\n");
+ }
+ myexefile = fopen(path, "rb");
+ if (myexefile == 0) {
+ GC_err_puts("Couldn't open executable ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Failed to open executable\n");
+ }
+ if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
+ GC_err_puts("Couldn't read MSDOS header from ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Couldn't read MSDOS header");
+ }
+ if (E_MAGIC(hdrdos) != EMAGIC) {
+ GC_err_puts("Executable has wrong DOS magic number: ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Bad DOS magic number");
+ }
+ if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
+ GC_err_puts("Seek to new header failed in ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Bad DOS magic number");
+ }
+ if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
+ GC_err_puts("Couldn't read MSDOS header from ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Couldn't read OS/2 header");
+ }
+ if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
+ GC_err_puts("Executable has wrong OS/2 magic number:");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Bad OS/2 magic number");
+ }
+ if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
+ GC_err_puts("Executable %s has wrong byte order: ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Bad byte order");
+ }
+ if ( E32_CPU(hdr386) == E32CPU286) {
+ GC_err_puts("GC can't handle 80286 executables: ");
+ GC_err_puts(path); GC_err_puts("\n");
+ EXIT();
+ }
+ if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
+ SEEK_SET) != 0) {
+ GC_err_puts("Seek to object table failed: ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Seek to object table failed");
+ }
+ for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
+ int flags;
+ if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
+ GC_err_puts("Couldn't read obj table entry from ");
+ GC_err_puts(path); GC_err_puts("\n");
+ ABORT("Couldn't read obj table entry");
+ }
+ flags = O32_FLAGS(seg);
+ if (!(flags & OBJWRITE)) continue;
+ if (!(flags & OBJREAD)) continue;
+ if (flags & OBJINVALID) {
+ fprintf(stderr, "Object with invalid pages?\n");
+ continue;
+ }
+ GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg));
+ }
+}
+
+# else
+
+void GC_register_data_segments()
+{
+ extern int end;
+
+ GC_add_roots_inner(DATASTART, (char *)(&end));
+# ifdef DYNAMIC_LOADING
+ {
+ extern void GC_setup_dynamic_loading();
+
+ GC_setup_dynamic_loading();
+ }
+# endif
+}
+
+# endif /* ! OS2 */
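The HEURISTIC2 branch of GC_get_stack_base above locates the stack base by touching one page at a time until an access faults, escaping from the fault handler with longjmp. The standalone sketch below shows that probing idea in isolation; it assumes a downward-growing stack and a 4096-byte page, and (like the original heuristic) relies on longjmp out of a signal handler, which is not strictly portable.

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    #define PROBE_PAGE 4096              /* assumed minimum page size         */

    static jmp_buf probe_env;

    static void probe_fault(int sig)
    {
        (void)sig;
        longjmp(probe_env, 1);           /* escape from the faulting access   */
    }

    static char *stack_base_sketch(void)
    {
        volatile char * volatile p = (volatile char *)&p;  /* near current sp */
        void (*old)(int) = signal(SIGSEGV, probe_fault);

        if (setjmp(probe_env) == 0) {
            for (;;) {
                (void)*p;                /* touch the page                    */
                p += PROBE_PAGE;         /* stack assumed to grow downward    */
            }
        }
        (void)signal(SIGSEGV, old);
        return (char *)p;                /* roughly where the stack ends      */
    }

    int main(void)
    {
        printf("approximate stack base: %p\n", (void *)stack_base_sketch());
        return 0;
    }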
diff --git a/pcr_interface.c b/pcr_interface.c
new file mode 100644
index 00000000..85167804
--- /dev/null
+++ b/pcr_interface.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to copy this garbage collector for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+# include "gc_private.h"
+
+# ifdef PCR
+/*
+ * Note that POSIX PCR requires an ANSI C compiler. Hence we are allowed
+ * to make the same assumption here.
+ * We wrap all of the allocator functions to avoid questions of
+ * compatibility between the prototyped and nonprototyped versions of the functions.
+ */
+# include "pcr/mm/PCR_MM.h"
+
+# define MY_MAGIC 17L
+
+void * GC_AllocProc(size_t size, PCR_Bool ptrFree, PCR_Bool clear )
+{
+ if (ptrFree) {
+ void * result = (void *)GC_malloc_atomic(size);
+ if (clear && result != 0) bzero(result, size);
+ return(result);
+ } else {
+ return((void *)GC_malloc(size));
+ }
+}
+
+# define GC_ReallocProc GC_realloc
+
+# define GC_FreeProc GC_free
+
+void GC_NoOpProc () {}
+
+typedef struct {
+ PCR_ERes (*ed_proc)(void *p, size_t size, PCR_Any data);
+ bool ed_pointerfree;
+ PCR_ERes ed_fail_code;
+ PCR_Any ed_client_data;
+} enumerate_data;
+
+void GC_enumerate_block(h, ed)
+register struct hblk *h;
+enumerate_data * ed;
+{
+ register hdr * hhdr;
+ register int sz;
+ word *p;
+ word * lim;
+
+ hhdr = HDR(h);
+ sz = hhdr -> hb_sz;
+ if (sz >= 0 && ed -> ed_pointerfree
+ || sz <= 0 && !(ed -> ed_pointerfree)) return;
+ if (sz < 0) sz = -sz;
+ lim = (word *)(h+1) - sz;
+ p = (word *)h;
+ do {
+ if (PCR_ERes_IsErr(ed -> ed_fail_code)) return;
+ ed -> ed_fail_code =
+ (*(ed -> ed_proc))(p, WORDS_TO_BYTES(sz), ed -> ed_client_data);
+ p+= sz;
+ } while (p <= lim);
+}
+
+struct PCR_MM_ProcsRep * GC_old_allocator = 0;
+
+PCR_ERes GC_EnumerateProc(
+ PCR_Bool ptrFree,
+ PCR_ERes (*proc)(void *p, size_t size, PCR_Any data),
+ PCR_Any data
+)
+{
+ enumerate_data ed;
+
+ ed.ed_proc = proc;
+ ed.ed_pointerfree = ptrFree;
+ ed.ed_fail_code = PCR_ERes_okay;
+ ed.ed_client_data = data;
+ GC_apply_to_all_blocks(GC_enumerate_block, &ed);
+ if (ed.ed_fail_code != PCR_ERes_okay) {
+ return(ed.ed_fail_code);
+ } else {
+ /* Also enumerate objects allocated by my predecessors */
+ return((*(GC_old_allocator->mmp_enumerate))(ptrFree, proc, data));
+ }
+}
+
+struct PCR_MM_ProcsRep GC_Rep = {
+ MY_MAGIC,
+ GC_AllocProc,
+ GC_ReallocProc,
+ GC_NoOpProc, /* mmp_free */
+ GC_FreeProc, /* mmp_unsafeFree */
+ GC_EnumerateProc,
+ GC_NoOpProc, /* mmp_shutdown */
+};
+
+void GC_pcr_install()
+{
+ PCR_MM_Install(&GC_Rep, &GC_old_allocator);
+}
+# endif
diff --git a/real_malloc.c b/real_malloc.c
new file mode 100644
index 00000000..5718ca2b
--- /dev/null
+++ b/real_malloc.c
@@ -0,0 +1,45 @@
+/* We put this here to minimize the risk of inlining. */
+/*VARARGS*/
+GC_noop() {}
+
+# ifdef PCR
+/*
+ * This definition should go in its own file that includes no other
+ * header files. Otherwise, we risk not getting the underlying system
+ * malloc.
+ */
+# define PCR_NO_RENAME
+# include <stdlib.h>
+
+# ifdef __STDC__
+ char * real_malloc(size_t size)
+# else
+ char * real_malloc(size)
+ int size;
+# endif
+{
+ return((char *)malloc(size));
+}
+#endif /* PCR */
+
+# ifdef __OS2__
+
+# include <stddef.h>
+# define INCL_DOSMEMMGR
+# define INCL_DOSERRORS
+# include <os2.h>
+
+void * os2_alloc(size_t bytes)
+{
+ void * result;
+
+ if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
+ PAG_WRITE | PAG_COMMIT)
+ != NO_ERROR) {
+ return(0);
+ }
+ if (result == 0) return(os2_alloc(bytes));
+ return(result);
+}
+
+# endif /* OS2 */
diff --git a/reclaim.c b/reclaim.c
index 729044ef..357c0a67 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -1,42 +1,42 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
- * Permission is hereby granted to copy this compiler for any purpose,
+ * Permission is hereby granted to copy this garbage collector for any purpose,
* provided the above notices are retained on all copies.
*/
#include <stdio.h>
-#include "gc.h"
-#define DEBUG
-#undef DEBUG
-#ifdef PRINTSTATS
-# define GATHERSTATS
-#endif
+#include "gc_private.h"
-long mem_found = 0; /* Number of longwords of memory reclaimed */
-
-long composite_in_use; /* Number of longwords in accessible composite */
- /* objects. */
-
-long atomic_in_use; /* Number of longwords in accessible atomic */
- /* objects. */
+signed_word GC_mem_found = 0;
+ /* Number of longwords of memory reclaimed */
# ifdef FIND_LEAK
static report_leak(p, sz)
-long p, sz;
+ptr_t p;
+word sz;
{
- /* Negative size ==> pointer-free (atomic) object */
- /* sz is in words. */
- abort(p, sz);
+ if (HDR(p) -> hb_obj_kind == PTRFREE) {
+ GC_err_printf("Leaked atomic object at ");
+ } else {
+ GC_err_printf("Leaked composite object at ");
+ }
+ if (GC_debugging_started && GC_has_debug_info(p)) {
+ GC_print_obj(p);
+ } else {
+ GC_err_printf("0x%lx (appr. size = %ld)\n",
+ (unsigned long)p, (unsigned long)WORDS_TO_BYTES(sz));
+ }
}
# define FOUND_FREE(hblk, word_no) \
if (abort_if_found) { \
- report_leak((long)hblk + WORDS_TO_BYTES(word_no), hblk -> hb_sz); \
+ report_leak((ptr_t)hblk + WORDS_TO_BYTES(word_no), \
+ HDR(hblk) -> hb_sz); \
}
# else
# define FOUND_FREE(hblk, word_no)
@@ -47,168 +47,498 @@ long p, sz;
*
*/
-reclaim(abort_if_found)
-int abort_if_found; /* Abort if a reclaimable object is found */
+
+/*
+ * Test whether a block is completely empty, i.e. contains no marked
+ * objects. This does not require the block to be in physical
+ * memory.
+ */
+
+bool GC_block_empty(hhdr)
+register hdr * hhdr;
{
+ register word *p = (word *)(&(hhdr -> hb_marks[0]));
+ register word * plim =
+ (word *)(&(hhdr -> hb_marks[MARK_BITS_SZ]));
+ while (p < plim) {
+ if (*p++) return(FALSE);
+ }
+ return(TRUE);
+}
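
GC_block_empty simply asks whether any word of the block's mark-bit array is nonzero; the block itself never has to be touched, which matters when it has been paged out. A self-contained version of the same test over an arbitrary bit array (the real MARK_BITS_SZ and header layout are not reproduced here):

    #include <stddef.h>

    /* Return 1 if no bit is set in an n-word bit array, else 0. */
    static int bits_all_clear(const unsigned long *bits, size_t n_words)
    {
        size_t i;
        for (i = 0; i < n_words; i++) {
            if (bits[i] != 0) return 0;      /* at least one marked object  */
        }
        return 1;
    }
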
+
+# ifdef GATHERSTATS
+# define INCR_WORDS(sz) n_words_found += (sz)
+# else
+# define INCR_WORDS(sz)
+# endif
+/*
+ * Restore unmarked small objects in h of size sz to the object
+ * free list. Returns the new list.
+ * Clears unmarked objects.
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_clear(hbp, hhdr, sz, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
-register int word_no; /* Number of word in block */
-register long i;
-register word *p; /* pointer to current word in block */
-register int mb; /* mark bit of current word */
-int sz; /* size of objects in current block */
-word *plim;
-struct hblk **nexthbp; /* ptr to ptr to current heap block */
-int nonempty; /* nonempty ^ done with block => block empty*/
-struct obj *list; /* used to build list of free words in block*/
-register int is_atomic; /* => current block contains atomic objs */
-
-# ifdef DEBUG
- gc_printf("clearing all between %x and %x, %x and %x\n",
- objfreelist, &objfreelist[MAXOBJSZ+1],
- aobjfreelist,&aobjfreelist[MAXAOBJSZ+1]);
+register hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+register word sz;
+{
+ register int word_no;
+ register word *p, *q, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+
+ p = (word *)(hbp->hb_body);
+ word_no = HDR_WORDS;
+ plim = (word *)((((word)hbp) + HBLKSIZE)
+ - WORDS_TO_BYTES(sz));
+
+ /* go through all words in block */
+ while( p <= plim ) {
+ if( mark_bit_from_hdr(hhdr, word_no) ) {
+ p += sz;
+ } else {
+ FOUND_FREE(hbp, word_no);
+ INCR_WORDS(sz);
+ /* object is available - put on list */
+ obj_link(p) = list;
+ list = ((ptr_t)p);
+ /* Clear object, advance p to next object in the process */
+ q = p + sz;
+ p++; /* Skip link field */
+ while (p < q) {
+ *p++ = 0;
+ }
+ }
+ word_no += sz;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+}
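
GC_reclaim_clear walks the block in object-sized strides; every unmarked object is pushed onto the size-specific free list (its first word becomes the link) and the remaining words are zeroed, so the next allocation hands out clean memory. A simplified, self-contained version of that loop, assuming a caller-supplied is_marked predicate in place of the real mark-bit lookup:

    #include <stddef.h>

    typedef unsigned long word;

    /* Sweep n_objs objects of obj_words words each, starting at body.      */
    /* Unmarked objects are cleared and prepended to *free_list.            */
    static void sweep_and_clear(word *body, size_t n_objs, size_t obj_words,
                                int (*is_marked)(size_t obj_index),
                                word **free_list)
    {
        size_t i, j;
        for (i = 0; i < n_objs; i++) {
            word *obj = body + i * obj_words;
            if (is_marked(i)) continue;
            obj[0] = (word)*free_list;       /* link field                  */
            for (j = 1; j < obj_words; j++) obj[j] = 0;
            *free_list = obj;
        }
    }
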
+
+/*
+ * A special case for 2 word composite objects (e.g. cons cells):
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_clear2(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
# endif
- if (!abort_if_found) {
- register struct obj **fop;
-
- for( fop = objfreelist; fop < &objfreelist[MAXOBJSZ+1]; fop++ ) {
- *fop = (struct obj *)0;
+ register int mark_word;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & (1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ p[start_displ+1] = 0; \
+ INCR_WORDS(2); \
}
- for( fop = aobjfreelist; fop < &aobjfreelist[MAXAOBJSZ+1]; fop++ ) {
- *fop = (struct obj *)0;
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((unsigned)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ DO_OBJ(0);
+ DO_OBJ(2);
+ DO_OBJ(4);
+ DO_OBJ(6);
+ DO_OBJ(8);
+ DO_OBJ(10);
+ DO_OBJ(12);
+ DO_OBJ(14);
+ DO_OBJ(16);
+ DO_OBJ(18);
+ DO_OBJ(20);
+ DO_OBJ(22);
+ DO_OBJ(24);
+ DO_OBJ(26);
+ DO_OBJ(28);
+ DO_OBJ(30);
+ p+=32;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
+
+/*
+ * Another special case for 4 word composite objects:
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_clear4(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register int mark_word;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & (1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ p[start_displ+1] = 0; \
+ p[start_displ+2] = 0; \
+ p[start_displ+3] = 0; \
+ INCR_WORDS(4); \
}
- } /* otherwise free list objects are marked, and its safe to leave them */
- atomic_in_use = 0;
- composite_in_use = 0;
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((unsigned)hbp) + HBLKSIZE);
-# ifdef PRINTBLOCKS
- gc_printf("reclaim: current block sizes:\n");
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ DO_OBJ(0);
+ DO_OBJ(4);
+ DO_OBJ(8);
+ DO_OBJ(12);
+ DO_OBJ(16);
+ DO_OBJ(20);
+ DO_OBJ(24);
+ DO_OBJ(28);
+ p+=32;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
# endif
+ return(list);
+# undef DO_OBJ
+}
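
The clear2/clear4 variants exploit the one-mark-bit-per-word layout: a single 32-bit mark word describes a 32-word run of the block, i.e. sixteen 2-word or eight 4-word objects, so the sweep can be fully unrolled with DO_OBJ and each run costs one fetch of the mark bits. The same idea written as a loop rather than unrolled, assuming 32 mark bits per mark word and 2-word objects:

    typedef unsigned long word;

    /* Sweep one 32-word run described by mark_word: bit k set means the    */
    /* object starting at run[k] is marked.  Unmarked 2-word objects are    */
    /* linked onto *free_list and their second word is cleared.             */
    static void sweep_run_2(word *run, unsigned long mark_word,
                            word **free_list)
    {
        int k;
        for (k = 0; k < 32; k += 2) {        /* objects start every 2 words */
            if (!(mark_word & (1UL << k))) {
                run[k]     = (word)*free_list;   /* link field              */
                run[k + 1] = 0;
                *free_list = run + k;
            }
        }
    }
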
- /* go through all heap blocks (in hblklist) and reclaim unmarked objects */
-# ifdef HBLK_MAP
- hbp = (struct hblk *) heapstart;
- for (; ((char *)hbp) < heaplim; hbp++) if (is_hblk(hbp)) {
-/* fprintf(stderr, "Reclaiming in 0x%X\n", hbp); */
-# else
- nexthbp = hblklist;
- while( nexthbp < last_hblk ) {
- hbp = *nexthbp++;
-# endif
+/* The same thing, but don't clear objects: */
+/*ARGSUSED*/
+ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+register hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+register word sz;
+{
+ register int word_no;
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+
+ p = (word *)(hbp->hb_body);
+ word_no = HDR_WORDS;
+ plim = (word *)((((unsigned)hbp) + HBLKSIZE)
+ - WORDS_TO_BYTES(sz));
- nonempty = FALSE;
- sz = hbp -> hb_sz;
- is_atomic = 0;
- if (sz < 0) {
- sz = -sz;
- is_atomic = 1; /* this block contains atomic objs */
- }
-# ifdef PRINTBLOCKS
- gc_printf("%d(%c",sz, (is_atomic)? 'a' : 'c');
-# endif
-
- if( sz > (is_atomic? MAXAOBJSZ : MAXOBJSZ) ) { /* 1 big object */
- mb = mark_bit(hbp, (hbp -> hb_body) - ((word *)(hbp)));
- if( mb ) {
-# ifdef GATHERSTATS
- if (is_atomic) {
- atomic_in_use += sz;
- } else {
- composite_in_use += sz;
- }
-# endif
- nonempty = TRUE;
- } else {
- FOUND_FREE(hbp, (hbp -> hb_body) - ((word *)(hbp)));
- mem_found += sz;
- }
- } else { /* group of smaller objects */
- p = (word *)(hbp->hb_body);
- word_no = ((word *)p) - ((word *)hbp);
- plim = (word *)((((unsigned)hbp) + HBLKSIZE)
- - WORDS_TO_BYTES(sz));
-
- list = (is_atomic) ? aobjfreelist[sz] : objfreelist[sz];
-
- /* go through all words in block */
- while( p <= plim ) {
- mb = mark_bit(hbp, word_no);
-
- if( mb ) {
-# ifdef GATHERSTATS
- if (is_atomic) atomic_in_use += sz;
- else composite_in_use += sz;
-# endif
-# ifdef DEBUG
- gc_printf("found a reachable obj\n");
-# endif
- nonempty = TRUE;
- p += sz;
- } else {
- FOUND_FREE(hbp, word_no);
- mem_found += sz;
- /* word is available - put on list */
- ((struct obj *)p)->obj_link = list;
- list = ((struct obj *)p);
- if (is_atomic) {
- p += sz;
- } else {
- /* Clear object, advance p to next object in the process */
- i = (long)(p + sz);
- p++; /* Skip link field */
- while (p < (word *)i) {
- *p++ = 0;
- }
- }
- }
- word_no += sz;
+ /* go through all words in block */
+ while( p <= plim ) {
+ if( !mark_bit_from_hdr(hhdr, word_no) ) {
+ FOUND_FREE(hbp, word_no);
+ INCR_WORDS(sz);
+ /* object is available - put on list */
+ obj_link(p) = list;
+ list = ((ptr_t)p);
}
+ p += sz;
+ word_no += sz;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+}
- /*
- * if block has reachable words in it, we can't reclaim the
- * whole thing so put list of free words in block back on
- * free list for this size.
- */
- if( nonempty ) {
- if ( is_atomic ) aobjfreelist[sz] = list;
- else objfreelist[sz] = list;
- }
- }
-
-# ifdef PRINTBLOCKS
- gc_printf("%c),", nonempty ? 'n' : 'e' );
-# endif
- if (!nonempty) {
- if (!is_atomic && sz <= MAXOBJSZ) {
- /* Clear words at beginning of objects */
- /* Since most of it is already cleared */
- p = (word *)(hbp->hb_body);
- plim = (word *)((((unsigned)hbp) + HBLKSIZE)
- - WORDS_TO_BYTES(sz));
- while (p <= plim) {
- *p = 0;
- p += sz;
- }
- hbp -> hb_uninit = 0;
- } else {
- /* Mark it as being uninitialized */
- hbp -> hb_uninit = 1;
- }
+/*
+ * Another special case for 2 word atomic objects:
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_uninit2(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register int mark_word;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & (1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ INCR_WORDS(2); \
+ }
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((unsigned)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ DO_OBJ(0);
+ DO_OBJ(2);
+ DO_OBJ(4);
+ DO_OBJ(6);
+ DO_OBJ(8);
+ DO_OBJ(10);
+ DO_OBJ(12);
+ DO_OBJ(14);
+ DO_OBJ(16);
+ DO_OBJ(18);
+ DO_OBJ(20);
+ DO_OBJ(22);
+ DO_OBJ(24);
+ DO_OBJ(26);
+ DO_OBJ(28);
+ DO_OBJ(30);
+ p+=32;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
+
+/*
+ * Another special case for 4 word atomic objects:
+ */
+/*ARGSUSED*/
+ptr_t GC_reclaim_uninit4(hbp, hhdr, list, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+hdr * hhdr;
+bool abort_if_found; /* Abort if a reclaimable object is found */
+register ptr_t list;
+{
+ register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+ register int mark_word;
+# define DO_OBJ(start_displ) \
+ if (!(mark_word & (1 << start_displ))) { \
+ FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
+ p[start_displ] = (word)list; \
+ list = (ptr_t)(p+start_displ); \
+ INCR_WORDS(4); \
+ }
+
+ p = (word *)(hbp->hb_body);
+ plim = (word *)(((unsigned)hbp) + HBLKSIZE);
+
+ /* go through all words in block */
+ while( p < plim ) {
+ mark_word = *mark_word_addr++;
+ DO_OBJ(0);
+ DO_OBJ(4);
+ DO_OBJ(8);
+ DO_OBJ(12);
+ DO_OBJ(16);
+ DO_OBJ(20);
+ DO_OBJ(24);
+ DO_OBJ(28);
+ p+=32;
+ }
+# ifdef GATHERSTATS
+ GC_mem_found += n_words_found;
+# endif
+ return(list);
+# undef DO_OBJ
+}
+
+/*
+ * Restore unmarked small objects in the block pointed to by hbp
+ * to the appropriate object free list.
+ * If entirely empty blocks are to be completely deallocated, then
+ * caller should perform that check.
+ */
+GC_reclaim_small_nonempty_block(hbp, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+int abort_if_found; /* Abort if a reclaimable object is found */
+{
+ hdr * hhdr;
+ register word sz; /* size of objects in current block */
+ register struct obj_kind * ok;
+ register ptr_t * flh;
+
+ hhdr = HDR(hbp);
+ sz = hhdr -> hb_sz;
+ ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
+ flh = &(ok -> ok_freelist[sz]);
+
+ if (ok -> ok_init) {
+ switch(sz) {
+ case 2:
+ *flh = GC_reclaim_clear2(hbp, hhdr, *flh, abort_if_found);
+ break;
+ case 4:
+ *flh = GC_reclaim_clear4(hbp, hhdr, *flh, abort_if_found);
+ break;
+ default:
+ *flh = GC_reclaim_clear(hbp, hhdr, sz, *flh, abort_if_found);
+ break;
+ }
+ } else {
+ switch(sz) {
+ case 2:
+ *flh = GC_reclaim_uninit2(hbp, hhdr, *flh, abort_if_found);
+ break;
+ case 4:
+ *flh = GC_reclaim_uninit4(hbp, hhdr, *flh, abort_if_found);
+ break;
+ default:
+ *flh = GC_reclaim_uninit(hbp, hhdr, sz, *flh, abort_if_found);
+ break;
+ }
+ }
+}
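
GC_reclaim_small_nonempty_block is pure dispatch: it picks the clearing or non-clearing sweep depending on the kind's ok_init flag, with the unrolled 2- and 4-word routines as fast paths. A schematic of that shape; the sweep_fn arguments are hypothetical stand-ins for the GC_reclaim_clear* and GC_reclaim_uninit* families:

    typedef unsigned long word;
    typedef char *ptr_t;
    typedef ptr_t (*sweep_fn)(void *blk, word sz, ptr_t list);

    /* Mirror of the ok_init test above. */
    static ptr_t sweep_block(void *blk, word sz, ptr_t list,
                             int kind_wants_clear,
                             sweep_fn sweep_clear, sweep_fn sweep_uninit)
    {
        return kind_wants_clear ? sweep_clear(blk, sz, list)
                                : sweep_uninit(blk, sz, list);
    }
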
+
+/*
+ * Restore an unmarked large object, or an entirely empty block of small objects,
+ * to the heap block free list.
+ * Otherwise enqueue the block for later processing
+ * by GC_reclaim_small_nonempty_block.
+ * If abort_if_found is TRUE, then process any block immediately.
+ */
+void GC_reclaim_block(hbp, abort_if_found)
+register struct hblk *hbp; /* ptr to current heap block */
+int abort_if_found; /* Abort if a reclaimable object is found */
+{
+ register hdr * hhdr;
+ register word sz; /* size of objects in current block */
+ bool empty; /* whether the block contains no marked objects */
+ register struct obj_kind * ok;
+ struct hblk ** rlh;
- /* remove this block from list of active blocks */
- del_hblklist(hbp);
+ hhdr = HDR(hbp);
+ sz = hhdr -> hb_sz;
+ ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
+# ifdef PRINTBLOCKS
+ GC_printf("%ld(", (unsigned long)sz);
+ if (hhdr -> hb_obj_kind == PTRFREE) {
+ GC_printf("a");
+ } else if (hhdr -> hb_obj_kind == NORMAL){
+ GC_printf("c");
+ } else {
+ GC_printf("o");
+ }
+# endif
-# ifndef HBLK_MAP
- /* This entry in hblklist just got replaced; look at it again */
- /* This admittedly depends on the internals of del_hblklist... */
- nexthbp--;
-# endif
+ if( sz > MAXOBJSZ ) { /* 1 big object */
+ if( mark_bit_from_hdr(hhdr, HDR_WORDS) ) {
+ empty = FALSE;
+ } else {
+ FOUND_FREE(hbp, HDR_WORDS);
+# ifdef GATHERSTATS
+ GC_mem_found += sz;
+# endif
+ GC_freehblk(hbp);
+ empty = TRUE;
+ }
+ } else {
+ empty = GC_block_empty(hhdr);
+ if (abort_if_found) {
+ GC_reclaim_small_nonempty_block(hbp, abort_if_found);
+ } else if (empty) {
+# ifdef GATHERSTATS
+ GC_mem_found += BYTES_TO_WORDS(HBLKSIZE);
+# endif
+ GC_freehblk(hbp);
+ } else {
+ /* group of smaller objects, enqueue the real work */
+ rlh = &(ok -> ok_reclaim_list[sz]);
+ hhdr -> hb_next = *rlh;
+ *rlh = hbp;
+ }
+ }
+# ifdef PRINTBLOCKS
+ if (empty) {GC_printf("e),");} else {GC_printf("n),");}
+# endif
+}
- freehblk(hbp);
- } /* end if (one big object...) */
- } /* end while (nexthbp ...) */
+/*
+ * Do the same thing on the entire heap, after first clearing small object
+ * free lists (if we are not just looking for leaks).
+ */
+void GC_start_reclaim(abort_if_found)
+int abort_if_found; /* Abort if a reclaimable object is found */
+{
+ int kind;
+
+ /* Clear reclaim- and free-lists */
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ register ptr_t *fop;
+ register ptr_t *lim;
+ register struct hblk ** hbpp;
+ register struct hblk ** hlim;
+
+ if (!abort_if_found) {
+ lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJSZ+1]);
+ for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
+ *fop = 0;
+ }
+ } /* otherwise free list objects are marked, */
+ /* and it's safe to leave them */
+ hlim = &(GC_obj_kinds[kind].ok_reclaim_list[MAXOBJSZ+1]);
+ for( hbpp = GC_obj_kinds[kind].ok_reclaim_list;
+ hbpp < hlim; hbpp++ ) {
+ *hbpp = 0;
+ }
+ }
+
+# ifdef PRINTBLOCKS
+ GC_printf("GC_reclaim: current block sizes:\n");
+# endif
+ /* Go through all heap blocks and reclaim unmarked objects */
+ /* or enqueue the block for later processing. */
+ GC_apply_to_all_blocks(GC_reclaim_block, abort_if_found);
+
# ifdef PRINTBLOCKS
- gc_printf("\n");
+ GC_printf("\n");
# endif
}
+
+/*
+ * Sweep blocks of the indicated object size and kind until either the
+ * appropriate free list is nonempty, or there are no more blocks to
+ * sweep.
+ */
+void GC_continue_reclaim(sz, kind)
+word sz; /* words */
+int kind;
+{
+ register hdr * hhdr;
+ register struct hblk * hbp;
+ register struct obj_kind * ok = &(GC_obj_kinds[kind]);
+ struct hblk ** rlh = &(ok -> ok_reclaim_list[sz]);
+ ptr_t *flh = &(ok -> ok_freelist[sz]);
+
+
+ while ((hbp = *rlh) != 0) {
+ hhdr = HDR(hbp);
+ *rlh = hhdr -> hb_next;
+ GC_reclaim_small_nonempty_block(hbp, FALSE);
+ if (*flh != 0) break;
+ }
+}
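
Taken together, GC_reclaim_block and GC_continue_reclaim implement a lazy sweep: completely empty blocks are returned to the block allocator immediately, while partially full blocks are only queued per size and kind and are swept later, on demand, until the matching free list has something on it. A self-contained sketch of that demand-driven loop, with a simple singly linked block queue and a hypothetical sweep_one callback that moves reclaimed objects onto *free_list:

    #include <stddef.h>

    struct blk {
        struct blk *next;                    /* reclaim-list link           */
    };

    /* Sweep queued blocks until the free list is nonempty or the queue     */
    /* runs dry. */
    static void continue_reclaim(struct blk **queue, void **free_list,
                                 void (*sweep_one)(struct blk *b, void **fl))
    {
        struct blk *b;
        while ((b = *queue) != NULL) {
            *queue = b->next;
            sweep_one(b, free_list);
            if (*free_list != NULL) break;   /* enough memory for now       */
        }
    }

The payoff is that sweep work is only done for the object sizes the program actually goes on to allocate.
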
diff --git a/rs6000_mach_dep.s b/rs6000_mach_dep.s
index f0e597d4..c056f039 100644
--- a/rs6000_mach_dep.s
+++ b/rs6000_mach_dep.s
@@ -1,17 +1,3 @@
- # Set up _gc_arrays with labels in the middle
- .csect data[RW]
- .globl _gc_arrays
- .globl aobjfreelist
- .globl objfreelist
- .align 2
-_gc_arrays:
-aobjfreelist:
- .space 4*513
-objfreelist:
- .space 4*513
- # either hblkmap or hblklist. Reserve space for HBLK_MAP, which is bigger.
- .space 4*8192
-
.csect
.set r0,0
.set r1,1
@@ -47,71 +33,71 @@ objfreelist:
.set r31,31
# Mark from machine registers that are saved by C compiler
- .globl .mark_regs
-.mark_regs:
- .extern .tl_mark
+ .globl .GC_mark_regs
+.GC_mark_regs:
+ .extern .GC_tl_mark
stu r1,-64(r1) # reserve stack frame
mflr r0 # save link register
st r0,0x48(r1)
- oril r3,r2,0x0 # mark from r2
- bl .tl_mark
+ oril r3,r2,0x0 # GC_mark from r2
+ bl .GC_tl_mark
cror 15,15,15
- oril r3,r13,0x0 # mark from r13-r31
- bl .tl_mark
+ oril r3,r13,0x0 # GC_mark from r13-r31
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r14,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r15,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r16,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r17,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r18,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r19,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r20,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r21,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r22,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r23,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r24,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r25,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r26,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r27,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r28,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r29,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r30,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
oril r3,r31,0x0
- bl .tl_mark
+ bl .GC_tl_mark
cror 15,15,15
l r0,0x48(r1)
mtlr r0
diff --git a/rt_allocobj.s b/rt_allocobj.s
deleted file mode 100644
index dcfaa0b8..00000000
--- a/rt_allocobj.s
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * This (assembly) file contains the functions:
- * struct obj * allocobj(sz)
- * struct obj * allocaobj(sz)
- */
-
-
-/*
- * allocobj(i) insures that the free list entry for objects of size
- * i is not empty.
- *
- * Call _allocobj after first saving the registers which
- * are not guaranteed to be preserved (r0-r5 and r15).
- *
- * Note: the reason we have to use this interface between the caller
- * and the garbage collector is in order to preserve the caller's registers
- * which the C compiler would normally trash. We just stick 'em on the stack
- * so that the mark_all procedure (which marks everything on the stack) will
- * see them.
- *
- * this is the RT version.
- */
-
-/* this prolog was copied from a cc-produced .s file */
- .text
- .align 2
- .data
- .align 2
- .ltorg
- .text
- .ascii "<allocobj>"
- .align 2
- .globl _.allocobj
-_.allocobj:
- .data
- .globl _allocobj
-_allocobj: .long _.allocobj /* text area contains instr ptr */
- .text
- /*
- * save registers which will be trashed on the stack in the place
- * the RT linkage convention uses for saving registers
- */
- .using _allocobj,r14 /* tell assembler r14 is reliable base */
- stm r3, -100+(3*4)(r1) /* we don't save r1 cause it's sp */
- ai r1,r1,-(36+13*4)
- mr r14, r0 /* initialize data area pointer */
-
- balix r15, _._allocobj /* call _allocobj() */
- get r0,$.long(__allocobj) /* get data area pointer */
-
- lm r3, -100+(36+13*4)+(3*4)(r1) /* restore regs */
- brx r15 /* return to caller (no restore req'd) */
- ai r1, $(36+13*4) /* restore r1 to where it belongs */
-
-/* trace table for allocobj */
- .align 2
- .byte 0xdf /* magic1 */
- .byte 0x07 /* code */
- .byte 0xdf /* magic2 */
- .byte 0x08 /* first_gpr << 4 | opt stuff */
- .byte 0x01 /* no. args and stack reg num */
- .byte 0x3c /* 0011 1100 ==> stack frame sz = 60 */
- .data
- .ltorg
-
- .text
- .ascii "<allocaobj>"
- .align 2
- .globl _.allocaobj
-_.allocaobj:
- .data
- .globl _allocaobj
-_allocaobj: .long _.allocaobj /* text area contains instr ptr */
- .text
- /*
- * save registers which will be trashed on the stack in the place
- * the RT linkage convention uses for saving registers
- */
- .using _allocaobj,r14 /* tell assembler r14 is reliable base */
- stm r3, -100+(3*4)(r1) /* we don't save r1 cause it's sp */
- ai r1,r1,-(36+13*4)
- mr r14, r0 /* initialize data area pointer */
-
- balix r15, _._allocaobj /* call _allocaobj() */
- get r0,$.long(__allocaobj) /* get data area pointer */
-
- lm r3, -100+(36+13*4)+(3*4)(r1) /* restore regs */
- brx r15 /* return to caller (no restore req'd) */
- ai r1, $(36+13*4) /* restore r1 to where it belongs */
-
-/* trace table for allocaobj */
- .align 2
- .byte 0xdf /* magic1 */
- .byte 0x07 /* code */
- .byte 0xdf /* magic2 */
- .byte 0x08 /* first_gpr << 4 | opt stuff */
- .byte 0x01 /* no. args and stack reg num */
- .byte 0x3c /* 0011 1100 ==> stack frame sz = 60 */
- .data
- .ltorg
-
-
-.globl .oVpcc
-.globl .oVncs
-.set .oVpcc, 0
-.set .oVncs, 0
diff --git a/setjmp_test.c b/setjmp_test.c
index fe9c2448..8426fd3d 100644
--- a/setjmp_test.c
+++ b/setjmp_test.c
@@ -10,15 +10,84 @@
/* code.) */
#include <stdio.h>
#include <setjmp.h>
-#include "gc.h"
+#include "gc_private.h"
+
+#ifdef __hpux
+/* X/OPEN PG3 defines "void* sbrk();" and this clashes with the definition */
+/* in gc_private.h, so we set the clock backwards with _CLASSIC_XOPEN_TYPES. */
+/* This is for HP-UX 8.0. */
+/* sbrk() is not used in this file, of course. W. Underwood, 15 Jun 1992 */
+#define _CLASSIC_XOPEN_TYPES
+#include <unistd.h>
+int
+getpagesize()
+{
+ return sysconf(_SC_PAGE_SIZE);
+}
+#endif
+
+#ifdef _AUX_SOURCE
+#include <sys/mmu.h>
+int
+getpagesize()
+{
+ return PAGESIZE;
+}
+#endif
+
+#ifdef __OS2__
+#define INCL_DOSFILEMGR
+#define INCL_DOSMISC
+#define INCL_DOSERRORS
+#include <os2.h>
+
+int
+getpagesize()
+{
+ ULONG result[1];
+
+ if (DosQuerySysInfo(QSV_PAGE_SIZE, QSV_PAGE_SIZE,
+ (void *)result, sizeof(ULONG)) != NO_ERROR) {
+ fprintf(stderr, "DosQuerySysInfo failed\n");
+ result[0] = 4096;
+ }
+ return((int)(result[0]));
+}
+#endif
+
+struct {char a_a; char * a_b;} a;
+
+int * nested_sp()
+{
+ int dummy;
+
+ return(&dummy);
+}
+
main()
{
+ int dummy;
+ long ps = getpagesize();
jmp_buf b;
register int x = strlen("a"); /* 1, slightly disguised */
static int y = 0;
+ if (nested_sp() < &dummy) {
+ printf("Stack appears to grow down, which is the default.\n");
+ printf("A good guess for STACKBOTTOM on this machine is 0x%X.\n",
+ ((long)(&dummy) + ps) & ~(ps-1));
+ } else {
+ printf("Stack appears to grow up.\n");
+ printf("Define STACK_GROWS_UP in gc_private.h\n");
+ printf("A good guess for STACKBOTTOM on this machine is 0x%X.\n",
+ ((long)(&dummy) + ps) & ~(ps-1));
+ }
+ printf("Note that this may vary between machines of ostensibly\n");
+ printf("the same architecture (e.g. Sun 3/50s and 3/80s).\n");
+ printf("A good guess for ALIGNMENT on this machine is %d.\n",
+ (unsigned long)(&(a.a_b))-(unsigned long)(&a));
+
/* Encourage the compiler to keep x in a callee-save register */
- printf("");
x = 2*x-1;
printf("");
x = 2*x-1;
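
The additions to setjmp_test probe three porting parameters at run time: stack growth direction (compare the address of a local in a nested call with one in the caller), a page-aligned guess for STACKBOTTOM, and ALIGNMENT (the offset of a pointer field that follows a char in a struct). A small stand-alone program using the same heuristics; the 4096 page size is a stand-in for getpagesize(), and the pointer comparison is, as in the test itself, only a practical heuristic:

    #include <stdio.h>
    #include <stddef.h>

    struct probe { char c; char *p; };

    /* Same trick as the test: compare a local deeper in the stack with     */
    /* one in the caller.  Formally unspecified, fine in practice.          */
    static int grows_down(char *caller_local)
    {
        char callee_local;
        return &callee_local < caller_local;
    }

    int main(void)
    {
        char here;
        long page = 4096;                    /* stand-in for getpagesize()  */

        printf("stack appears to grow %s\n",
               grows_down(&here) ? "down" : "up");
        printf("page-aligned STACKBOTTOM guess: 0x%lx\n",
               ((long)&here + page) & ~(page - 1));
        printf("ALIGNMENT guess: %ld\n", (long)offsetof(struct probe, p));
        return 0;
    }
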
diff --git a/test.c b/test.c
index 6aed1b14..027aa80c 100644
--- a/test.c
+++ b/test.c
@@ -1,21 +1,79 @@
-/* Somewhat nonconvincing test for garbage collector. */
-/* Note that this intentionally uses the worlds worst implementation */
-/* of cons. It eats up gobs of memory in an attempt to break the */
-/* collector. Process size should grow to about 1.5 Meg and stay */
-/* there. */
-/* Should take about 25 seconds (2 minutes) to run on a */
-/* Sun 3/60 (Vax 11/750) */
-/* (The Vax does reasonably well here because the compiler assures */
-/* longword pointer alignment.) */
-
+/* An incomplete test for the garbage collector. */
+/* Some more obscure entry points are not tested at all. */
+# include <stdlib.h>
# include <stdio.h>
-# include "cons.h"
+# include "gc.h"
+# ifdef PCR
+# include "th/PCR_ThCrSec.h"
+# include "th/PCR_Th.h"
+# endif
+
+/* AT_END may be defined to exercise the interior pointer test */
+/* if the collector is configured with ALL_INTERIOR_POINTERS. */
+/* As it stands, this test should succeed with either */
+/* configuration. In the FIND_LEAK configuration, it should */
+/* find lots of leaks, since we free almost nothing. */
+
+struct SEXPR {
+ struct SEXPR * sexpr_car;
+ struct SEXPR * sexpr_cdr;
+};
+
+# ifdef __STDC__
+ typedef void * void_star;
+# else
+ typedef char * void_star;
+# endif
+
+typedef struct SEXPR * sexpr;
+
+extern sexpr cons();
+
+# define nil ((sexpr) 0)
+# define car(x) ((x) -> sexpr_car)
+# define cdr(x) ((x) -> sexpr_cdr)
+# define is_nil(x) ((x) == nil)
+
+
+int extra_count = 0; /* Amount of space wasted in cons node */
+
+/* Silly implementation of Lisp cons. Intentionally wastes lots of space */
+/* to test collector. */
+sexpr cons (x, y)
+sexpr x;
+sexpr y;
+{
+ register sexpr r;
+ register int *p;
+ register my_extra = extra_count;
+
+ r = (sexpr) GC_MALLOC(sizeof(struct SEXPR) + my_extra);
+ if (r == 0) {
+ (void)printf("Out of memory\n");
+ exit(1);
+ }
+ for (p = (int *)r;
+ ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
+ if (*p) {
+ (void)printf("Found nonzero at %X - allocator is broken\n", p);
+ exit(1);
+ }
+ *p = 13;
+ }
+# ifdef AT_END
+ r = (sexpr)((char *)r + (my_extra & ~7));
+# endif
+ r -> sexpr_car = x;
+ r -> sexpr_cdr = y;
+ extra_count = (my_extra + 1) % 5000;
+ return(r);
+}
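
The rewritten cons stresses the allocator on purpose: it over-allocates by a varying extra_count bytes, verifies that GC_MALLOC handed back zeroed memory, and then scribbles over the object before using it. A minimal stand-alone version of that zero-check; gc_alloc_zeroed is a hypothetical stand-in for GC_MALLOC (calloc gives the same zeroing guarantee the test relies on), and the sketch deliberately never frees, just as GC client code would not:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for GC_MALLOC. */
    static void *gc_alloc_zeroed(size_t n) { return calloc(1, n); }

    static int check_zeroed(size_t n)
    {
        unsigned char *p = gc_alloc_zeroed(n);
        size_t i;

        if (p == NULL) { fprintf(stderr, "out of memory\n"); exit(1); }
        for (i = 0; i < n; i++) {
            if (p[i] != 0) return 0;         /* allocator returned dirty memory */
        }
        memset(p, 13, n);                    /* scribble, like the test does */
        return 1;
    }

    int main(void) { return check_zeroed(100) ? 0 : 1; }
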
/* Return reverse(x) concatenated with y */
sexpr reverse1(x, y)
sexpr x, y;
{
- if (null(x)) {
+ if (is_nil(x)) {
return(y);
} else {
return( reverse1(cdr(x), cons(car(x), y)) );
@@ -34,22 +92,42 @@ int low, up;
if (low > up) {
return(nil);
} else {
- return(cons(low, ints(low+1, up)));
+ return(cons((sexpr)low, ints(low+1, up)));
}
}
+void check_ints(list, low, up)
+sexpr list;
+int low, up;
+{
+ if ((int)(car(list)) != low) {
+ (void)printf(
+ "List reversal produced incorrect list - collector is broken\n");
+ exit(1);
+ }
+ if (low == up) {
+ if (cdr(list) != nil) {
+ (void)printf("List too long - collector is broken\n");
+ exit(1);
+ }
+ } else {
+ check_ints(cdr(list), low+1, up);
+ }
+}
+
+/* Not used, but useful for debugging: */
void print_int_list(x)
sexpr x;
{
- if (null(x)) {
- printf("NIL\n");
+ if (is_nil(x)) {
+ (void)printf("NIL\n");
} else {
- printf("%d", car(x));
- if (!null(cdr(x))) {
- printf(", ");
- print_int_list(cdr(x));
+ (void)printf("%d", car(x));
+ if (!is_nil(cdr(x))) {
+ (void)printf(", ");
+ (void)print_int_list(cdr(x));
} else {
- printf("\n");
+ (void)printf("\n");
}
}
}
@@ -61,25 +139,225 @@ struct {
} A;
#define a A.aa
-main()
+/*
+ * Repeatedly reverse lists built out of very different sized cons cells.
+ * Check that we didn't lose anything.
+ */
+reverse_test()
{
int i;
sexpr b;
- gc_init();
a = ints(1, 100);
b = ints(1, 50);
- print_int_list(a);
- print_int_list(b);
- print_int_list(reverse(a));
- print_int_list(reverse(b));
- for (i = 0; i < 100; i++) {
+ for (i = 0; i < 50; i++) {
b = reverse(reverse(b));
}
- print_int_list(a);
- print_int_list(b);
- print_int_list(reverse(a));
- print_int_list(reverse(b));
+ for (i = 0; i < 10; i++) {
+ /* This maintains the invariant that a always points to a list of */
+ /* 100 integers. Thus this is thread safe without locks. */
+ a = reverse(reverse(a));
+# if !defined(AT_END) && !defined(PCR)
+ /* This is not thread safe, since realloc explicitly deallocates */
+ if (i & 1) {
+ a = (sexpr)GC_REALLOC((void_star)a, 500);
+ } else {
+ a = (sexpr)GC_REALLOC((void_star)a, 4200);
+ }
+# endif
+ }
+ check_ints(a,1,100);
+ check_ints(b,1,50);
+ a = b = 0;
+}
+
+/*
+ * The rest of this builds balanced binary trees, checks that they don't
+ * disappear, and tests finalization.
+ */
+typedef struct treenode {
+ int level;
+ struct treenode * lchild;
+ struct treenode * rchild;
+} tn;
+
+int finalizable_count = 0;
+int finalized_count = 0;
+int dropped_something = 0;
+
+# ifdef __STDC__
+ void finalizer(void * obj, void * client_data)
+# else
+ void finalizer(obj, client_data)
+ char * obj;
+ char * client_data;
+# endif
+{
+ tn * t = (tn *)obj;
+ if ((int)client_data != t -> level) {
+ (void)printf("Wrong finalization data - collector is broken\n");
+ exit(1);
+ }
+ finalized_count++;
+}
+
+size_t counter = 0;
+
+tn * mktree(n)
+int n;
+{
+ tn * result = (tn *)GC_MALLOC(sizeof(tn));
+
+ if (n == 0) return(0);
+ if (result == 0) {
+ (void)printf("Out of memory\n");
+ exit(1);
+ }
+ result -> level = n;
+ result -> lchild = mktree(n-1);
+ result -> rchild = mktree(n-1);
+ if (counter++ % 119 == 0) {
+ GC_REGISTER_FINALIZER((void_star)result, finalizer, (void_star)n,
+ (GC_finalization_proc *)0, (void_star *)0);
+# ifdef PCR
+ PCR_ThCrSec_EnterSys();
+ /* Losing a count here causes erroneous report of failure. */
+# endif
+ finalizable_count++;
+# ifdef PCR
+ PCR_ThCrSec_ExitSys();
+# endif
+ }
+ return(result);
+}
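
mktree registers a finalizer on roughly every 119th node and passes the node's level as client data, which lets finalizer() check that it was invoked with the right object; the surrounding PCR_ThCrSec calls only protect the shared counter. A hedged usage sketch of the same registration pattern, following the GC_REGISTER_FINALIZER call shown above (it assumes gc.h declares GC_MALLOC, GC_REGISTER_FINALIZER and GC_finalization_proc as this test already uses them):

    #include "gc.h"

    static int finalized_sketch;

    static void note_finalized(void *obj, void *client_data)
    {
        (void)obj;
        (void)client_data;                   /* the tag passed at registration */
        finalized_sketch++;
    }

    static void *make_tracked(long tag)
    {
        void *p = GC_MALLOC(64);
        if (p != 0) {
            GC_REGISTER_FINALIZER(p, note_finalized, (void *)tag,
                                  (GC_finalization_proc *)0, (void **)0);
        }
        return p;
    }
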
+
+void chktree(t,n)
+tn *t;
+int n;
+{
+ if (n == 0 && t != 0) {
+ (void)printf("Clobbered a leaf - collector is broken\n");
+ exit(1);
+ }
+ if (n == 0) return;
+ if (t -> level != n) {
+ (void)printf("Lost a node at level %d - collector is broken\n", n);
+ exit(1);
+ }
+ if (counter++ % 373 == 0) (void) GC_MALLOC(counter%5001);
+ chktree(t -> lchild, n-1);
+ if (counter++ % 73 == 0) (void) GC_MALLOC(counter%373);
+ chktree(t -> rchild, n-1);
+}
+
+void alloc_small(n)
+int n;
+{
+ register int i;
+
+ for (i = 0; i < n; i += 8) {
+ if (GC_MALLOC_ATOMIC(8) == 0) {
+ (void)printf("Out of memory\n");
+ exit(1);
+ }
+ }
+}
+
+tree_test()
+{
+ tn * root = mktree(16);
+ register int i;
+
+ alloc_small(5000000);
+ chktree(root, 16);
+ if (finalized_count && ! dropped_something) {
+ (void)printf("Premature finalization - collector is broken\n");
+ exit(1);
+ }
+ dropped_something = 1;
+ root = mktree(16);
+ chktree(root, 16);
+ for (i = 16; i >= 0; i--) {
+ root = mktree(i);
+ chktree(root, i);
+ }
+ alloc_small(5000000);
+}
+
+# include "gc_private.h"
+
+int n_tests = 0;
+
+void run_one_test()
+{
+ DCL_LOCK_STATE;
+
+ reverse_test();
+ tree_test();
+ LOCK();
+ n_tests++;
+ UNLOCK();
+
+}
+
+void check_heap_stats()
+{
+ (void)printf("Completed %d tests\n", n_tests);
+ (void)printf("Finalized %d/%d objects - ",
+ finalized_count, finalizable_count);
+ if (finalized_count > finalizable_count
+ || finalized_count < finalizable_count/2) {
+ (void)printf ("finalization is probably broken\n");
+ exit(1);
+ } else {
+ (void)printf ("finalization is probably ok\n");
+ }
+ (void)printf("Total number of bytes allocated is %d\n",
+ WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
+ (void)printf("Final heap size is %d bytes\n", GC_heapsize);
+ if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
+ < 33500000*n_tests) {
+ (void)printf("Incorrect execution - missed some allocations\n");
+ exit(1);
+ }
+ if (GC_heapsize > 10000000*n_tests) {
+ (void)printf("Unexpected heap growth - collector may be broken\n");
+ exit(1);
+ }
+ (void)printf("Collector appears to work\n");
+}
+
+#ifndef PCR
+main()
+{
+ n_tests = 0;
+ run_one_test();
+ check_heap_stats();
+ (void)fflush(stdout);
+ return(0);
+}
+# else
+test()
+{
+ PCR_Th_T * th1;
+ PCR_Th_T * th2;
+ int code;
+
+ n_tests = 0;
+ th1 = PCR_Th_Fork(run_one_test, 0);
+ th2 = PCR_Th_Fork(run_one_test, 0);
+ run_one_test();
+ if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
+ != PCR_ERes_okay || code != 0) {
+ (void)printf("Thread 1 failed\n");
+ }
+ if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
+ != PCR_ERes_okay || code != 0) {
+ (void)printf("Thread 2 failed\n");
+ }
+ check_heap_stats();
+ (void)fflush(stdout);
return(0);
}
+#endif