author    Ivan Maidanski <ivmai@mail.ru>  2011-07-26 15:31:21 +0400
committer Ivan Maidanski <ivmai@mail.ru>  2011-07-26 15:31:21 +0400
commit    e955362cfcef47fdc3ad2140f50ea4638fd86a4d (patch)
tree      07b9d240b8bc1a7be60cc79c1ce1e43866e2b4a7
parent    c63ec8250de446bca83601966c918d37ad120a83 (diff)
download  bdwgc-e955362cfcef47fdc3ad2140f50ea4638fd86a4d.tar.gz

gc5.0alpha3 tarball import (tag: gc5_0alpha3)
-rw-r--r--  Makefile                      71
-rw-r--r--  README                        70
-rw-r--r--  README.amiga                  47
-rw-r--r--  allchblk.c                   663
-rw-r--r--  alloc.c                      116
-rw-r--r--  backptr.h                     56
-rw-r--r--  cord/cordxtra.c                2
-rw-r--r--  cord/gc.h                     23
-rw-r--r--  dbg_mlc.c                    180
-rw-r--r--  dyn_load.c                     3
-rw-r--r--  finalize.c                    69
-rw-r--r--  gc.h                          23
-rw-r--r--  gc_alloc.h                     2
-rw-r--r--  gc_copy_descr.h               26
-rw-r--r--  gc_hdrs.h                      6
-rw-r--r--  gc_mark.h                      2
-rw-r--r--  gc_priv.h                     65
-rw-r--r--  gcconfig.h                    62
-rw-r--r--  headers.c                     86
-rw-r--r--  include/backptr.h             56
-rw-r--r--  include/gc.h                  23
-rw-r--r--  include/gc_alloc.h             2
-rw-r--r--  include/leak_detector.h        7
-rw-r--r--  include/private/gc_hdrs.h      6
-rw-r--r--  include/private/gc_priv.h     65
-rw-r--r--  include/private/gcconfig.h    62
-rw-r--r--  linux_threads.c                6
-rw-r--r--  mach_dep.c                    34
-rw-r--r--  malloc.c                      12
-rw-r--r--  mallocx.c                     12
-rw-r--r--  mark.c                         9
-rw-r--r--  misc.c                        17
-rw-r--r--  nursery.c                    175
-rwxr-xr-x  nursery.h                     90
-rw-r--r--  os_dep.c                     245
-rw-r--r--  reclaim.c                    120
-rw-r--r--  solaris_threads.c             27
-rw-r--r--  sparc_mach_dep.s               4
-rw-r--r--  sparc_sunos4_mach_dep.s        2
-rw-r--r--  test.c                        18
-rw-r--r--  version.h                      6
41 files changed, 2060 insertions, 510 deletions
diff --git a/Makefile b/Makefile
index 063d394a..cfbfc452 100644
--- a/Makefile
+++ b/Makefile
@@ -24,10 +24,8 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
# -DSILENT disables statistics printing, and improves performance.
-# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
-# altered stubborn objects, at substantial performance cost.
-# Use only for incremental collector debugging.
-# -DFIND_LEAK causes the collector to assume that all inaccessible
+# -DFIND_LEAK causes GC_find_leak to be initially set.
+# This causes the collector to assume that all inaccessible
# objects should have been explicitly deallocated, and reports exceptions.
# Finalization and the test program are not usable in this mode.
# -DSOLARIS_THREADS enables support for Solaris (thr_) threads.
@@ -85,9 +83,12 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# finalize.c). Objects reachable from finalizable objects will be marked
# in a sepearte postpass, and hence their memory won't be reclaimed.
# Not recommended unless you are implementing a language that specifies
-# these semantics.
+# these semantics. Since 5.0, this determines only the initial value
+# of the GC_java_finalization variable.
# -DFINALIZE_ON_DEMAND causes finalizers to be run only in response
# to explicit GC_invoke_finalizers() calls.
+# In 5.0 this became runtime adjustable, and this only determines the
+# initial value of GC_finalize_on_demand.
# -DATOMIC_UNCOLLECTABLE includes code for GC_malloc_atomic_uncollectable.
# This is useful if either the vendor malloc implementation is poor,
# or if REDIRECT_MALLOC is used.
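
Since the flags above now merely seed runtime variables, the same behavior
can be selected without rebuilding the collector. A minimal sketch, assuming
the GC_find_leak / GC_finalize_on_demand / GC_java_finalization declarations
added to gc.h later in this patch:

    #include "gc.h"

    int main(void)
    {
        /* Set before the first allocation; these supersede the old     */
        /* compile-time-only FIND_LEAK / FINALIZE_ON_DEMAND /           */
        /* JAVA_FINALIZATION behavior.                                  */
        GC_find_leak = 1;           /* report unreachable, unfreed objects */
        GC_finalize_on_demand = 1;  /* run finalizers only when asked      */
        GC_java_finalization = 1;   /* mark through finalizable objects    */

        /* ... allocate with GC_MALLOC, drop some references ... */

        GC_gcollect();              /* with GC_find_leak: logs leaked objects  */
        GC_invoke_finalizers();     /* explicit, since finalize_on_demand set  */
        return 0;
    }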
@@ -98,6 +99,10 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# fragmentation, but generally better performance for large heaps.
# -DUSE_MMAP use MMAP instead of sbrk to get new memory.
# Works for Solaris and Irix.
+# -DUSE_MUNMAP causes memory to be returned to the OS under the right
+# circumstances. This currently disables VM-based incremental collection.
+# It is currently experimental, and works only under some Unix and
+# Linux versions.
# -DMMAP_STACKS (for Solaris threads) Use mmap from /dev/zero rather than
# GC_scratch_alloc() to get stack memory.
# -DPRINT_BLACK_LIST Whenever a black list entry is added, i.e. whenever
@@ -109,12 +114,25 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# allocation strategy. The new strategy tries harder to minimize
# fragmentation, sometimes at the expense of spending more time in the
# large block allocator and/or collecting more frequently.
-# If you expect the allocator to promtly use an explicitly expanded
+# If you expect the allocator to promptly use an explicitly expanded
# heap, this is highly recommended.
+# -DKEEP_BACK_PTRS Add code to save back pointers in debugging headers
+# for objects allocated with the debugging allocator. If all objects
+# are allocated through GC_MALLOC with GC_DEBUG defined, this allows the client
+# to determine how particular or randomly chosen objects are reachable
+# for debugging/profiling purposes. The backptr.h interface is
+# implemented only if this is defined.
+# -DGC_ASSERTIONS Enable some internal GC assertion checking. Currently
+# this facility is only used in a few places. It is intended primarily
+# for debugging of the garbage collector itself, but could also
+# occasionally be useful for debugging of client code. Slows down the
+# collector somewhat, but not drastically.
+# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
+# altered stubborn objects, at substantial performance cost.
+# Use only for debugging of the incremental collector.
#
-
LIBGC_CFLAGS= -O -DNO_SIGNALS -DSILENT \
-DREDIRECT_MALLOC=GC_malloc_uncollectable \
-DDONT_ADD_BYTE_AT_END -DALL_INTERIOR_POINTERS
@@ -145,7 +163,7 @@ SRCS= $(CSRCS) mips_sgi_mach_dep.s rs6000_mach_dep.s alpha_mach_dep.s \
threadlibs.c if_mach.c if_not_there.c gc_cpp.cc gc_cpp.h weakpointer.h \
gcc_support.c mips_ultrix_mach_dep.s include/gc_alloc.h gc_alloc.h \
include/new_gc_alloc.h include/javaxfc.h sparc_sunos4_mach_dep.s \
- solaris_threads.h $(CORD_SRCS)
+ solaris_threads.h backptr.h $(CORD_SRCS)
OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE BCC_MAKEFILE \
README test.c test_cpp.cc setjmp_t.c SMakefile.amiga \
@@ -153,7 +171,7 @@ OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE BCC_MAKEFILE \
cord/gc.h include/gc.h include/gc_typed.h include/cord.h \
include/ec.h include/private/cord_pos.h include/private/gcconfig.h \
include/private/gc_hdrs.h include/private/gc_priv.h \
- include/gc_cpp.h README.rs6000 \
+ include/gc_cpp.h README.rs6000 include/backptr.h \
include/weakpointer.h README.QUICK callprocs pc_excludes \
barrett_diagram README.OS2 README.Mac MacProjects.sit.hqx \
MacOS.c EMX_MAKEFILE makefile.depend README.debugging \
@@ -162,7 +180,8 @@ OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE BCC_MAKEFILE \
add_gc_prefix.c README.solaris2 README.sgi README.hp README.uts \
win32_threads.c NT_THREADS_MAKEFILE gc.mak README.dj Makefile.dj \
README.alpha README.linux version.h Makefile.DLLs \
- WCC_MAKEFILE
+ WCC_MAKEFILE nursery.c nursery.h gc_copy_descr.h \
+ include/leak_detector.h
CORD_INCLUDE_FILES= $(srcdir)/gc.h $(srcdir)/cord/cord.h $(srcdir)/cord/ec.h \
$(srcdir)/cord/private/cord_pos.h
@@ -199,19 +218,23 @@ mark.o typd_mlc.o finalize.o: $(srcdir)/gc_mark.h
base_lib gc.a: $(OBJS) dyn_load.o $(UTILS)
echo > base_lib
- rm -f on_sparc_sunos5_1
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_1
+ rm -f dont_ar_1
+ ./if_mach SPARC SUNOS5 touch dont_ar_1
./if_mach SPARC SUNOS5 $(AR) rus gc.a $(OBJS) dyn_load.o
- ./if_not_there on_sparc_sunos5_1 $(AR) ru gc.a $(OBJS) dyn_load.o
- ./if_not_there on_sparc_sunos5_1 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_1
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a $(OBJS) dyn_load.o
+ ./if_not_there dont_ar_1 $(AR) ru gc.a $(OBJS) dyn_load.o
+ ./if_not_there dont_ar_1 $(RANLIB) gc.a || cat /dev/null
# ignore ranlib failure; that usually means it doesn't exist, and isn't needed
cords: $(CORD_OBJS) cord/cordtest $(UTILS)
- rm -f on_sparc_sunos5_3
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_3
+ rm -f dont_ar_3
+ ./if_mach SPARC SUNOS5 touch dont_ar_3
./if_mach SPARC SUNOS5 $(AR) rus gc.a $(CORD_OBJS)
- ./if_not_there on_sparc_sunos5_3 $(AR) ru gc.a $(CORD_OBJS)
- ./if_not_there on_sparc_sunos5_3 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_3
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a $(CORD_OBJS)
+ ./if_not_there dont_ar_3 $(AR) ru gc.a $(CORD_OBJS)
+ ./if_not_there dont_ar_3 $(RANLIB) gc.a || cat /dev/null
gc_cpp.o: $(srcdir)/gc_cpp.cc $(srcdir)/gc_cpp.h $(srcdir)/gc.h Makefile
$(CXX) -c $(CXXFLAGS) $(srcdir)/gc_cpp.cc
@@ -223,11 +246,13 @@ base_lib $(UTILS)
./if_not_there test_cpp $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a `./threadlibs`
c++: gc_cpp.o $(srcdir)/gc_cpp.h test_cpp
- rm -f on_sparc_sunos5_4
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_4
+ rm -f dont_ar_4
+ ./if_mach SPARC SUNOS5 touch dont_ar_4
./if_mach SPARC SUNOS5 $(AR) rus gc.a gc_cpp.o
- ./if_not_there on_sparc_sunos5_4 $(AR) ru gc.a gc_cpp.o
- ./if_not_there on_sparc_sunos5_4 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_4
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a gc_cpp.o
+ ./if_not_there dont_ar_4 $(AR) ru gc.a gc_cpp.o
+ ./if_not_there dont_ar_4 $(RANLIB) gc.a || cat /dev/null
./test_cpp 1
echo > c++
@@ -276,6 +301,7 @@ mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_sgi_mach_dep.s $(srcdir)/mips_ul
./if_mach ALPHA "" $(AS) -o mach_dep.o $(srcdir)/alpha_mach_dep.s
./if_mach SPARC SUNOS5 $(AS) -o mach_dep.o $(srcdir)/sparc_mach_dep.s
./if_mach SPARC SUNOS4 $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
+ ./if_mach SPARC OPENBSD $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
./if_not_there mach_dep.o $(CC) -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
mark_rts.o: $(srcdir)/mark_rts.c if_mach if_not_there $(UTILS)
@@ -313,6 +339,7 @@ cord/de: $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(UTILS)
./if_mach RS6000 "" $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
./if_mach I386 LINUX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses `./threadlibs`
./if_mach ALPHA LINUX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
+ ./if_mach M68K AMIGA $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
./if_not_there cord/de $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES) `./threadlibs`
if_mach: $(srcdir)/if_mach.c $(srcdir)/gcconfig.h
diff --git a/README b/README
index 5c572ce3..80cb26ab 100644
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
-Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
+Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -11,19 +11,19 @@ Permission to modify the code and to distribute modified code is granted,
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
-This is version 4.14 of a conservative garbage collector for C and C++.
+This is version 5.0alpha3 of a conservative garbage collector for C and C++.
You might find a more recent version of this at
-http://reality.sgi.com/boehm/gc.html
+http://www.hpl.hp.com/personal/Hans_Boehm/gc
HISTORY -
Early versions of this collector were developed as a part of research
projects supported in part by the National Science Foundation
and the Defense Advance Research Projects Agency.
-Much of the code was rewritten by Hans-J. Boehm at Xerox PARC
-and is now maintained by him at SGI (boehm@sgi.com).
+Much of the code was rewritten by Hans-J. Boehm (boehm@acm.org) at Xerox PARC
+and at SGI.
Some other contributors:
@@ -40,8 +40,8 @@ Robert Brazile (brazile@diamond.bbn.com) originally supplied the ULTRIX code.
Al Dosser (dosser@src.dec.com) and Regis Cridlig (Regis.Cridlig@cl.cam.ac.uk)
subsequently provided updates and information on variation between ULTRIX
systems. Parag Patel (parag@netcom.com) supplied the A/UX code.
-Jesper Peterson(jep@mtiame.mtia.oz.au) and
-Michel Schinz supplied the Amiga port.
+Jesper Peterson(jep@mtiame.mtia.oz.au), Michel Schinz, and
+Martin Tauchmann (martintauchmann@bigfoot.com) supplied the Amiga port.
Thomas Funke (thf@zelator.in-berlin.de(?)) and
Brian D.Carlstrom (bdc@clark.lcs.mit.edu) supplied the NeXT ports.
Douglas Steel (doug@wg.icl.co.uk) provided ICL DRS6000 code.
@@ -612,7 +612,7 @@ reclaimed. Exclusive-or'ing forward and backward links in a list
doesn't cut it.
Some C optimizers may lose the last undisguised pointer to a memory
object as a consequence of clever optimizations. This has almost
-never been observed in practice. Send mail to boehm@sgi.com
+never been observed in practice. Send mail to boehm@acm.org
for suggestions on how to fix your compiler.
This is not a real-time collector. In the standard configuration,
percentage of time required for collection should be constant across
@@ -621,7 +621,7 @@ heap sizes. But collection pauses will increase for larger heaps.
per MB of accessible memory that needs to be scanned. Your mileage
may vary.) The incremental/generational collection facility helps,
but is portable only if "stubborn" allocation is used.
- Please address bug reports to boehm@sgi.com. If you are
+ Please address bug reports to boehm@acm.org. If you are
contemplating a major addition, you might also send mail to ask whether
it's already been done (or whether we tried and discarded it).
@@ -1451,6 +1451,51 @@ Since 4.14alpha1
Since 4.14alpha2
- changed STACKBOTTOM for DJGPP (Thanks to Salvador Eduardo Tropea).
+
+Since 4.14
+ - Reworked large block allocator. Now uses multiple doubly linked free
+ lists to approximate best fit.
+ - Changed heap expansion heuristic. Entirely free blocks are no longer
+ counted towards the heap size. This seems to have a major impact on
+ heap size stability; the old version could expand the heap way too
+ much in the presence of large block fragmentation.
+ - added -DGC_ASSERTIONS and some simple assertions inside the collector.
+ This is mainly for collector debugging.
+ - added -DUSE_MUNMAP to allow the heap to shrink. Supported on only
+ a few UNIX-like platforms for now.
+ - added GC_dump_regions() for debugging of fragmentation issues.
+ - Changed PowerPC pointer alignment under Linux to 4. (This needs
+ checking by someone who has one. The suggestions came to me via a
+ rather circuitous path.)
+ - Changed the Linux/Alpha port to walk the data segment backwards until
+ it encounters a SIGSEGV. The old way to find the start of the data
+ segment broke with a recent release.
+ - cordxtra.c needed to call GC_REGISTER_FINALIZER instead of
+ GC_register_finalizer, so that it would continue to work with GC_DEBUG.
+ - allochblk sometimes cleared the wrong block for debugging purposes
+ when it dropped blacklisted blocks. This could result in spurious
+ error reports with GC_DEBUG.
+ - added MACOS X Server support. (Thanks to Andrew Stone.)
+ - Changed the Solaris threads code to ignore stack limits > 8 MB with
+ a warning. Empirically, it is not safe to access arbitrary pages
+ in such large stacks. And the dirty bit implementation does not
+ guarantee that none of them will be accessed.
+ - Integrated Martin Tauchmann's Amiga changes.
+ - Integrated James Dominy's OpenBSD/SPARC port.
+
+Since 5.0alpha1
+ - Fixed bugs introduced in alpha1 (OpenBSD & large block initialization).
+ - Added -DKEEP_BACK_PTRS and backptr.h interface. (The implementation
+ idea came from Al Demers.)
+
+Since 5.0alpha2
+ - Added some highly incomplete code to support a copied young generation.
+ Comments on nursery.h are appreciated.
+ - Changed -DFIND_LEAK, -DJAVA_FINALIZATION, and -DFINALIZE_ON_DEMAND,
+ so the same effect could be obtained with a runtime switch. This is
+ a step towards standardizing on a single dynamic GC library.
+ - Significantly changed the way leak detection is handled, as a consequence
+ of the above.
To do:
- Very large root set sizes (> 16 MB or so) could cause the collector
@@ -1469,9 +1514,4 @@ To do:
blocks reside in the newly allocated heap section, the heuristic for
temporarily ignoring black-listing fails, and the heap grows too much.
(This was observed in only one case, and could be worked around, but ...)
- - I've started work on rewriting the large block allocator to use approximate
- best fit. There are rare cases in which the current allocator results in
- excessive large block fragmentation, even with the 4.13 fixes. This should
- also reduce large block allocation time, whcih has become occasionally
- noticable in 4.13.
-
+ - Some platform specific updates are waiting for 4.15alpha1.
diff --git a/README.amiga b/README.amiga
index 865642be..47b15884 100644
--- a/README.amiga
+++ b/README.amiga
@@ -1,4 +1,51 @@
+===========================================================================
+ Martin Tauchmann's notes (1-Apr-99)
+===========================================================================
+
+The collector now also works with the GNU C compiler V2.7.2.1. <ftp://ftp.unina.it/pub/amiga/geekgadgets/amiga/m68k/snapshots/971125/amiga-bin/>
+To use it, modify the `Makefile`, changing
+CC=cc $(ABI_FLAG)
+to
+CC=gcc $(ABI_FLAG)
+
+TECHNICAL NOTES
+
+- `GC_get_stack_base()` and `GC_register_data_segments()` now work with
+  every C compiler, and also under Workbench.
+
+- Removed AMIGA_SKIP_SEG; the code segment must still not be scanned by the GC.
+
+
+PROBLEMS
+- Problems occur when the linker does not merge all code segments into a
+  single one. GCC's LD always does.
+
+- With ixemul.library V47.3, when a GC program is launched from another
+  program (for example, `Make` or `if_mach M68K AMIGA gctest`),
+  `GC_register_data_segments()` finds the segment list of the caller
+  program. This can be fixed if the run-time initialization code (for C
+  programs, usually *crt0*) supports `__data` and `__bss`.
+
+- The PowerPC Amiga is currently not supported.
+
+- Dynamic libraries (dyn_load.c) are not supported.
+
+
+TESTED WITH SOFTWARE
+
+`Optimized Oberon 2 C` (oo2c) <http://cognac.informatik.uni-kl.de/download/index.html>
+
+
+TESTED WITH HARDWARE
+
+MC68030
+
+
+CONTACT
+Please contact me at <martintauchmann@bigfoot.com> when you change the
+Amiga port. <http://martintauchmann.home.pages.de>
+
===========================================================================
Michel Schinz's notes
===========================================================================
diff --git a/allchblk.c b/allchblk.c
index ff94b480..d8d0afdf 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -1,7 +1,7 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998-1999 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -12,7 +12,6 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, August 9, 1995 5:08 pm PDT */
#define DEBUG
#undef DEBUG
@@ -21,39 +20,68 @@
/*
- * allocate/free routines for heap blocks
- * Note that everything called from outside the garbage collector
- * should be prepared to abort at any point as the result of a signal.
+ * Free heap blocks are kept on one of several free lists,
+ * depending on the size of the block. Each free list is doubly linked.
+ * Adjacent free blocks are coalesced.
*/
-/*
- * Free heap blocks are kept on a list sorted by address.
- * The hb_hdr.hbh_sz field of a free heap block contains the length
- * (in bytes) of the entire block.
- * Neighbors are coalesced.
- */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
/* largest block we will allocate starting on a black */
/* listed block. Must be >= HBLKSIZE. */
-struct hblk * GC_hblkfreelist = 0;
-struct hblk *GC_savhbp = (struct hblk *)0; /* heap block preceding next */
- /* block to be examined by */
- /* GC_allochblk. */
+# define UNIQUE_THRESHOLD 32
+ /* Sizes up to this many HBLKs each have their own free list */
+# define HUGE_THRESHOLD 256
+ /* Sizes of at least this many heap blocks are mapped to a */
+ /* single free list. */
+# define FL_COMPRESSION 8
+ /* In between sizes map this many distinct sizes to a single */
+ /* bin. */
+
+# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
+ + UNIQUE_THRESHOLD
+
+struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
+
+/* Map a number of blocks to the appropriate large block free list index. */
+int GC_hblk_fl_from_blocks(blocks_needed)
+word blocks_needed;
+{
+ if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
+ if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
+ return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
+ + UNIQUE_THRESHOLD;
+
+}
+
+# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
+# define PHDR(hhdr) HDR(hhdr -> hb_prev)
+# define NHDR(hhdr) HDR(hhdr -> hb_next)
+
+# ifdef USE_MUNMAP
+# define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
+# else /* !USE_MUNMAP */
+# define IS_MAPPED(hhdr) 1
+# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
- struct hblk * h = GC_hblkfreelist;
+ struct hblk * h;
word total_free = 0;
- hdr * hhdr = HDR(h);
+ hdr * hhdr;
word sz;
+ int i;
- while (h != 0) {
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
+ while (h != 0) {
+ hhdr = HDR(h);
sz = hhdr -> hb_sz;
- GC_printf2("0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
+ GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
total_free += sz;
if (GC_is_black_listed(h, HBLKSIZE) != 0) {
GC_printf0("start black listed\n");
@@ -63,11 +91,90 @@ void GC_print_hblkfreelist()
GC_printf0("not black listed\n");
}
h = hhdr -> hb_next;
- hhdr = HDR(h);
+ }
+ }
+ if (total_free != GC_large_free_bytes) {
+ GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
+ (unsigned long) GC_large_free_bytes);
}
GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
+/* Return the free list index on which the block described by the header */
+/* appears, or -1 if it appears nowhere. */
+int free_list_index_of(wanted)
+hdr * wanted;
+{
+ struct hblk * h;
+ hdr * hhdr;
+ int i;
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ while (h != 0) {
+ hhdr = HDR(h);
+ if (hhdr == wanted) return i;
+ h = hhdr -> hb_next;
+ }
+ }
+ return -1;
+}
+
+void GC_dump_regions()
+{
+ int i;
+ ptr_t start, end;
+ ptr_t p;
+ size_t bytes;
+ hdr *hhdr;
+ for (i = 0; i < GC_n_heap_sects; ++i) {
+ start = GC_heap_sects[i].hs_start;
+ bytes = GC_heap_sects[i].hs_bytes;
+ end = start + bytes;
+ /* Merge in contiguous sections. */
+ while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
+ ++i;
+ end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
+ }
+ GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
+ for (p = start; p < end;) {
+ hhdr = HDR(p);
+ GC_printf1("\t0x%lx ", (unsigned long)p);
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ GC_printf1("Missing header!!\n", hhdr);
+ p += HBLKSIZE;
+ continue;
+ }
+ if (HBLK_IS_FREE(hhdr)) {
+ int correct_index = GC_hblk_fl_from_blocks(
+ divHBLKSZ(hhdr -> hb_sz));
+ int actual_index;
+
+ GC_printf1("\tfree block of size 0x%lx bytes",
+ (unsigned long)(hhdr -> hb_sz));
+ if (IS_MAPPED(hhdr)) {
+ GC_printf0("\n");
+ } else {
+ GC_printf0("(unmapped)\n");
+ }
+ actual_index = free_list_index_of(hhdr);
+ if (-1 == actual_index) {
+ GC_printf1("\t\tBlock not on free list %ld!!\n",
+ correct_index);
+ } else if (correct_index != actual_index) {
+ GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
+ actual_index, correct_index);
+ }
+ p += hhdr -> hb_sz;
+ } else {
+ GC_printf1("\tused for blocks of size 0x%lx bytes\n",
+ (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
+ p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ }
+ }
+ }
+}
+
# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and */
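The block-size binning introduced above (GC_hblk_fl_from_blocks) can be
sanity-checked in isolation; a standalone sketch using the same constants,
with the expected free-list index for a few sample block counts:

    #include <stdio.h>

    #define UNIQUE_THRESHOLD 32   /* sizes 1..32 blocks: one list per size */
    #define HUGE_THRESHOLD  256   /* sizes >= 256 blocks: one shared list  */
    #define FL_COMPRESSION    8   /* in between: 8 sizes per bin           */
    #define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                        + UNIQUE_THRESHOLD)

    static int fl_from_blocks(unsigned blocks_needed)
    {
        if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
        if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
        return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
               + UNIQUE_THRESHOLD;
    }

    int main(void)
    {
        /* Expected: 1 -> 1, 32 -> 32, 33 -> 32, 40 -> 33, 255 -> 59, 256 -> 60 */
        printf("%d %d %d %d %d %d\n",
               fl_from_blocks(1), fl_from_blocks(32), fl_from_blocks(33),
               fl_from_blocks(40), fl_from_blocks(255), fl_from_blocks(256));
        return 0;
    }
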
@@ -100,20 +207,265 @@ unsigned char flags;
return(TRUE);
}
-#ifdef EXACT_FIRST
-# define LAST_TRIP 2
-#else
-# define LAST_TRIP 1
-#endif
+#define FL_UNKNOWN -1
+/*
+ * Remove hhdr from the appropriate free list.
+ * We assume it is on the nth free list, or on the size
+ * appropriate free list if n is FL_UNKNOWN.
+ */
+void GC_remove_from_fl(hhdr, n)
+hdr * hhdr;
+int n;
+{
+ GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+ if (hhdr -> hb_prev == 0) {
+ int index;
+ if (FL_UNKNOWN == n) {
+ index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+ } else {
+ index = n;
+ }
+ GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
+ GC_hblkfreelist[index] = hhdr -> hb_next;
+ } else {
+ PHDR(hhdr) -> hb_next = hhdr -> hb_next;
+ }
+ if (0 != hhdr -> hb_next) {
+ GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
+ NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;
+ }
+}
+
+/*
+ * Return a pointer to the free block ending just before h, if any.
+ */
+struct hblk * GC_free_block_ending_at(h)
+struct hblk *h;
+{
+ struct hblk * p = h - 1;
+ hdr * phdr = HDR(p);
+
+ while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
+ p = FORWARDED_ADDR(p,phdr);
+ phdr = HDR(p);
+ }
+ if (0 != phdr && HBLK_IS_FREE(phdr)) return p;
+ p = GC_prev_block(h - 1);
+ if (0 != p) {
+ phdr = HDR(p);
+ if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
+ return p;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Add hhdr to the appropriate free list.
+ * We maintain individual free lists sorted by address.
+ */
+void GC_add_to_fl(h, hhdr)
+struct hblk *h;
+hdr * hhdr;
+{
+ int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+ struct hblk *second = GC_hblkfreelist[index];
+# ifdef GC_ASSERTIONS
+ struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
+ hdr * nexthdr = HDR(next);
+ struct hblk *prev = GC_free_block_ending_at(h);
+ hdr * prevhdr = HDR(prev);
+ GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
+ GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
+# endif
+ GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+ GC_hblkfreelist[index] = h;
+ hhdr -> hb_next = second;
+ hhdr -> hb_prev = 0;
+ if (0 != second) HDR(second) -> hb_prev = h;
+ GC_invalidate_map(hhdr);
+}
+
+#ifdef USE_MUNMAP
+
+/* Unmap blocks that haven't been recently touched. This is the only */
+/* way blocks are ever unmapped. */
+void GC_unmap_old(void)
+{
+ struct hblk * h;
+ hdr * hhdr;
+ word sz;
+ unsigned short last_rec, threshold;
+ int i;
+# define UNMAP_THRESHOLD 6
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
+ hhdr = HDR(h);
+ if (!IS_MAPPED(hhdr)) continue;
+ threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
+ last_rec = hhdr -> hb_last_reclaimed;
+ if (last_rec > GC_gc_no
+ || last_rec < threshold && threshold < GC_gc_no
+ /* not recently wrapped */) {
+ sz = hhdr -> hb_sz;
+ GC_unmap((ptr_t)h, sz);
+ hhdr -> hb_flags |= WAS_UNMAPPED;
+ }
+ }
+ }
+}
+
+/* Merge all unmapped blocks that are adjacent to other free */
+/* blocks. This may involve remapping, since all blocks are either */
+/* fully mapped or fully unmapped. */
+void GC_merge_unmapped(void)
+{
+ struct hblk * h, *next;
+ hdr * hhdr, *nexthdr;
+ word size, nextsize;
+ int i;
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ while (h != 0) {
+ hhdr = HDR(h);
+ size = hhdr->hb_sz;
+ next = (struct hblk *)((word)h + size);
+ nexthdr = HDR(next);
+ /* Coalesce with successor, if possible */
+ if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
+ nextsize = nexthdr -> hb_sz;
+ if (IS_MAPPED(hhdr)) {
+ GC_ASSERT(!IS_MAPPED(nexthdr));
+ /* make both consistent, so that we can merge */
+ if (size > nextsize) {
+ GC_remap((ptr_t)next, nextsize);
+ } else {
+ GC_unmap((ptr_t)h, size);
+ hhdr -> hb_flags |= WAS_UNMAPPED;
+ }
+ } else if (IS_MAPPED(nexthdr)) {
+ GC_ASSERT(!IS_MAPPED(hhdr));
+ if (size > nextsize) {
+ GC_unmap((ptr_t)next, nextsize);
+ } else {
+ GC_remap((ptr_t)h, size);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+ } else {
+ /* Unmap any gap in the middle */
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
+ }
+ /* If they are both unmapped, we merge, but leave unmapped. */
+ GC_remove_from_fl(hhdr, i);
+ GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+ hhdr -> hb_sz += nexthdr -> hb_sz;
+ GC_remove_header(next);
+ GC_add_to_fl(h, hhdr);
+ /* Start over at beginning of list */
+ h = GC_hblkfreelist[i];
+ } else /* not mergable with successor */ {
+ h = hhdr -> hb_next;
+ }
+ } /* while (h != 0) ... */
+ } /* for ... */
+}
+
+#endif /* USE_MUNMAP */
+
+/*
+ * Return a pointer to a block starting at h of length bytes.
+ * Memory for the block is mapped.
+ * Remove the block from its free list, and return the remainder (if any)
+ * to its appropriate free list.
+ * May fail by returning 0.
+ * The header for the returned block must be set up by the caller.
+ * If the return value is not 0, then hhdr is the header for it.
+ */
+struct hblk * GC_get_first_part(h, hhdr, bytes, index)
+struct hblk *h;
+hdr * hhdr;
+word bytes;
+int index;
+{
+ word total_size = hhdr -> hb_sz;
+ struct hblk * rest;
+ hdr * rest_hdr;
+
+ GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
+ GC_remove_from_fl(hhdr, index);
+ if (total_size == bytes) return h;
+ rest = (struct hblk *)((word)h + bytes);
+ if (!GC_install_header(rest)) return(0);
+ rest_hdr = HDR(rest);
+ rest_hdr -> hb_sz = total_size - bytes;
+ rest_hdr -> hb_flags = 0;
+# ifdef GC_ASSERTIONS
+ /* Mark h not free, to avoid assertion about adjacent free blocks. */
+ hhdr -> hb_map = 0;
+# endif
+ GC_add_to_fl(rest, rest_hdr);
+ return h;
+}
+
+/*
+ * H is a free block. N points at an address inside it.
+ * A new header for n has already been set up. Fix up h's header
+ * to reflect the fact that it is being split, move it to the
+ * appropriate free list.
+ * N replaces h in the original free list.
+ *
+ * Nhdr is not completely filled in, since it is about to be allocated.
+ * It may in fact end up on the wrong free list for its size.
+ * (Hence adding it to a free list is silly. But this path is hopefully
+ * rare enough that it doesn't matter. The code is cleaner this way.)
+ */
+void GC_split_block(h, hhdr, n, nhdr, index)
+struct hblk *h;
+hdr * hhdr;
+struct hblk *n;
+hdr * nhdr;
+int index; /* Index of free list */
+{
+ word total_size = hhdr -> hb_sz;
+ word h_size = (word)n - (word)h;
+ struct hblk *prev = hhdr -> hb_prev;
+ struct hblk *next = hhdr -> hb_next;
-word GC_max_hblk_size = HBLKSIZE;
+ /* Replace h with n on its freelist */
+ nhdr -> hb_prev = prev;
+ nhdr -> hb_next = next;
+ nhdr -> hb_sz = total_size - h_size;
+ nhdr -> hb_flags = 0;
+ if (0 != prev) {
+ HDR(prev) -> hb_next = n;
+ } else {
+ GC_hblkfreelist[index] = n;
+ }
+ if (0 != next) {
+ HDR(next) -> hb_prev = n;
+ }
+# ifdef GC_ASSERTIONS
+ nhdr -> hb_map = 0; /* Don't fail test for consecutive */
+ /* free blocks in GC_add_to_fl. */
+# endif
+# ifdef USE_MUNMAP
+ hhdr -> hb_last_reclaimed = GC_gc_no;
+# endif
+ hhdr -> hb_sz = h_size;
+ GC_add_to_fl(h, hhdr);
+ GC_invalidate_map(nhdr);
+}
+struct hblk * GC_allochblk_nth();
+
/*
* Allocate (and return pointer to) a heap block
- * for objects of size sz words.
+ * for objects of size sz words, searching the nth free list.
*
* NOTE: We set obj_map field in header correctly.
- * Caller is resposnsible for building an object freelist in block.
+ * Caller is responsible for building an object freelist in block.
*
* We clear the block if it is destined for large objects, and if
* kind requires that newly allocated objects be cleared.
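
From the client's point of view, the USE_MUNMAP machinery above returns idle
pages to the OS without shrinking the logical heap. A minimal sketch, assuming
a collector built with -DUSE_MUNMAP (UNMAP_THRESHOLD above is 6 collections):

    #include <stdio.h>
    #include "gc.h"

    int main(void)
    {
        int i;
        void *big = GC_MALLOC(8 * 1024 * 1024);
        big = 0;                       /* drop the only reference        */
        for (i = 0; i < 8; ++i) {
            GC_gcollect();             /* each cycle ages the free block */
        }
        /* The freed block is now old enough for GC_unmap_old(): its    */
        /* pages may be unmapped, but it stays on its free list and the */
        /* heap size reported below is unchanged.                       */
        printf("heap size: %lu\n", (unsigned long)GC_get_heap_size());
        return 0;
    }
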
@@ -124,48 +476,42 @@ word sz;
int kind;
unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
{
- register struct hblk *thishbp;
- register hdr * thishdr; /* Header corr. to thishbp */
+ int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
+ int i;
+ for (i = start_list; i <= N_HBLK_FLS; ++i) {
+ struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
+ if (0 != result) return result;
+ }
+ return 0;
+}
+/*
+ * The same, but with search restricted to nth free list.
+ */
+struct hblk *
+GC_allochblk_nth(sz, kind, flags, n)
+word sz;
+int kind;
+unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
+int n;
+{
register struct hblk *hbp;
register hdr * hhdr; /* Header corr. to hbp */
- struct hblk *prevhbp;
- register hdr * phdr; /* Header corr. to prevhbp */
+ register struct hblk *thishbp;
+ register hdr * thishdr; /* Header corr. to thishbp */
signed_word size_needed; /* number of bytes in requested objects */
signed_word size_avail; /* bytes available in this block */
- int trip_count = 0;
size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
- if ((word)size_needed > GC_max_hblk_size)
- GC_max_hblk_size = size_needed;
/* search for a big enough block in free list */
- hbp = GC_savhbp;
+ hbp = GC_hblkfreelist[n];
hhdr = HDR(hbp);
- for(;;) {
-
- prevhbp = hbp;
- phdr = hhdr;
- hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
- hhdr = HDR(hbp);
-
- if( prevhbp == GC_savhbp) {
- if (trip_count == LAST_TRIP) return(0);
- ++trip_count;
- }
-
- if( hbp == 0 ) continue;
-
+ for(; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {
size_avail = hhdr->hb_sz;
-# ifdef EXACT_FIRST
- if (trip_count <= 1 && size_avail != size_needed) continue;
-# endif
if (size_avail < size_needed) continue;
# ifdef PRESERVE_LAST
if (size_avail != size_needed
- && !GC_incremental
- && (word)size_needed <= GC_max_hblk_size/2
- && GC_in_last_heap_sect((ptr_t)hbp)
- && GC_should_collect()) {
+ && !GC_incremental && GC_should_collect()) {
continue;
}
# endif
@@ -176,13 +522,14 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
signed_word next_size;
thishbp = hhdr -> hb_next;
- if (thishbp == 0) thishbp = GC_hblkfreelist;
- thishdr = HDR(thishbp);
- next_size = (signed_word)(thishdr -> hb_sz);
- if (next_size < size_avail
+ if (thishbp != 0) {
+ thishdr = HDR(thishbp);
+ next_size = (signed_word)(thishdr -> hb_sz);
+ if (next_size < size_avail
&& next_size >= size_needed
&& !GC_is_black_listed(thishbp, (word)size_needed)) {
continue;
+ }
}
}
if ( !IS_UNCOLLECTABLE(kind) &&
@@ -204,19 +551,21 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
thishbp = lasthbp;
if (size_avail >= size_needed) {
if (thishbp != hbp && GC_install_header(thishbp)) {
+ /* Make sure it's mapped before we mangle it. */
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, size_avail);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+# endif
/* Split the block at thishbp */
thishdr = HDR(thishbp);
- /* GC_invalidate_map not needed, since we will */
- /* allocate this block. */
- thishdr -> hb_next = hhdr -> hb_next;
- thishdr -> hb_sz = size_avail;
- hhdr -> hb_sz = (ptr_t)thishbp - (ptr_t)hbp;
- hhdr -> hb_next = thishbp;
+ GC_split_block(hbp, hhdr, thishbp, thishdr, n);
/* Advance to thishbp */
- prevhbp = hbp;
- phdr = hhdr;
hbp = thishbp;
hhdr = thishdr;
+ /* We must now allocate thishbp, since it may */
+ /* be on the wrong free list. */
}
} else if (size_needed > (signed_word)BL_LIMIT
&& orig_avail - size_needed
@@ -224,12 +573,10 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
/* Punt, since anything else risks unreasonable heap growth. */
WARN("Needed to allocate blacklisted block at 0x%lx\n",
(word)hbp);
- thishbp = hbp;
size_avail = orig_avail;
- } else if (size_avail == 0
- && size_needed == HBLKSIZE
- && prevhbp != 0) {
-# ifndef FIND_LEAK
+ } else if (size_avail == 0 && size_needed == HBLKSIZE
+ && IS_MAPPED(hhdr)) {
+ if (!GC_find_leak) {
static unsigned count = 0;
/* The block is completely blacklisted. We need */
@@ -241,11 +588,14 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
/* Allocate and drop the block in small chunks, to */
/* maximize the chance that we will recover some */
/* later. */
- struct hblk * limit = hbp + (hhdr->hb_sz/HBLKSIZE);
+ word total_size = hhdr -> hb_sz;
+ struct hblk * limit = hbp + divHBLKSZ(total_size);
struct hblk * h;
+ struct hblk * prev = hhdr -> hb_prev;
- GC_words_wasted += hhdr->hb_sz;
- phdr -> hb_next = hhdr -> hb_next;
+ GC_words_wasted += total_size;
+ GC_large_free_bytes -= total_size;
+ GC_remove_from_fl(hhdr, n);
for (h = hbp; h < limit; h++) {
if (h == hbp || GC_install_header(h)) {
hhdr = HDR(h);
@@ -254,70 +604,53 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
PTRFREE, 0); /* Cant fail */
if (GC_debugging_started) {
- BZERO(hbp + HDR_BYTES, HBLKSIZE - HDR_BYTES);
+ BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
}
}
}
/* Restore hbp to point at free block */
- if (GC_savhbp == hbp) GC_savhbp = prevhbp;
- hbp = prevhbp;
- hhdr = phdr;
- if (hbp == GC_savhbp) --trip_count;
+ hbp = prev;
+ if (0 == hbp) {
+ return GC_allochblk_nth(sz, kind, flags, n);
+ }
+ hhdr = HDR(hbp);
}
-# endif
+ }
}
}
if( size_avail >= size_needed ) {
- /* found a big enough block */
- /* let thishbp --> the block */
- /* set prevhbp, hbp to bracket it */
- thishbp = hbp;
- thishdr = hhdr;
- if( size_avail == size_needed ) {
- hbp = hhdr->hb_next;
- hhdr = HDR(hbp);
- } else {
- hbp = (struct hblk *)
- (((word)thishbp) + size_needed);
- if (!GC_install_header(hbp)) {
- hbp = thishbp;
- continue;
- }
- hhdr = HDR(hbp);
- GC_invalidate_map(hhdr);
- hhdr->hb_next = thishdr->hb_next;
- hhdr->hb_sz = size_avail - size_needed;
- }
- /* remove *thishbp from hblk freelist */
- if( prevhbp == 0 ) {
- GC_hblkfreelist = hbp;
- } else {
- phdr->hb_next = hbp;
- }
- /* save current list search position */
- GC_savhbp = hbp;
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, size_avail);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+# endif
+ /* hbp may be on the wrong freelist; the parameter n */
+ /* is important. */
+ hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
break;
}
}
+
+ if (0 == hbp) return 0;
/* Notify virtual dirty bit implementation that we are about to write. */
- GC_write_hint(thishbp);
- /* This should deal better with large blocks. */
+ GC_write_hint(hbp);
/* Add it to map of valid blocks */
- if (!GC_install_counts(thishbp, (word)size_needed)) return(0);
+ if (!GC_install_counts(hbp, (word)size_needed)) return(0);
/* This leaks memory under very rare conditions. */
/* Set up header */
- if (!setup_header(thishdr, sz, kind, flags)) {
- GC_remove_counts(thishbp, (word)size_needed);
+ if (!setup_header(hhdr, sz, kind, flags)) {
+ GC_remove_counts(hbp, (word)size_needed);
return(0); /* ditto */
}
/* Clear block if necessary */
if (GC_debugging_started
|| sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
- BZERO(thishbp + HDR_BYTES, size_needed - HDR_BYTES);
+ BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
}
/* We just successfully allocated a block. Restart count of */
@@ -327,8 +660,11 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
GC_fail_count = 0;
}
+
+ GC_large_free_bytes -= size_needed;
- return( thishbp );
+ GC_ASSERT(IS_MAPPED(hhdr));
+ return( hbp );
}
struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
@@ -341,75 +677,50 @@ struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
* All mark words are assumed to be cleared.
*/
void
-GC_freehblk(p)
-register struct hblk *p;
+GC_freehblk(hbp)
+struct hblk *hbp;
{
-register hdr *phdr; /* Header corresponding to p */
-register struct hblk *hbp, *prevhbp;
-register hdr *hhdr, *prevhdr;
-register signed_word size;
+struct hblk *next, *prev;
+hdr *hhdr, *prevhdr, *nexthdr;
+signed_word size;
- /* GC_savhbp may become invalid due to coalescing. Clear it. */
- GC_savhbp = (struct hblk *)0;
- phdr = HDR(p);
- size = phdr->hb_sz;
- size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
- GC_remove_counts(p, (word)size);
- phdr->hb_sz = size;
- GC_invalidate_map(phdr);
- prevhbp = 0;
-
- /* The following optimization was suggested by David Detlefs. */
- /* Note that the header cannot be NIL, since there cannot be an */
- /* intervening call to GC_freehblk without resetting */
- /* GC_freehblk_ptr. */
- if (GC_freehblk_ptr != 0 &&
- HDR(GC_freehblk_ptr)->hb_map == GC_invalid_map &&
- (ptr_t)GC_freehblk_ptr < (ptr_t)p) {
- hbp = GC_freehblk_ptr;
- } else {
- hbp = GC_hblkfreelist;
- };
hhdr = HDR(hbp);
-
- while( (hbp != 0) && (hbp < p) ) {
- prevhbp = hbp;
- prevhdr = hhdr;
- hbp = hhdr->hb_next;
- hhdr = HDR(hbp);
- }
- GC_freehblk_ptr = prevhbp;
+ size = hhdr->hb_sz;
+ size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
+ GC_remove_counts(hbp, (word)size);
+ hhdr->hb_sz = size;
/* Check for duplicate deallocation in the easy case */
- if (hbp != 0 && (ptr_t)p + size > (ptr_t)hbp
- || prevhbp != 0 && (ptr_t)prevhbp + prevhdr->hb_sz > (ptr_t)p) {
+ if (HBLK_IS_FREE(hhdr)) {
GC_printf1("Duplicate large block deallocation of 0x%lx\n",
- (unsigned long) p);
- GC_printf2("Surrounding free blocks are 0x%lx and 0x%lx\n",
- (unsigned long) prevhbp, (unsigned long) hbp);
+ (unsigned long) hbp);
}
+ GC_ASSERT(IS_MAPPED(hhdr));
+ GC_invalidate_map(hhdr);
+ next = (struct hblk *)((word)hbp + size);
+ nexthdr = HDR(next);
+ prev = GC_free_block_ending_at(hbp);
/* Coalesce with successor, if possible */
- if( (((word)p)+size) == ((word)hbp) ) {
- phdr->hb_next = hhdr->hb_next;
- phdr->hb_sz += hhdr->hb_sz;
- GC_remove_header(hbp);
- } else {
- phdr->hb_next = hbp;
+ if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
+ GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+ hhdr -> hb_sz += nexthdr -> hb_sz;
+ GC_remove_header(next);
+ }
+ /* Coalesce with predecessor, if possible. */
+ if (0 != prev) {
+ prevhdr = HDR(prev);
+ if (IS_MAPPED(prevhdr)) {
+ GC_remove_from_fl(prevhdr, FL_UNKNOWN);
+ prevhdr -> hb_sz += hhdr -> hb_sz;
+ GC_remove_header(hbp);
+ hbp = prev;
+ hhdr = prevhdr;
+ }
}
-
- if( prevhbp == 0 ) {
- GC_hblkfreelist = p;
- } else if( (((word)prevhbp) + prevhdr->hb_sz)
- == ((word)p) ) {
- /* Coalesce with predecessor */
- prevhdr->hb_next = phdr->hb_next;
- prevhdr->hb_sz += phdr->hb_sz;
- GC_remove_header(p);
- } else {
- prevhdr->hb_next = p;
- }
+ GC_large_free_bytes += size;
+ GC_add_to_fl(hbp, hhdr);
}
diff --git a/alloc.c b/alloc.c
index 171dc780..1c57951f 100644
--- a/alloc.c
+++ b/alloc.c
@@ -82,7 +82,7 @@ extern signed_word GC_mem_found; /* Number of reclaimed longwords */
GC_bool GC_dont_expand = 0;
-word GC_free_space_divisor = 4;
+word GC_free_space_divisor = 3;
extern GC_bool GC_collection_in_progress();
/* Collection is in progress, or was abandoned. */
@@ -130,18 +130,22 @@ static word min_words_allocd()
int dummy;
register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
# endif
- register word total_root_size; /* includes double stack size, */
+ word total_root_size; /* includes double stack size, */
/* since the stack is expensive */
/* to scan. */
+ word scan_size; /* Estimate of memory to be scanned */
+ /* during normal GC. */
if (stack_size < 0) stack_size = -stack_size;
total_root_size = 2 * stack_size + GC_root_size;
+ scan_size = BYTES_TO_WORDS(GC_heapsize - GC_large_free_bytes
+ + (GC_large_free_bytes >> 2)
+ /* use a bit more of large empty heap */
+ + total_root_size);
if (GC_incremental) {
- return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
- / (2 * GC_free_space_divisor));
+ return scan_size / (2 * GC_free_space_divisor);
} else {
- return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
- / GC_free_space_divisor);
+ return scan_size / GC_free_space_divisor;
}
}
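
The new min_words_allocd() above discounts fully free blocks when sizing the
next collection; only a quarter of GC_large_free_bytes counts toward the
scanned-memory estimate. A worked example with assumed figures (not from the
patch):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed: 32 MB heap, 8 MB on the large-block free lists,      */
        /* 1 MB of roots (including the doubled stack size).             */
        unsigned long heapsize        = 32UL << 20;  /* GC_heapsize           */
        unsigned long large_free      =  8UL << 20;  /* GC_large_free_bytes   */
        unsigned long total_root_size =  1UL << 20;
        unsigned long divisor         = 3;           /* GC_free_space_divisor */
        unsigned long bytes_per_word  = sizeof(long);

        unsigned long scan_size = (heapsize - large_free
                                   + (large_free >> 2)
                                   + total_root_size) / bytes_per_word;
        /* Non-incremental allocation budget between collections: */
        printf("min words allocd: %lu\n", scan_size / divisor);
        return 0;
    }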
@@ -470,7 +474,7 @@ void GC_finish_collection()
# ifdef GATHERSTATS
GC_mem_found = 0;
# endif
-# ifdef FIND_LEAK
+ if (GC_find_leak) {
/* Mark all objects on the free list. All objects should be */
/* marked when we're done. */
{
@@ -493,25 +497,26 @@ void GC_finish_collection()
}
}
}
- /* Check that everything is marked */
GC_start_reclaim(TRUE);
-# else
+ /* The above just checks; it doesn't really reclaim anything. */
+ }
+
+ GC_finalize();
+# ifdef STUBBORN_ALLOC
+ GC_clean_changing_list();
+# endif
- GC_finalize();
-# ifdef STUBBORN_ALLOC
- GC_clean_changing_list();
-# endif
-
-# ifdef PRINTTIMES
- GET_TIME(finalize_time);
-# endif
-
- /* Clear free list mark bits, in case they got accidentally marked */
- /* Note: HBLKPTR(p) == pointer to head of block containing *p */
- /* Also subtract memory remaining from GC_mem_found count. */
- /* Note that composite objects on free list are cleared. */
- /* Thus accidentally marking a free list is not a problem; only */
- /* objects on the list itself will be marked, and that's fixed here. */
+# ifdef PRINTTIMES
+ GET_TIME(finalize_time);
+# endif
+
+ /* Clear free list mark bits, in case they got accidentally marked */
+ /* Note: HBLKPTR(p) == pointer to head of block containing *p */
+ /* (or GC_find_leak is set and they were intentionally marked.) */
+ /* Also subtract memory remaining from GC_mem_found count. */
+ /* Note that composite objects on free list are cleared. */
+ /* Thus accidentally marking a free list is not a problem; only */
+ /* objects on the list itself will be marked, and that's fixed here. */
{
register word size; /* current object size */
register ptr_t p; /* pointer to current object */
@@ -537,24 +542,25 @@ void GC_finish_collection()
}
-# ifdef PRINTSTATS
+# ifdef PRINTSTATS
GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
(long)WORDS_TO_BYTES(GC_mem_found));
-# endif
-
+# endif
/* Reconstruct free lists to contain everything not marked */
- GC_start_reclaim(FALSE);
-
-# endif /* !FIND_LEAK */
+ GC_start_reclaim(FALSE);
# ifdef PRINTSTATS
GC_printf2(
- "Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
+ "Immediately reclaimed %ld bytes in heap of size %lu bytes",
(long)WORDS_TO_BYTES(GC_mem_found),
(unsigned long)GC_heapsize);
- GC_printf2("%lu (atomic) + %lu (composite) collectable bytes in use\n",
- (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
- (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
+# ifdef USE_MUNMAP
+ GC_printf1("(%lu unmapped)", GC_unmapped_bytes);
+# endif
+ GC_printf2(
+ "\n%lu (atomic) + %lu (composite) collectable bytes in use\n",
+ (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
+ (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
# endif
GC_n_attempts = 0;
@@ -565,6 +571,9 @@ void GC_finish_collection()
GC_words_wasted = 0;
GC_mem_freed = 0;
+# ifdef USE_MUNMAP
+ GC_unmap_old();
+# endif
# ifdef PRINTTIMES
GET_TIME(done_time);
GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
@@ -608,7 +617,7 @@ void GC_gcollect GC_PROTO(())
word GC_n_heap_sects = 0; /* Number of sections currently in heap. */
/*
- * Use the chunk of memory starting at p of syze bytes as part of the heap.
+ * Use the chunk of memory starting at p of size bytes as part of the heap.
* Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
*/
void GC_add_to_heap(p, bytes)
@@ -616,6 +625,7 @@ struct hblk *p;
word bytes;
{
word words;
+ hdr * phdr;
if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
@@ -630,7 +640,10 @@ word bytes;
GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
GC_n_heap_sects++;
words = BYTES_TO_WORDS(bytes - HDR_BYTES);
- HDR(p) -> hb_sz = words;
+ phdr = HDR(p);
+ phdr -> hb_sz = words;
+ phdr -> hb_map = (char *)1; /* A value != GC_invalid_map */
+ phdr -> hb_flags = 0;
GC_freehblk(p);
GC_heapsize += bytes;
if ((ptr_t)p <= GC_least_plausible_heap_addr
@@ -646,27 +659,6 @@ word bytes;
}
}
-#ifdef PRESERVE_LAST
-
-GC_bool GC_protect_last_block = FALSE;
-
-GC_bool GC_in_last_heap_sect(p)
-ptr_t p;
-{
- struct HeapSect * last_heap_sect;
- ptr_t start;
- ptr_t end;
-
- if (!GC_protect_last_block) return FALSE;
- last_heap_sect = &(GC_heap_sects[GC_n_heap_sects-1]);
- start = last_heap_sect -> hs_start;
- if (p < start) return FALSE;
- end = start + last_heap_sect -> hs_bytes;
- if (p >= end) return FALSE;
- return TRUE;
-}
-#endif
-
# if !defined(NO_DEBUGGING)
void GC_print_heap_sects()
{
@@ -797,9 +789,6 @@ word n;
LOCK();
if (!GC_is_initialized) GC_init_inner();
result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
-# ifdef PRESERVE_LAST
- if (result) GC_protect_last_block = FALSE;
-# endif
UNLOCK();
ENABLE_SIGNALS();
return(result);
@@ -813,7 +802,6 @@ GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
GC_bool ignore_off_page;
{
-
if (!GC_incremental && !GC_dont_gc && GC_should_collect()) {
GC_notify_full_gc();
GC_gcollect_inner();
@@ -852,12 +840,6 @@ GC_bool ignore_off_page;
GC_printf0("Memory available again ...\n");
}
# endif
-# ifdef PRESERVE_LAST
- if (needed_blocks > 1) GC_protect_last_block = TRUE;
- /* We were forced to expand the heap as the result */
- /* of a large block allocation. Avoid breaking up */
- /* new block into small pieces. */
-# endif
}
}
return(TRUE);
diff --git a/backptr.h b/backptr.h
new file mode 100644
index 00000000..d34224e4
--- /dev/null
+++ b/backptr.h
@@ -0,0 +1,56 @@
+/*
+ * This is a simple API to implement pointer back tracing, i.e.
+ * to answer questions such as "who is pointing to this" or
+ * "why is this object being retained by the collector"
+ *
+ * This API assumes that we have an ANSI C compiler.
+ *
+ * Most of these calls yield useful information only after
+ * a garbage collection. Usually the client will first force
+ * a full collection and then gather information, preferably
+ * before much intervening allocation.
+ *
+ * The implementation of the interface is only about 99.9999%
+ * correct. It is intended to be good enough for profiling,
+ * but is not intended to be used with production code.
+ *
+ * Results are likely to be much more useful if all allocation is
+ * accomplished through the debugging allocators.
+ *
+ * The implementation idea is due to A. Demers.
+ */
+
+/* Store information about the object referencing dest in *base_p */
+/* and *offset_p. */
+/* If multiple objects or roots point to dest, the one reported */
+/* will be the last one used by the garbage collector to trace the */
+/* object. */
+/* source is root ==> *base_p = address, *offset_p = 0 */
+/* source is heap object ==> *base_p != 0, *offset_p = offset */
+/* Returns 1 on success, 0 if source couldn't be determined. */
+/* Dest can be any address within a heap object. */
+typedef enum { GC_UNREFERENCED, /* No reference info available. */
+ GC_NO_SPACE, /* Dest not allocated with debug alloc */
+ GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
+ GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
+ GC_FINALIZER_REFD /* Finalizable and hence accessible. */
+} GC_ref_kind;
+
+GC_ref_kind GC_get_back_ptr_info(void *dest, void **base_p, size_t *offset_p);
+
+/* Generate a random heap address. */
+/* The resulting address is in the heap, but */
+/* not necessarily inside a valid object. */
+void * GC_generate_random_heap_address(void);
+
+/* Generate a random address inside a valid marked heap object. */
+void * GC_generate_random_valid_address(void);
+
+/* Force a garbage collection and generate a backtrace from a */
+/* random heap address. */
+/* This uses the GC logging mechanism (GC_printf) to produce */
+/* output. It can often be called from a debugger. The */
+/* source in dbg_mlc.c also serves as a sample client. */
+void GC_generate_random_backtrace(void);
+
+
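A minimal usage sketch for this interface, assuming a collector built with
-DKEEP_BACK_PTRS and allocation routed through the debugging wrappers:

    #define GC_DEBUG            /* make GC_MALLOC use the debug allocator */
    #include <stdio.h>
    #include "gc.h"
    #include "backptr.h"

    int main(void)
    {
        void **a, **b;
        void *base;
        size_t offset;
        GC_ref_kind kind;

        a = GC_MALLOC(10 * sizeof(void *));
        b = GC_MALLOC(10 * sizeof(void *));
        a[3] = b;                       /* heap object a references b */

        GC_gcollect();                  /* back pointers are recorded during marking */
        kind = GC_get_back_ptr_info(b, &base, &offset);
        if (GC_REFD_FROM_HEAP == kind) {
            printf("b is referenced from the object at %p, offset %lu\n",
                   base, (unsigned long)offset);
        }
        GC_generate_random_backtrace(); /* or: trace a randomly chosen object */
        return 0;
    }
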
diff --git a/cord/cordxtra.c b/cord/cordxtra.c
index b306fbac..a5be10de 100644
--- a/cord/cordxtra.c
+++ b/cord/cordxtra.c
@@ -582,7 +582,7 @@ CORD CORD_from_file_lazy_inner(FILE * f, size_t len)
state -> lf_cache[i] = 0;
}
state -> lf_current = 0;
- GC_register_finalizer(state, CORD_lf_close_proc, 0, 0, 0);
+ GC_REGISTER_FINALIZER(state, CORD_lf_close_proc, 0, 0, 0);
return(CORD_from_fn(CORD_lf_func, state, len));
}
diff --git a/cord/gc.h b/cord/gc.h
index ceabb02f..30614095 100644
--- a/cord/gc.h
+++ b/cord/gc.h
@@ -96,11 +96,31 @@ GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
/* pointer to a previously allocated heap */
/* object. */
+GC_API int GC_find_leak;
+ /* Do not actually garbage collect, but simply */
+ /* report inaccessible memory that was not */
+ /* deallocated with GC_free. Initial value */
+ /* is determined by FIND_LEAK macro. */
+
GC_API int GC_quiet; /* Disable statistics output. Only matters if */
/* collector has been compiled with statistics */
/* enabled. This involves a performance cost, */
/* and is thus not the default. */
+GC_API int GC_finalize_on_demand;
+ /* If nonzero, finalizers will only be run in */
+ /* response to an explicit GC_invoke_finalizers */
+ /* call. The default is determined by whether */
+ /* the FINALIZE_ON_DEMAND macro is defined */
+ /* when the collector is built. */
+
+GC_API int GC_java_finalization;
+ /* Mark objects reachable from finalizable */
+ /* objects in a separate postpass. This makes */
+ /* it a bit safer to use non-topologically- */
+ /* ordered finalization. Default value is */
+ /* determined by JAVA_FINALIZATION macro. */
+
GC_API int GC_dont_gc; /* Dont collect unless explicitly requested, e.g. */
/* because it's not safe. */
@@ -510,7 +530,7 @@ GC_API int GC_invoke_finalizers GC_PROTO((void));
/* be finalized. Return the number of finalizers */
/* that were run. Normally this is also called */
/* implicitly during some allocations. If */
- /* FINALIZE_ON_DEMAND is defined, it must be called */
+ /* GC_finalize_on_demand is nonzero, it must be called */
/* explicitly. */
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
@@ -692,6 +712,7 @@ GC_API void (*GC_is_visible_print_proc)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
+/* lb must be large enough to hold the pointer field. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
/* in returned list. */
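
A small usage sketch for GC_malloc_many and GC_NEXT (the 32-byte object size
here is only an example; per the note above, lb must be at least large enough
to hold the link pointer):

    #include "gc.h"

    /* Grab a whole list of objects with one lock acquisition, then */
    /* hand them out one at a time.                                 */
    void *take_from_pool(void)
    {
        static void *pool = 0;
        void *p;
        if (0 == pool) pool = GC_malloc_many(32);
        if (0 == pool) return 0;   /* allocation failed           */
        p = pool;
        pool = GC_NEXT(p);         /* advance to the next element */
        GC_NEXT(p) = 0;            /* don't retain the rest       */
        return p;
    }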
diff --git a/dbg_mlc.c b/dbg_mlc.c
index 81516258..64832567 100644
--- a/dbg_mlc.c
+++ b/dbg_mlc.c
@@ -12,8 +12,11 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, October 9, 1995 1:16 pm PDT */
+# define I_HIDE_POINTERS
# include "gc_priv.h"
+# ifdef KEEP_BACK_PTRS
+# include "backptr.h"
+# endif
void GC_default_print_heap_obj_proc();
GC_API void GC_register_finalizer_no_order
@@ -31,6 +34,14 @@ GC_API void GC_register_finalizer_no_order
/* Object header */
typedef struct {
+# ifdef KEEP_BACK_PTRS
+ ptr_t oh_back_ptr;
+# define MARKED_FOR_FINALIZATION (ptr_t)(-1)
+ /* Object was marked because it is finalizable. */
+# ifdef ALIGN_DOUBLE
+ word oh_dummy;
+# endif
+# endif
char * oh_string; /* object descriptor string */
word oh_int; /* object descriptor integers */
# ifdef NEED_CALLINFO
@@ -85,6 +96,134 @@ ptr_t p;
return(FALSE);
}
+#ifdef KEEP_BACK_PTRS
+ /* Store back pointer to source in dest, if that appears to be possible. */
+ /* This is not completely safe, since we may mistakenly conclude that */
+ /* dest has a debugging wrapper. But the error probability is very */
+ /* small, and this shouldn't be used in production code. */
+ /* We assume that dest is the real base pointer. Source will usually */
+ /* be a pointer to the interior of an object. */
+ void GC_store_back_pointer(ptr_t source, ptr_t dest)
+ {
+ if (GC_has_debug_info(dest)) {
+ ((oh *)dest) -> oh_back_ptr = (ptr_t)HIDE_POINTER(source);
+ }
+ }
+
+ void GC_marked_for_finalization(ptr_t dest) {
+ GC_store_back_pointer(MARKED_FOR_FINALIZATION, dest);
+ }
+
+ /* Store information about the object referencing dest in *base_p */
+ /* and *offset_p. */
+ /* source is root ==> *base_p = address, *offset_p = 0 */
+ /* source is heap object ==> *base_p != 0, *offset_p = offset */
+ /* Returns 1 on success, 0 if source couldn't be determined. */
+ /* Dest can be any address within a heap object. */
+ GC_ref_kind GC_get_back_ptr_info(void *dest, void **base_p, size_t *offset_p)
+ {
+ oh * hdr = (oh *)GC_base(dest);
+ ptr_t bp;
+ ptr_t bp_base;
+ if (!GC_has_debug_info((ptr_t) hdr)) return GC_NO_SPACE;
+ bp = hdr -> oh_back_ptr;
+ if (MARKED_FOR_FINALIZATION == bp) return GC_FINALIZER_REFD;
+ if (0 == bp) return GC_UNREFERENCED;
+ bp = REVEAL_POINTER(bp);
+ bp_base = GC_base(bp);
+ if (0 == bp_base) {
+ *base_p = bp;
+ *offset_p = 0;
+ return GC_REFD_FROM_ROOT;
+ } else {
+ if (GC_has_debug_info(bp_base)) bp_base += sizeof(oh);
+ *base_p = bp_base;
+ *offset_p = bp - bp_base;
+ return GC_REFD_FROM_HEAP;
+ }
+ }
+
+ /* Generate a random heap address. */
+ /* The resulting address is in the heap, but */
+ /* not necessarily inside a valid object. */
+ void *GC_generate_random_heap_address(void)
+ {
+ int i;
+ int heap_offset = random() % GC_heapsize;
+ for (i = 0; i < GC_n_heap_sects; ++ i) {
+ int size = GC_heap_sects[i].hs_bytes;
+ if (heap_offset < size) {
+ return GC_heap_sects[i].hs_start + heap_offset;
+ } else {
+ heap_offset -= size;
+ }
+ }
+ ABORT("GC_generate_random_heap_address: size inconsistency");
+ /*NOTREACHED*/
+ return 0;
+ }
+
+ /* Generate a random address inside a valid marked heap object. */
+ void *GC_generate_random_valid_address(void)
+ {
+ ptr_t result;
+ ptr_t base;
+ for (;;) {
+ result = GC_generate_random_heap_address();
+ base = GC_base(result);
+ if (0 == base) continue;
+ if (!GC_is_marked(base)) continue;
+ return result;
+ }
+ }
+
+ /* Force a garbage collection and generate a backtrace from a */
+ /* random heap address. */
+ void GC_generate_random_backtrace(void)
+ {
+ void * current;
+ int i;
+ void * base;
+ size_t offset;
+ GC_ref_kind source;
+ GC_gcollect();
+ current = GC_generate_random_valid_address();
+ GC_printf1("Chose address 0x%lx in object\n", (unsigned long)current);
+ GC_print_heap_obj(GC_base(current));
+ GC_err_printf0("\n");
+ for (i = 0; ; ++i) {
+ source = GC_get_back_ptr_info(current, &base, &offset);
+ if (GC_UNREFERENCED == source) {
+ GC_err_printf0("Reference could not be found\n");
+ goto out;
+ }
+ if (GC_NO_SPACE == source) {
+ GC_err_printf0("No debug info in object: Can't find reference\n");
+ goto out;
+ }
+	 GC_err_printf1("Reachable via %ld levels of pointers from ",
+ (unsigned long)i);
+ switch(source) {
+ case GC_REFD_FROM_ROOT:
+ GC_err_printf1("root at 0x%lx\n", (unsigned long)base);
+ goto out;
+ case GC_FINALIZER_REFD:
+ GC_err_printf0("list of finalizable objects\n");
+ goto out;
+ case GC_REFD_FROM_HEAP:
+ GC_err_printf1("offset %ld in object:\n", (unsigned long)offset);
+ /* Take GC_base(base) to get real base, i.e. header. */
+ GC_print_heap_obj(GC_base(base));
+ GC_err_printf0("\n");
+ break;
+ }
+ current = base;
+ }
+ out:;
+ }
+
+#endif /* KEEP_BACK_PTRS */
+
/* Store debugging info into p. Return displaced pointer. */
/* Assumes we don't hold allocation lock. */
ptr_t GC_store_debug_info(p, sz, string, integer)
@@ -100,6 +239,9 @@ word integer;
/* But that's expensive. And this way things should only appear */
/* inconsistent while we're in the handler. */
LOCK();
+# ifdef KEEP_BACK_PTRS
+ ((oh *)p) -> oh_back_ptr = 0;
+# endif
((oh *)p) -> oh_string = string;
((oh *)p) -> oh_int = integer;
((oh *)p) -> oh_sz = sz;
@@ -110,7 +252,7 @@ word integer;
return((ptr_t)result);
}
-/* Check the object with debugging info at p */
+/* Check the object with debugging info at ohdr */
/* return NIL if it's OK. Else return clobbered */
/* address. */
ptr_t GC_check_annotated_obj(ohdr)
@@ -408,31 +550,29 @@ GC_PTR p;
GC_err_printf0(
"GC_debug_free: found previously deallocated (?) object at ");
} else {
- GC_err_printf0("GC_debug_free: found smashed object at ");
+ GC_err_printf0("GC_debug_free: found smashed location at ");
}
GC_print_smashed_obj(p, clobbered);
}
/* Invalidate size */
((oh *)base) -> oh_sz = GC_size(base);
}
-# ifdef FIND_LEAK
+ if (GC_find_leak) {
GC_free(base);
-# else
- {
- register hdr * hhdr = HDR(p);
- GC_bool uncollectable = FALSE;
+ } else {
+ register hdr * hhdr = HDR(p);
+ GC_bool uncollectable = FALSE;
- if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
- uncollectable = TRUE;
- }
-# ifdef ATOMIC_UNCOLLECTABLE
- if (hhdr -> hb_obj_kind == AUNCOLLECTABLE) {
- uncollectable = TRUE;
- }
-# endif
- if (uncollectable) GC_free(base);
+ if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
+ uncollectable = TRUE;
}
-# endif
+# ifdef ATOMIC_UNCOLLECTABLE
+ if (hhdr -> hb_obj_kind == AUNCOLLECTABLE) {
+ uncollectable = TRUE;
+ }
+# endif
+ if (uncollectable) GC_free(base);
+ } /* !GC_find_leak */
}
# ifdef __STDC__
@@ -491,7 +631,7 @@ GC_PTR p;
}
clobbered = GC_check_annotated_obj((oh *)base);
if (clobbered != 0) {
- GC_err_printf0("GC_debug_realloc: found smashed object at ");
+ GC_err_printf0("GC_debug_realloc: found smashed location at ");
GC_print_smashed_obj(p, clobbered);
}
old_sz = ((oh *)base) -> oh_sz;
@@ -528,7 +668,7 @@ word dummy;
if (clobbered != 0) {
GC_err_printf0(
- "GC_check_heap_block: found smashed object at ");
+ "GC_check_heap_block: found smashed location at ");
GC_print_smashed_obj((ptr_t)p, clobbered);
}
}
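
A minimal driver for the backtrace machinery added above, assuming the
collector was built with -DKEEP_BACK_PTRS (main and the linked keep[]
array are illustrative):

    #include "gc.h"
    #include "backptr.h"

    static void *keep[1000];

    int main(void)
    {
        int i;
        for (i = 0; i < 1000; ++i) {
            keep[i] = GC_malloc(64);                  /* keep objects reachable */
            if (i > 0) *(void **)keep[i] = keep[i-1]; /* link them */
        }
        /* Forces a collection, picks a random reachable object, and */
        /* prints the chain of back pointers that retains it.        */
        GC_generate_random_backtrace();
        return 0;
    }
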
diff --git a/dyn_load.c b/dyn_load.c
index 56aeb3dd..d3df0a08 100644
--- a/dyn_load.c
+++ b/dyn_load.c
@@ -283,6 +283,9 @@ void GC_register_dynamic_libraries()
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
+# ifdef __GNUC__
+# pragma weak _DYNAMIC
+# endif
extern ElfW(Dyn) _DYNAMIC[];
ElfW(Dyn) *dp;
struct r_debug *r;
diff --git a/finalize.c b/finalize.c
index f33ae734..2ee927fe 100644
--- a/finalize.c
+++ b/finalize.c
@@ -1,6 +1,7 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -16,6 +17,18 @@
# include "gc_priv.h"
# include "gc_mark.h"
+# ifdef FINALIZE_ON_DEMAND
+ int GC_finalize_on_demand = 1;
+# else
+ int GC_finalize_on_demand = 0;
+# endif
+
+# ifdef JAVA_FINALIZATION
+ int GC_java_finalization = 1;
+# else
+ int GC_java_finalization = 0;
+# endif
+
/* Type of mark procedure used for marking from finalizable object. */
/* This procedure normally does not mark the object, only its */
/* descendents. */
@@ -249,7 +262,7 @@ out:
/* Possible finalization_marker procedures. Note that mark stack */
/* overflow is handled by the caller, and is not a disaster. */
-void GC_normal_finalize_mark_proc(p)
+GC_API void GC_normal_finalize_mark_proc(p)
ptr_t p;
{
hdr * hhdr = HDR(p);
@@ -261,7 +274,7 @@ ptr_t p;
/* This only pays very partial attention to the mark descriptor. */
/* It does the right thing for normal and atomic objects, and treats */
/* most others as normal. */
-void GC_ignore_self_finalize_mark_proc(p)
+GC_API void GC_ignore_self_finalize_mark_proc(p)
ptr_t p;
{
hdr * hhdr = HDR(p);
@@ -284,7 +297,7 @@ ptr_t p;
}
/*ARGSUSED*/
-void GC_null_finalize_mark_proc(p)
+GC_API void GC_null_finalize_mark_proc(p)
ptr_t p;
{
}
@@ -295,7 +308,11 @@ ptr_t p;
/* in the nonthreads case, we try to avoid disabling signals, */
/* since it can be expensive. Threads packages typically */
/* make it cheaper. */
-void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
+/* The last parameter is a procedure that determines */
+/* marking for finalization ordering. Any objects marked */
+/* by that procedure will be guaranteed to not have been */
+/* finalized when this finalizer is invoked. */
+GC_API void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
GC_PTR obj;
GC_finalization_proc fn;
GC_PTR cd;
@@ -505,6 +522,7 @@ void GC_finalize()
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
+ GC_MARKED_FOR_FINALIZATION(real_ptr);
GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
if (GC_is_marked(real_ptr)) {
WARN("Finalization cycle involving %lx\n", real_ptr);
@@ -521,9 +539,9 @@ void GC_finalize()
while (curr_fo != 0) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
-# ifndef JAVA_FINALIZATION
- GC_set_mark_bit(real_ptr);
-# endif
+ if (!GC_java_finalization) {
+ GC_set_mark_bit(real_ptr);
+ }
/* Delete from hash table */
next_fo = fo_next(curr_fo);
if (prev_fo == 0) {
@@ -555,20 +573,20 @@ void GC_finalize()
}
}
-# ifdef JAVA_FINALIZATION
- /* make sure we mark everything reachable from objects finalized
- using the no_order mark_proc */
- for (curr_fo = GC_finalize_now;
- curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
- real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
- if (!GC_is_marked(real_ptr)) {
- if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
- GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
- }
- GC_set_mark_bit(real_ptr);
- }
- }
-# endif
+ if (GC_java_finalization) {
+ /* make sure we mark everything reachable from objects finalized
+ using the no_order mark_proc */
+ for (curr_fo = GC_finalize_now;
+ curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
+ real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
+ if (!GC_is_marked(real_ptr)) {
+ if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
+ GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
+ }
+ GC_set_mark_bit(real_ptr);
+ }
+ }
+ }
/* Remove dangling disappearing links. */
for (i = 0; i < dl_size; i++) {
@@ -594,7 +612,7 @@ void GC_finalize()
}
}
-#ifdef JAVA_FINALIZATION
+#ifndef JAVA_FINALIZATION_NOT_NEEDED
/* Enqueue all remaining finalizers to be run - Assumes lock is
* held, and signals are disabled */
@@ -649,10 +667,15 @@ void GC_enqueue_all_finalizers()
* Unfortunately, the Java standard implies we have to keep running
* finalizers until there are no more left, a potential infinite loop.
* YUCK.
+ * Note that this is even more dangerous than the usual Java
+ * finalizers, in that objects reachable from static variables
+ * may have been finalized when these finalizers are run.
+ * Finalizers run at this point must be prepared to deal with a
+ * mostly broken world.
* This routine is externally callable, so is called without
* the allocation lock.
*/
-void GC_finalize_all()
+GC_API void GC_finalize_all()
{
DCL_LOCK_STATE;
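
Since GC_finalize_on_demand is now a runtime flag, a client can opt into
explicit finalization without rebuilding the collector. A sketch
(my_finalizer and setup_and_run are illustrative names):

    #include "gc.h"

    static void my_finalizer(GC_PTR obj, GC_PTR client_data)
    {
        /* Release any external resource associated with obj. */
    }

    void setup_and_run(void)
    {
        GC_PTR obj;
        GC_finalize_on_demand = 1;      /* never finalize inside allocation */
        obj = GC_malloc(16);
        GC_register_finalizer(obj, my_finalizer, 0, 0, 0);
        obj = 0;                        /* drop the last reference */
        GC_gcollect();                  /* queues the finalizer */
        (void)GC_invoke_finalizers();   /* runs it at a point we chose */
    }
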
diff --git a/gc.h b/gc.h
index ceabb02f..30614095 100644
--- a/gc.h
+++ b/gc.h
@@ -96,11 +96,31 @@ GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
/* pointer to a previously allocated heap */
/* object. */
+GC_API int GC_find_leak;
+ /* Do not actually garbage collect, but simply */
+ /* report inaccessible memory that was not */
+ /* deallocated with GC_free. Initial value */
+ /* is determined by FIND_LEAK macro. */
+
GC_API int GC_quiet; /* Disable statistics output. Only matters if */
/* collector has been compiled with statistics */
/* enabled. This involves a performance cost, */
/* and is thus not the default. */
+GC_API int GC_finalize_on_demand;
+ /* If nonzero, finalizers will only be run in */
+			/* response to an explicit GC_invoke_finalizers */
+ /* call. The default is determined by whether */
+ /* the FINALIZE_ON_DEMAND macro is defined */
+ /* when the collector is built. */
+
+GC_API int GC_java_finalization;
+ /* Mark objects reachable from finalizable */
+ /* objects in a separate postpass. This makes */
+ /* it a bit safer to use non-topologically- */
+ /* ordered finalization. Default value is */
+ /* determined by JAVA_FINALIZATION macro. */
+
GC_API int GC_dont_gc; /* Dont collect unless explicitly requested, e.g. */
/* because it's not safe. */
@@ -510,7 +530,7 @@ GC_API int GC_invoke_finalizers GC_PROTO((void));
/* be finalized. Return the number of finalizers */
/* that were run. Normally this is also called */
/* implicitly during some allocations. If */
- /* FINALIZE_ON_DEMAND is defined, it must be called */
+	/* GC_finalize_on_demand is nonzero, it must be called */
/* explicitly. */
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
@@ -692,6 +712,7 @@ GC_API void (*GC_is_visible_print_proc)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
+/* lb must be large enough to hold the pointer field. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
/* in returned list. */
diff --git a/gc_alloc.h b/gc_alloc.h
index 1d912db2..1f1d54af 100644
--- a/gc_alloc.h
+++ b/gc_alloc.h
@@ -13,7 +13,7 @@
//
// This is a C++ header file that is intended to replace the SGI STL
-// alloc.h.
+// alloc.h. This assumes SGI STL version < 3.0.
//
// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
// and -DALL_INTERIOR_POINTERS. We also recommend
diff --git a/gc_copy_descr.h b/gc_copy_descr.h
new file mode 100644
index 00000000..212c99e2
--- /dev/null
+++ b/gc_copy_descr.h
@@ -0,0 +1,26 @@
+
+/*
+ * Copyright (c) 1999 by Silicon Graphics. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Descriptor for allocation request. May be redefined by client. */
+typedef struct {
+ GC_word bitmap; /* Bitmap describing pointer locations. */
+ /* High order bit correspond to 0th */
+				/* High order bit corresponds to 0th */
+ size_t length; /* In bytes, must be multiple of word */
+ /* size. Must be >0, <= 512 */
+} * GC_copy_descriptor;
+
+/* The collector accesses descriptors only through these two macros. */
+#define GC_SIZE_FROM_DESCRIPTOR(d) ((d) -> length)
+#define GC_BIT_MAP_FROM_DESCRIPTOR(d) ((d) -> bitmap)
+
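
A sketch of how a client might interpret such a descriptor, following
the bitmap convention documented above (visit_pointers and its callback
are illustrative names):

    #include "gc.h"
    #include "gc_copy_descr.h"

    void visit_pointers(GC_copy_descriptor d, GC_word *obj,
                        void (*visit)(GC_word))
    {
        GC_word bm = GC_BIT_MAP_FROM_DESCRIPTOR(d);
        size_t nwords = GC_SIZE_FROM_DESCRIPTOR(d) / sizeof(GC_word);
        size_t i;
        for (i = 0; i < nwords; ++i) {
            /* The high order bit corresponds to word 0. */
            if (bm & ((GC_word)1 << (8*sizeof(GC_word) - 1 - i)))
                visit(obj[i]);
        }
    }
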
diff --git a/gc_hdrs.h b/gc_hdrs.h
index 2f2d1bf9..60dc2ad3 100644
--- a/gc_hdrs.h
+++ b/gc_hdrs.h
@@ -49,14 +49,16 @@ typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
* The bottom level index contains one of three kinds of values:
- * 0 means we're not responsible for this block.
+ * 0 means we're not responsible for this block,
+ * or this is a block other than the first one in a free block.
* 1 < (long)X <= MAX_JUMP means the block starts at least
* X * HBLKSIZE bytes before the current address.
* A valid pointer points to a hdr structure. (The above can't be
* valid pointers due to the GET_MEM return convention.)
*/
struct bi * asc_link; /* All indices are linked in */
- /* ascending order. */
+ /* ascending order... */
+ struct bi * desc_link; /* ... and in descending order. */
word key; /* high order address bits. */
# ifdef HASH_TL
struct bi * hash_link; /* Hash chain link. */
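
The three kinds of bottom-index entries described above can be decoded
as in this sketch against the private headers (classify_entry is an
illustrative name):

    # include "gc_priv.h"

    int classify_entry(bottom_index *bi, int j)
    {
        hdr * hhdr = bi -> index[j];
        if (0 == hhdr) return 0;     /* not ours, or interior of free block */
        if (IS_FORWARDING_ADDR_OR_NIL(hhdr))
            return 1;                /* block starts at least (word)hhdr    */
                                     /* * HBLKSIZE bytes earlier            */
        return 2;                    /* valid pointer to the block header   */
    }
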
diff --git a/gc_mark.h b/gc_mark.h
index 8b57c31f..4628323f 100644
--- a/gc_mark.h
+++ b/gc_mark.h
@@ -171,6 +171,8 @@ mse * GC_signal_mark_stack_overflow();
/* Mark bit is already set */ \
goto exit_label; \
} \
+ GC_STORE_BACK_PTR((ptr_t)source, (ptr_t)HBLKPTR(current) \
+ + WORDS_TO_BYTES(displ)); \
*mark_word_addr = mark_word | mark_bit; \
} \
PUSH_OBJ(((word *)(HBLKPTR(current)) + displ), hhdr, \
diff --git a/gc_priv.h b/gc_priv.h
index 934075fa..5ce52a7a 100644
--- a/gc_priv.h
+++ b/gc_priv.h
@@ -73,7 +73,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define CONST
#endif
-#ifdef AMIGA
+#if 0 /* was once defined for AMIGA */
# define GC_FAR __far
#else
# define GC_FAR
@@ -350,7 +350,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+ GC_page_size) \
+ GC_page_size-1)
# else
-# if defined(AMIGA) || defined(NEXT) || defined(DOS4GW)
+# if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
# define GET_MEM(bytes) HBLKPTR((size_t) \
calloc(1, (size_t)bytes + GC_page_size) \
+ GC_page_size-1)
@@ -823,6 +823,7 @@ struct hblkhdr {
struct hblk * hb_next; /* Link field for hblk free list */
/* and for lists of chunks waiting to be */
/* reclaimed. */
+ struct hblk * hb_prev; /* Backwards link for free list. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
char* hb_map; /* A pointer to a pointer validity map of the block. */
@@ -837,9 +838,20 @@ struct hblkhdr {
# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
/* point to the first page of */
/* this object. */
+# define WAS_UNMAPPED 2 /* This is a free block, which has */
+ /* been unmapped from the address */
+ /* space. */
+ /* GC_remap must be invoked on it */
+ /* before it can be reallocated. */
+ /* Only set with USE_MUNMAP. */
unsigned short hb_last_reclaimed;
/* Value of GC_gc_no when block was */
/* last allocated or swept. May wrap. */
+ /* For a free block, this is maintained */
+ /* unly for USE_MUNMAP, and indicates */
+					/* only for USE_MUNMAP, and indicates */
+ /* when the size of the block last */
+ /* changed. */
word hb_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
@@ -959,6 +971,9 @@ struct _GC_arrays {
word _max_heapsize;
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
+ word _large_free_bytes;
+ /* Total bytes contained in blocks on large object free */
+ /* list. */
word _words_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
@@ -1005,6 +1020,9 @@ struct _GC_arrays {
/* Number of words in accessible atomic */
/* objects. */
# endif
+# ifdef USE_MUNMAP
+ word _unmapped_bytes;
+# endif
# ifdef MERGE_SIZES
unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
/* Number of words to allocate for a given allocation request in */
@@ -1022,7 +1040,7 @@ struct _GC_arrays {
/* to an object at */
/* block_start+i&~3 - WORDS_TO_BYTES(j). */
/* (If ALL_INTERIOR_POINTERS is defined, then */
- /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* instead ((short *)(hb_map[sz])[i] is j if */
/* block_start+WORDS_TO_BYTES(i) is in the */
/* interior of an object starting at */
/* block_start+WORDS_TO_BYTES(i-j)). */
@@ -1135,6 +1153,7 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
+# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
@@ -1144,6 +1163,9 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
+# ifdef USE_MUNMAP
+# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# endif
# ifdef MSWIN32
# define GC_heap_bases GC_arrays._heap_bases
# endif
@@ -1236,7 +1258,7 @@ extern char * GC_invalid_map;
/* Pointer to the nowhere valid hblk map */
/* Blocks pointing to this map are free. */
-extern struct hblk * GC_hblkfreelist;
+extern struct hblk * GC_hblkfreelist[];
/* List of completely empty heap blocks */
/* Linked through hb_next field of */
/* header structure associated with */
@@ -1311,7 +1333,12 @@ GC_bool GC_should_collect();
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
-struct hblk * GC_next_block(/* struct hblk * h */);
+struct hblk * GC_next_used_block(/* struct hblk * h */);
+ /* Return first in-use block >= h */
+struct hblk * GC_prev_block(/* struct hblk * h */);
+ /* Return last block <= h. Returned block */
+ /* is managed by GC, but may or may not be in */
+ /* use. */
void GC_mark_init();
void GC_clear_marks(); /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state(); /* Tell the marker that marked */
@@ -1608,6 +1635,15 @@ extern void (*GC_print_heap_obj)(/* ptr_t p */);
/* detailed description of the object */
/* referred to by p. */
+/* Memory unmapping: */
+#ifdef USE_MUNMAP
+ void GC_unmap_old(void);
+ void GC_merge_unmapped(void);
+ void GC_unmap(ptr_t start, word bytes);
+ void GC_remap(ptr_t start, word bytes);
+ void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
+#endif
+
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
@@ -1640,6 +1676,16 @@ void GC_print_heap_sects();
void GC_print_static_roots();
void GC_dump();
+#ifdef KEEP_BACK_PTRS
+ void GC_store_back_pointer(ptr_t source, ptr_t dest);
+ void GC_marked_for_finalization(ptr_t dest);
+# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
+#else
+# define GC_STORE_BACK_PTR(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest)
+#endif
+
/* Make arguments appear live to compiler */
# ifdef __WATCOMC__
void GC_noop(void*, ...);
@@ -1690,4 +1736,13 @@ void GC_err_puts(/* char *s */);
/* newlines, don't ... */
+# ifdef GC_ASSERTIONS
+# define GC_ASSERT(expr) if(!(expr)) {\
+ GC_err_printf2("Assertion failure: %s:%ld\n", \
+ __FILE__, (unsigned long)__LINE__); \
+ ABORT("assertion failure"); }
+# else
+# define GC_ASSERT(expr)
+# endif
+
# endif /* GC_PRIVATE_H */
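
The new GC_ASSERT macro is internal, but its behavior is easy to see in
a sketch (check_invariant is an illustrative name, and the asserted
relation between GC_heapsize and the new GC_large_free_bytes is an
assumed invariant, used here only for illustration):

    # include "gc_priv.h"

    void check_invariant(void)
    {
        /* With -DGC_ASSERTIONS a failing check prints the file and   */
        /* line and aborts; without it, GC_ASSERT expands to nothing. */
        GC_ASSERT(GC_heapsize >= GC_large_free_bytes);
    }
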
diff --git a/gcconfig.h b/gcconfig.h
index b1a9dc36..c9017d37 100644
--- a/gcconfig.h
+++ b/gcconfig.h
@@ -43,6 +43,11 @@
# define OPENBSD
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(__sparc__)
+# define SPARC
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
@@ -100,7 +105,8 @@
# endif
# define mach_type_known
# endif
-# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux)
+# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
+ && !defined(__OpenBSD__)
# define SPARC
# define DRSNX
# define mach_type_known
@@ -129,7 +135,7 @@
# define HP_PA
# define mach_type_known
# endif
-# if defined(LINUX) && defined(i386)
+# if defined(LINUX) && (defined(i386) || defined(__i386__))
# define I386
# define mach_type_known
# endif
@@ -141,9 +147,8 @@
# define M68K
# define mach_type_known
# endif
-# if defined(linux) && defined(sparc)
+# if defined(LINUX) && defined(sparc)
# define SPARC
-# define LINUX
# define mach_type_known
# endif
# if defined(__alpha) || defined(__alpha__)
@@ -153,9 +158,11 @@
# endif
# define mach_type_known
# endif
-# if defined(_AMIGA)
-# define M68K
+# if defined(_AMIGA) && !defined(AMIGA)
# define AMIGA
+# endif
+# ifdef AMIGA
+# define M68K
# define mach_type_known
# endif
# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
@@ -168,6 +175,11 @@
# define MACOS
# define mach_type_known
# endif
+# if defined(macosx)
+# define MACOSX
+# define POWERPC
+# define mach_type_known
+# endif
# if defined(NeXT) && defined(mc68000)
# define M68K
# define NEXT
@@ -486,8 +498,8 @@
# ifdef POWERPC
# define MACH_TYPE "POWERPC"
-# define ALIGNMENT 2
# ifdef MACOS
+# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
@@ -497,14 +509,24 @@
# define DATAEND /* not needed */
# endif
# ifdef LINUX
+# define ALIGNMENT 4 /* Guess. Can someone verify? */
+ /* This was 2, but that didn't sound right. */
# define OS_TYPE "LINUX"
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
+ /* Stack usually starts at 0x80000000 */
# define DATASTART GC_data_start
extern int _end;
# define DATAEND (&_end)
# endif
+# ifdef MACOSX
+# define ALIGNMENT 4
+# define OS_TYPE "MACOSX"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0xc0000000)
+# define DATAEND /* not needed */
+# endif
# endif
# ifdef VAX
@@ -603,6 +625,11 @@
# define SVR4
# define STACKBOTTOM ((ptr_t) 0xf0000000)
# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define STACKBOTTOM ((ptr_t) 0xf8000000)
+# define DATASTART ((ptr_t)(&etext))
+# endif
# endif
# ifdef I386
@@ -657,10 +684,13 @@
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
-# define STACKBOTTOM ((ptr_t)0xc0000000)
- /* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
- /* Probably needs to be more flexible, but I don't yet */
- /* fully understand how flexible. */
+# define HEURISTIC1
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+ /* STACKBOTTOM is usually 0xc0000000, but this changes with */
+ /* different kernel configurations. In particular, systems */
+ /* with 2GB physical memory will usually move the user */
+ /* address space limit, and hence initial SP to 0x80000000. */
# if !defined(LINUX_THREADS) || !defined(REDIRECT_MALLOC)
# define MPROTECT_VDB
# else
@@ -909,9 +939,13 @@
# define CPP_WORDSZ 64
# define STACKBOTTOM ((ptr_t) 0x120000000)
# ifdef __ELF__
+# if 0
+ /* __data_start apparently disappeared in some recent releases. */
extern int __data_start;
# define DATASTART &__data_start
-# define DYNAMIC_LOADING
+# endif
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
@@ -1021,6 +1055,10 @@
# undef MPROTECT_VDB
# endif
+# ifdef USE_MUNMAP
+# undef MPROTECT_VDB /* Can't deal with address space holes. */
+# endif
+
# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
# define DEFAULT_VDB
# endif
diff --git a/headers.c b/headers.c
index fae683a6..9564a6a5 100644
--- a/headers.c
+++ b/headers.c
@@ -25,6 +25,12 @@
# include "gc_priv.h"
bottom_index * GC_all_bottom_indices = 0;
+ /* Pointer to first (lowest addr) */
+ /* bottom_index. */
+
+bottom_index * GC_all_bottom_indices_end = 0;
+ /* Pointer to last (highest addr) */
+ /* bottom_index. */
/* Non-macro version of header location routine */
hdr * GC_find_header(h)
@@ -137,16 +143,17 @@ void GC_init_headers()
/* Make sure that there is a bottom level index block for address addr */
/* Return FALSE on failure. */
static GC_bool get_index(addr)
-register word addr;
+word addr;
{
- register word hi =
- (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
- register bottom_index * r;
- register bottom_index * p;
- register bottom_index ** prev;
+ word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ bottom_index * r;
+ bottom_index * p;
+ bottom_index ** prev;
+ bottom_index *pi;
+
# ifdef HASH_TL
- register unsigned i = TL_HASH(hi);
- register bottom_index * old;
+ unsigned i = TL_HASH(hi);
+ bottom_index * old;
old = p = GC_top_index[i];
while(p != GC_all_nils) {
@@ -164,11 +171,21 @@ register word addr;
if (r == 0) return(FALSE);
GC_top_index[hi] = r;
BZERO(r, sizeof (bottom_index));
-# endif
+# endif
r -> key = hi;
/* Add it to the list of bottom indices */
- prev = &GC_all_bottom_indices;
- while ((p = *prev) != 0 && p -> key < hi) prev = &(p -> asc_link);
+ prev = &GC_all_bottom_indices; /* pointer to p */
+ pi = 0; /* bottom_index preceding p */
+ while ((p = *prev) != 0 && p -> key < hi) {
+ pi = p;
+ prev = &(p -> asc_link);
+ }
+ r -> desc_link = pi;
+ if (0 == p) {
+ GC_all_bottom_indices_end = r;
+ } else {
+ p -> desc_link = r;
+ }
r -> asc_link = p;
*prev = r;
return(TRUE);
@@ -185,6 +202,9 @@ register struct hblk * h;
if (!get_index((word) h)) return(FALSE);
result = alloc_hdr();
SET_HDR(h, result);
+# ifdef USE_MUNMAP
+ result -> hb_last_reclaimed = GC_gc_no;
+# endif
return(result != 0);
}
@@ -261,7 +281,7 @@ word client_data;
/* Get the next valid block whose address is at least h */
/* Return 0 if there is none. */
-struct hblk * GC_next_block(h)
+struct hblk * GC_next_used_block(h)
struct hblk * h;
{
register bottom_index * bi;
@@ -276,15 +296,16 @@ struct hblk * h;
}
while(bi != 0) {
while (j < BOTTOM_SZ) {
- if (IS_FORWARDING_ADDR_OR_NIL(bi -> index[j])) {
+ hdr * hhdr = bi -> index[j];
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
j++;
} else {
- if (bi->index[j]->hb_map != GC_invalid_map) {
+ if (hhdr->hb_map != GC_invalid_map) {
return((struct hblk *)
(((bi -> key << LOG_BOTTOM_SZ) + j)
<< LOG_HBLKSIZE));
} else {
- j += divHBLKSZ(bi->index[j] -> hb_sz);
+ j += divHBLKSZ(hhdr -> hb_sz);
}
}
}
@@ -293,3 +314,38 @@ struct hblk * h;
}
return(0);
}
+
+/* Get the last (highest address) block whose address is */
+/* at most h. Return 0 if there is none. */
+/* Unlike the above, this may return a free block. */
+struct hblk * GC_prev_block(h)
+struct hblk * h;
+{
+ register bottom_index * bi;
+ register signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
+
+ GET_BI(h, bi);
+ if (bi == GC_all_nils) {
+ register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ bi = GC_all_bottom_indices_end;
+ while (bi != 0 && bi -> key > hi) bi = bi -> desc_link;
+ j = BOTTOM_SZ - 1;
+ }
+ while(bi != 0) {
+ while (j >= 0) {
+ hdr * hhdr = bi -> index[j];
+ if (0 == hhdr) {
+ --j;
+ } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ j -= (signed_word)hhdr;
+ } else {
+ return((struct hblk *)
+ (((bi -> key << LOG_BOTTOM_SZ) + j)
+ << LOG_HBLKSIZE));
+ }
+ }
+ j = BOTTOM_SZ - 1;
+ bi = bi -> desc_link;
+ }
+ return(0);
+}
diff --git a/include/backptr.h b/include/backptr.h
new file mode 100644
index 00000000..d34224e4
--- /dev/null
+++ b/include/backptr.h
@@ -0,0 +1,56 @@
+/*
+ * This is a simple API to implement pointer back tracing, i.e.
+ * to answer questions such as "who is pointing to this" or
+ * "why is this object being retained by the collector"
+ *
+ * This API assumes that we have an ANSI C compiler.
+ *
+ * Most of these calls yield useful information only after
+ * a garbage collection. Usually the client will first force
+ * a full collection and then gather information, preferably
+ * before much intervening allocation.
+ *
+ * The implementation of the interface is only about 99.9999%
+ * correct. It is intended to be good enough for profiling,
+ * but is not intended to be used with production code.
+ *
+ * Results are likely to be much more useful if all allocation is
+ * accomplished through the debugging allocators.
+ *
+ * The implementation idea is due to A. Demers.
+ */
+
+/* Store information about the object referencing dest in *base_p */
+/* and *offset_p. */
+/* If multiple objects or roots point to dest, the one reported */
+/* will be the last one used by the garbage collector to trace the */
+/* object. */
+/* source is root ==> *base_p = address, *offset_p = 0 */
+/* source is heap object ==> *base_p != 0, *offset_p = offset */
+/* Returns a GC_ref_kind (see below); GC_UNREFERENCED if the */
+/* source couldn't be determined. */
+/* Dest can be any address within a heap object. */
+typedef enum {  GC_UNREFERENCED, /* No reference info available. */
+ GC_NO_SPACE, /* Dest not allocated with debug alloc */
+ GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
+ GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
+ GC_FINALIZER_REFD /* Finalizable and hence accessible. */
+} GC_ref_kind;
+
+GC_ref_kind GC_get_back_ptr_info(void *dest, void **base_p, size_t *offset_p);
+
+/* Generate a random heap address. */
+/* The resulting address is in the heap, but */
+/* not necessarily inside a valid object. */
+void * GC_generate_random_heap_address(void);
+
+/* Generate a random address inside a valid marked heap object. */
+void * GC_generate_random_valid_address(void);
+
+/* Force a garbage collection and generate a backtrace from a */
+/* random heap address. */
+/* This uses the GC logging mechanism (GC_printf) to produce */
+/* output. It can often be called from a debugger. The */
+/* source in dbg_mlc.c also serves as a sample client. */
+void GC_generate_random_backtrace(void);
+
+
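
A client-side retention walk over this interface, as a sketch
(print_retention_chain is an illustrative name; as noted above, call it
only after a full collection):

    #include <stdio.h>
    #include "gc.h"
    #include "backptr.h"

    void print_retention_chain(void *dest)
    {
        void *base;
        size_t offset;
        GC_gcollect();                  /* make back pointers current */
        for (;;) {
            switch (GC_get_back_ptr_info(dest, &base, &offset)) {
            case GC_UNREFERENCED:
                printf("no reference recorded\n");      return;
            case GC_NO_SPACE:
                printf("no debug info for object\n");   return;
            case GC_REFD_FROM_ROOT:
                printf("root at %p\n", base);           return;
            case GC_FINALIZER_REFD:
                printf("kept live for finalization\n"); return;
            case GC_REFD_FROM_HEAP:
                printf("object at %p (offset %lu)\n",
                       base, (unsigned long)offset);
                dest = base;            /* walk one level up */
                break;
            }
        }
    }
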
diff --git a/include/gc.h b/include/gc.h
index ceabb02f..30614095 100644
--- a/include/gc.h
+++ b/include/gc.h
@@ -96,11 +96,31 @@ GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
/* pointer to a previously allocated heap */
/* object. */
+GC_API int GC_find_leak;
+ /* Do not actually garbage collect, but simply */
+ /* report inaccessible memory that was not */
+ /* deallocated with GC_free. Initial value */
+ /* is determined by FIND_LEAK macro. */
+
GC_API int GC_quiet; /* Disable statistics output. Only matters if */
/* collector has been compiled with statistics */
/* enabled. This involves a performance cost, */
/* and is thus not the default. */
+GC_API int GC_finalize_on_demand;
+			/* response to an explicit GC_invoke_finalizers */
+ /* response to an eplit GC_invoke_finalizers */
+ /* call. The default is determined by whether */
+ /* the FINALIZE_ON_DEMAND macro is defined */
+ /* when the collector is built. */
+
+GC_API int GC_java_finalization;
+ /* Mark objects reachable from finalizable */
+ /* objects in a separate postpass. This makes */
+ /* it a bit safer to use non-topologically- */
+ /* ordered finalization. Default value is */
+ /* determined by JAVA_FINALIZATION macro. */
+
GC_API int GC_dont_gc; /* Dont collect unless explicitly requested, e.g. */
/* because it's not safe. */
@@ -510,7 +530,7 @@ GC_API int GC_invoke_finalizers GC_PROTO((void));
/* be finalized. Return the number of finalizers */
/* that were run. Normally this is also called */
/* implicitly during some allocations. If */
- /* FINALIZE_ON_DEMAND is defined, it must be called */
+	/* GC_finalize_on_demand is nonzero, it must be called */
/* explicitly. */
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
@@ -692,6 +712,7 @@ GC_API void (*GC_is_visible_print_proc)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
+/* lb must be large enough to hold the pointer field. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
/* in returned list. */
diff --git a/include/gc_alloc.h b/include/gc_alloc.h
index 1d912db2..1f1d54af 100644
--- a/include/gc_alloc.h
+++ b/include/gc_alloc.h
@@ -13,7 +13,7 @@
//
// This is a C++ header file that is intended to replace the SGI STL
-// alloc.h.
+// alloc.h. This assumes SGI STL version < 3.0.
//
// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
// and -DALL_INTERIOR_POINTERS. We also recommend
diff --git a/include/leak_detector.h b/include/leak_detector.h
new file mode 100644
index 00000000..6786825a
--- /dev/null
+++ b/include/leak_detector.h
@@ -0,0 +1,7 @@
+#define GC_DEBUG
+#include "gc.h"
+#define malloc(n) GC_MALLOC(n)
+#define calloc(m,n) GC_MALLOC((m)*(n))
+#define free(p) GC_FREE(p)
+#define realloc(p,n) GC_REALLOC(p,n)
+#define CHECK_LEAKS() GC_gcollect()
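
Usage sketch for the new header (assumes the collector was built with
-DFIND_LEAK, or that GC_find_leak is set before the first allocation):

    #include "leak_detector.h"

    int main(void)
    {
        char *p = malloc(100);   /* really GC_MALLOC(100), with debug info */
        p = 0;                   /* lose the only pointer to the block */
        CHECK_LEAKS();           /* forces a collection; the lost block */
                                 /* is reported as a leak */
        return 0;
    }
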
diff --git a/include/private/gc_hdrs.h b/include/private/gc_hdrs.h
index 2f2d1bf9..60dc2ad3 100644
--- a/include/private/gc_hdrs.h
+++ b/include/private/gc_hdrs.h
@@ -49,14 +49,16 @@ typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
* The bottom level index contains one of three kinds of values:
- * 0 means we're not responsible for this block.
+ * 0 means we're not responsible for this block,
+ * or this is a block other than the first one in a free block.
* 1 < (long)X <= MAX_JUMP means the block starts at least
* X * HBLKSIZE bytes before the current address.
* A valid pointer points to a hdr structure. (The above can't be
* valid pointers due to the GET_MEM return convention.)
*/
struct bi * asc_link; /* All indices are linked in */
- /* ascending order. */
+ /* ascending order... */
+ struct bi * desc_link; /* ... and in descending order. */
word key; /* high order address bits. */
# ifdef HASH_TL
struct bi * hash_link; /* Hash chain link. */
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index 934075fa..5ce52a7a 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -73,7 +73,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define CONST
#endif
-#ifdef AMIGA
+#if 0 /* was once defined for AMIGA */
# define GC_FAR __far
#else
# define GC_FAR
@@ -350,7 +350,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+ GC_page_size) \
+ GC_page_size-1)
# else
-# if defined(AMIGA) || defined(NEXT) || defined(DOS4GW)
+# if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
# define GET_MEM(bytes) HBLKPTR((size_t) \
calloc(1, (size_t)bytes + GC_page_size) \
+ GC_page_size-1)
@@ -823,6 +823,7 @@ struct hblkhdr {
struct hblk * hb_next; /* Link field for hblk free list */
/* and for lists of chunks waiting to be */
/* reclaimed. */
+ struct hblk * hb_prev; /* Backwards link for free list. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
char* hb_map; /* A pointer to a pointer validity map of the block. */
@@ -837,9 +838,20 @@ struct hblkhdr {
# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
/* point to the first page of */
/* this object. */
+# define WAS_UNMAPPED 2 /* This is a free block, which has */
+ /* been unmapped from the address */
+ /* space. */
+ /* GC_remap must be invoked on it */
+ /* before it can be reallocated. */
+ /* Only set with USE_MUNMAP. */
unsigned short hb_last_reclaimed;
/* Value of GC_gc_no when block was */
/* last allocated or swept. May wrap. */
+ /* For a free block, this is maintained */
+					/* only for USE_MUNMAP, and indicates */
+ /* when the header was allocated, or */
+ /* when the size of the block last */
+ /* changed. */
word hb_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
@@ -959,6 +971,9 @@ struct _GC_arrays {
word _max_heapsize;
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
+ word _large_free_bytes;
+ /* Total bytes contained in blocks on large object free */
+ /* list. */
word _words_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
@@ -1005,6 +1020,9 @@ struct _GC_arrays {
/* Number of words in accessible atomic */
/* objects. */
# endif
+# ifdef USE_MUNMAP
+ word _unmapped_bytes;
+# endif
# ifdef MERGE_SIZES
unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
/* Number of words to allocate for a given allocation request in */
@@ -1022,7 +1040,7 @@ struct _GC_arrays {
/* to an object at */
/* block_start+i&~3 - WORDS_TO_BYTES(j). */
/* (If ALL_INTERIOR_POINTERS is defined, then */
- /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* instead ((short *)(hb_map[sz])[i] is j if */
/* block_start+WORDS_TO_BYTES(i) is in the */
/* interior of an object starting at */
/* block_start+WORDS_TO_BYTES(i-j)). */
@@ -1135,6 +1153,7 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
+# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
@@ -1144,6 +1163,9 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
+# ifdef USE_MUNMAP
+# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# endif
# ifdef MSWIN32
# define GC_heap_bases GC_arrays._heap_bases
# endif
@@ -1236,7 +1258,7 @@ extern char * GC_invalid_map;
/* Pointer to the nowhere valid hblk map */
/* Blocks pointing to this map are free. */
-extern struct hblk * GC_hblkfreelist;
+extern struct hblk * GC_hblkfreelist[];
/* List of completely empty heap blocks */
/* Linked through hb_next field of */
/* header structure associated with */
@@ -1311,7 +1333,12 @@ GC_bool GC_should_collect();
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
-struct hblk * GC_next_block(/* struct hblk * h */);
+struct hblk * GC_next_used_block(/* struct hblk * h */);
+ /* Return first in-use block >= h */
+struct hblk * GC_prev_block(/* struct hblk * h */);
+ /* Return last block <= h. Returned block */
+ /* is managed by GC, but may or may not be in */
+ /* use. */
void GC_mark_init();
void GC_clear_marks(); /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state(); /* Tell the marker that marked */
@@ -1608,6 +1635,15 @@ extern void (*GC_print_heap_obj)(/* ptr_t p */);
/* detailed description of the object */
/* referred to by p. */
+/* Memory unmapping: */
+#ifdef USE_MUNMAP
+ void GC_unmap_old(void);
+ void GC_merge_unmapped(void);
+ void GC_unmap(ptr_t start, word bytes);
+ void GC_remap(ptr_t start, word bytes);
+ void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
+#endif
+
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
@@ -1640,6 +1676,16 @@ void GC_print_heap_sects();
void GC_print_static_roots();
void GC_dump();
+#ifdef KEEP_BACK_PTRS
+ void GC_store_back_pointer(ptr_t source, ptr_t dest);
+ void GC_marked_for_finalization(ptr_t dest);
+# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
+#else
+# define GC_STORE_BACK_PTR(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest)
+#endif
+
/* Make arguments appear live to compiler */
# ifdef __WATCOMC__
void GC_noop(void*, ...);
@@ -1690,4 +1736,13 @@ void GC_err_puts(/* char *s */);
/* newlines, don't ... */
+# ifdef GC_ASSERTIONS
+# define GC_ASSERT(expr) if(!(expr)) {\
+ GC_err_printf2("Assertion failure: %s:%ld\n", \
+ __FILE__, (unsigned long)__LINE__); \
+ ABORT("assertion failure"); }
+# else
+# define GC_ASSERT(expr)
+# endif
+
# endif /* GC_PRIVATE_H */
diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
index b1a9dc36..c9017d37 100644
--- a/include/private/gcconfig.h
+++ b/include/private/gcconfig.h
@@ -43,6 +43,11 @@
# define OPENBSD
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(__sparc__)
+# define SPARC
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
@@ -100,7 +105,8 @@
# endif
# define mach_type_known
# endif
-# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux)
+# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
+ && !defined(__OpenBSD__)
# define SPARC
# define DRSNX
# define mach_type_known
@@ -129,7 +135,7 @@
# define HP_PA
# define mach_type_known
# endif
-# if defined(LINUX) && defined(i386)
+# if defined(LINUX) && (defined(i386) || defined(__i386__))
# define I386
# define mach_type_known
# endif
@@ -141,9 +147,8 @@
# define M68K
# define mach_type_known
# endif
-# if defined(linux) && defined(sparc)
+# if defined(LINUX) && defined(sparc)
# define SPARC
-# define LINUX
# define mach_type_known
# endif
# if defined(__alpha) || defined(__alpha__)
@@ -153,9 +158,11 @@
# endif
# define mach_type_known
# endif
-# if defined(_AMIGA)
-# define M68K
+# if defined(_AMIGA) && !defined(AMIGA)
# define AMIGA
+# endif
+# ifdef AMIGA
+# define M68K
# define mach_type_known
# endif
# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
@@ -168,6 +175,11 @@
# define MACOS
# define mach_type_known
# endif
+# if defined(macosx)
+# define MACOSX
+# define POWERPC
+# define mach_type_known
+# endif
# if defined(NeXT) && defined(mc68000)
# define M68K
# define NEXT
@@ -486,8 +498,8 @@
# ifdef POWERPC
# define MACH_TYPE "POWERPC"
-# define ALIGNMENT 2
# ifdef MACOS
+# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
@@ -497,14 +509,24 @@
# define DATAEND /* not needed */
# endif
# ifdef LINUX
+# define ALIGNMENT 4 /* Guess. Can someone verify? */
+ /* This was 2, but that didn't sound right. */
# define OS_TYPE "LINUX"
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
+ /* Stack usually starts at 0x80000000 */
# define DATASTART GC_data_start
extern int _end;
# define DATAEND (&_end)
# endif
+# ifdef MACOSX
+# define ALIGNMENT 4
+# define OS_TYPE "MACOSX"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0xc0000000)
+# define DATAEND /* not needed */
+# endif
# endif
# ifdef VAX
@@ -603,6 +625,11 @@
# define SVR4
# define STACKBOTTOM ((ptr_t) 0xf0000000)
# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define STACKBOTTOM ((ptr_t) 0xf8000000)
+# define DATASTART ((ptr_t)(&etext))
+# endif
# endif
# ifdef I386
@@ -657,10 +684,13 @@
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
-# define STACKBOTTOM ((ptr_t)0xc0000000)
- /* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
- /* Probably needs to be more flexible, but I don't yet */
- /* fully understand how flexible. */
+# define HEURISTIC1
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+ /* STACKBOTTOM is usually 0xc0000000, but this changes with */
+ /* different kernel configurations. In particular, systems */
+ /* with 2GB physical memory will usually move the user */
+ /* address space limit, and hence initial SP to 0x80000000. */
# if !defined(LINUX_THREADS) || !defined(REDIRECT_MALLOC)
# define MPROTECT_VDB
# else
@@ -909,9 +939,13 @@
# define CPP_WORDSZ 64
# define STACKBOTTOM ((ptr_t) 0x120000000)
# ifdef __ELF__
+# if 0
+ /* __data_start apparently disappeared in some recent releases. */
extern int __data_start;
# define DATASTART &__data_start
-# define DYNAMIC_LOADING
+# endif
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
@@ -1021,6 +1055,10 @@
# undef MPROTECT_VDB
# endif
+# ifdef USE_MUNMAP
+# undef MPROTECT_VDB /* Can't deal with address space holes. */
+# endif
+
# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
# define DEFAULT_VDB
# endif
diff --git a/linux_threads.c b/linux_threads.c
index 4bcdd3a1..8287dce6 100644
--- a/linux_threads.c
+++ b/linux_threads.c
@@ -118,12 +118,12 @@ GC_linux_thread_top_of_stack() relies on implementation details of
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
and grow to no more than 2M.
To make sure that we're using LinuxThreads and not some other thread
-package, we generate a dummy reference to `__pthread_initial_thread_bos',
+package, we generate a dummy reference to `pthread_kill_other_threads_np'
+(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
-extern char * __pthread_initial_thread_bos;
-char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;
+void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#define LINUX_THREADS_STACK_SIZE (2 * 1024 * 1024)
diff --git a/mach_dep.c b/mach_dep.c
index 23e270e3..53698604 100644
--- a/mach_dep.c
+++ b/mach_dep.c
@@ -20,7 +20,11 @@
# define _longjmp(b,v) longjmp(b,v)
# endif
# ifdef AMIGA
-# include <dos.h>
+# ifndef __GNUC__
+# include <dos/dos.h>
+# else
+# include <machine/reg.h>
+# endif
# endif
#if defined(__MWERKS__) && !defined(POWERPC)
@@ -126,9 +130,28 @@ void GC_push_regs()
asm("addq.w &0x4,%sp"); /* put stack back where it was */
# endif /* M68K HP */
-# ifdef AMIGA
- /* AMIGA - could be replaced by generic code */
- /* a0, a1, d0 and d1 are caller save */
+# if defined(M68K) && defined(AMIGA)
+ /* AMIGA - could be replaced by generic code */
+ /* a0, a1, d0 and d1 are caller save */
+
+# ifdef __GNUC__
+ asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
+
+ asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
+ /* Skip frame pointer and stack pointer */
+ asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
+
+ asm("addq.w &0x4,%sp"); /* put stack back where it was */
+# else /* !__GNUC__ */
GC_push_one(getreg(REG_A2));
GC_push_one(getreg(REG_A3));
GC_push_one(getreg(REG_A4));
@@ -141,7 +164,8 @@ void GC_push_regs()
GC_push_one(getreg(REG_D5));
GC_push_one(getreg(REG_D6));
GC_push_one(getreg(REG_D7));
-# endif
+# endif /* !__GNUC__ */
+# endif /* AMIGA */
# if defined(M68K) && defined(MACOS)
# if defined(THINK_C)
diff --git a/malloc.c b/malloc.c
index 37da584c..66e62d29 100644
--- a/malloc.c
+++ b/malloc.c
@@ -93,8 +93,16 @@ register ptr_t *opp;
if(GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
- while ((h = GC_allochblk(lw, k, 0)) == 0
- && GC_collect_or_expand(n_blocks, FALSE));
+ h = GC_allochblk(lw, k, 0);
+# ifdef USE_MUNMAP
+ if (0 == h) {
+ GC_merge_unmapped();
+ h = GC_allochblk(lw, k, 0);
+ }
+# endif
+ while (0 == h && GC_collect_or_expand(n_blocks, FALSE)) {
+ h = GC_allochblk(lw, k, 0);
+ }
if (h == 0) {
op = 0;
} else {
diff --git a/mallocx.c b/mallocx.c
index b1450215..8c07fa98 100644
--- a/mallocx.c
+++ b/mallocx.c
@@ -57,8 +57,16 @@ register int k;
if(GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
- while ((h = GC_allochblk(lw, k, IGNORE_OFF_PAGE)) == 0
- && GC_collect_or_expand(n_blocks, TRUE));
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+# ifdef USE_MUNMAP
+ if (0 == h) {
+ GC_merge_unmapped();
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+ }
+# endif
+ while (0 == h && GC_collect_or_expand(n_blocks, TRUE)) {
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+ }
if (h == 0) {
op = 0;
} else {
diff --git a/mark.c b/mark.c
index c827af5c..34db472a 100644
--- a/mark.c
+++ b/mark.c
@@ -681,7 +681,7 @@ word p;
# endif
/* As above, but argument passed preliminary test. */
-# ifdef PRINT_BLACK_LIST
+# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
void GC_push_one_checked(p, interior_ptrs, source)
ptr_t source;
# else
@@ -744,6 +744,7 @@ register GC_bool interior_ptrs;
} else {
if (!mark_bit_from_hdr(hhdr, displ)) {
set_mark_bit_from_hdr(hhdr, displ);
+ GC_STORE_BACK_PTR(source, (ptr_t)r);
PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
&(GC_mark_stack[GC_mark_stack_size]));
}
@@ -1102,7 +1103,7 @@ struct hblk *h;
{
register hdr * hhdr;
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
GC_push_marked(h, hhdr);
@@ -1118,7 +1119,7 @@ struct hblk *h;
if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
for (;;) {
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
# ifdef STUBBORN_ALLOC
@@ -1147,7 +1148,7 @@ struct hblk *h;
register hdr * hhdr = HDR(h);
for (;;) {
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
diff --git a/misc.c b/misc.c
index 7779c43c..c3fce63d 100644
--- a/misc.c
+++ b/misc.c
@@ -73,6 +73,12 @@ GC_bool GC_dont_gc = 0;
GC_bool GC_quiet = 0;
+#ifdef FIND_LEAK
+ int GC_find_leak = 1;
+#else
+ int GC_find_leak = 0;
+#endif
+
/*ARGSUSED*/
GC_PTR GC_default_oom_fn GC_PROTO((size_t bytes_requested))
{
@@ -427,11 +433,8 @@ void GC_init_inner()
# ifdef MSWIN32
GC_init_win32();
# endif
-# if defined(LINUX) && defined(POWERPC)
- GC_init_linuxppc();
-# endif
-# if defined(LINUX) && defined(SPARC)
- GC_init_linuxsparc();
+# if defined(LINUX) && (defined(POWERPC) || defined(ALPHA) || defined(SPARC))
+ GC_init_linux_data_start();
# endif
# ifdef SOLARIS_THREADS
GC_thr_init();
@@ -558,7 +561,8 @@ void GC_init_inner()
void GC_enable_incremental GC_PROTO(())
{
-# if !defined(FIND_LEAK) && !defined(SMALL_CONFIG)
+# if !defined(SMALL_CONFIG)
+ if (!GC_find_leak) {
DCL_LOCK_STATE;
DISABLE_SIGNALS();
@@ -596,6 +600,7 @@ void GC_enable_incremental GC_PROTO(())
out:
UNLOCK();
ENABLE_SIGNALS();
+ }
# endif
}
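
The runtime counterpart of -DFIND_LEAK introduced above, as a sketch
(set the flag before the collector allocates anything):

    #include "gc.h"

    int main(void)
    {
        GC_find_leak = 1;        /* report unreachable blocks, don't reclaim */
        GC_malloc(100);          /* pointer dropped: never freed */
        GC_gcollect();           /* the lost block is reported here */
        return 0;
    }
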
diff --git a/nursery.c b/nursery.c
new file mode 100644
index 00000000..8bbb1015
--- /dev/null
+++ b/nursery.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 1999 by Silicon Graphics. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+
+??? This implementation is incomplete. If you are trying to
+??? compile this you are doing something wrong.
+
+#include "nursery.h"
+
+struct copy_obj {
+ ptr_t forward; /* Forwarding link for copied objects. */
+ GC_copy_descriptor descr; /* Object descriptor */
+ word data[1];
+};
+
+ptr_t GC_nursery_start; /* Start of nursery area. */
+ /* Must be NURSERY_BLOCK_SIZE */
+ /* aligned. */
+ptr_t GC_nursery_end; /* End of nursery area. */
+unsigned char * GC_nursery_map;
+ /* GC_nursery_map[i] != 0 if an object */
+ /* starts on the ith 64-bit "word" of */
+ /* nursery. This simple structure has */
+ /* the advantage that */
+ /* allocation is cheap. Lookup is */
+ /* cheap for pointers to the head of */
+ /* an object, which should be the */
+ /* usual case. */
+# define NURSERY_MAP_NOT_START 0 /* Not start of object. */
+# define NURSERY_MAP_START 1 /* Start of object. */
+# define NURSERY_MAP_PINNED 2 /* Start of pinned obj. */
+
+# ifdef ALIGN_DOUBLE
+# define NURSERY_WORD_SIZE (2 * sizeof(word))
+# else
+# define NURSERY_WORD_SIZE sizeof(word)
+# endif
+
+# define NURSERY_BLOCK_SIZE (HBLKSIZE/2)
+ /* HBLKSIZE must be a multiple of NURSERY_BLOCK_SIZE */
+# define NURSERY_SIZE (1024 * NURSERY_BLOCK_SIZE)
+
+size_t GC_nursery_size = NURSERY_SIZE;
+ /* Must be multiple of NURSERY_BLOCK_SIZE */
+
+size_t GC_nursery_blocks; /* Number of blocks in the nursery. */
+
+unsigned GC_next_nursery_block;	/* Index of next block we will attempt */
+					/* to allocate from during this cycle. */
+ /* If it is pinned, we won't actually */
+ /* use it. */
+
+unsigned short *GC_pinned; /* Number of pinned objects in ith */
+ /* nursery block. */
+ /* GC_pinned[i] != 0 if the ith nursery */
+ /* block is pinned, and thus not used */
+ /* for allocation. */
+
+GC_copy_alloc_state global_alloc_state = (ptr_t)(-1); /* will overflow. */
+
+/* Should be called with allocator lock held. */
+void GC_nursery_init() {
+ GC_nursery_start = GET_MEM(GC_nursery_size);
+ GC_nursery_end = GC_nursery_start + GC_nursery_size;
+ GC_next_nursery_block = 0;
+ if (GC_nursery_start < GC_least_plausible_heap_addr) {
+ GC_least_plausible_heap_addr = GC_nursery_start;
+ }
+ if (GC_nursery_end > GC_greatest_plausible_heap_addr) {
+ GC_greatest_plausible_heap_addr = GC_nursery_end;
+ }
+    if ((word)GC_nursery_start & (NURSERY_BLOCK_SIZE-1)) {
+ GC_err_printf1("Nursery area is misaligned!!");
+ /* This should be impossible, since GET_MEM returns HBLKSIZE */
+ /* aligned chunks, and that should be a multiple of */
+ /* NURSERY_BLOCK_SIZE */
+ ABORT("misaligned nursery");
+ }
+ GC_nursery_map = GET_MEM(GC_nursery_size/NURSERY_WORD_SIZE);
+ /* Map is cleared a block at a time when we allocate from the block. */
+ /* BZERO(GC_nursery_map, GC_nursery_size/NURSERY_WORD_SIZE); */
+ GC_nursery_blocks = GC_nursery_size/NURSERY_BLOCK_SIZE;
+ GC_pinned = GC_scratch_alloc(GC_nursery_blocks * sizeof(unsigned short));
+    BZERO(GC_pinned, GC_nursery_blocks * sizeof(unsigned short));
+}
+
+/* Pin all nursery objects referenced from mark stack. */
+void GC_pin_mark_stack_objects(void) {
+ for each possible pointer current in a mark stack object
+ if (current >= GC_nursery_start && current < GC_nursery_end) {
+ unsigned offset = current - GC_nursery_start;
+ unsigned word_offset = BYTES_TO_WORDS(offset);
+ unsigned blockno = (current - GC_nursery_start)/NURSERY_BLOCK_SIZE;
+ while (GC_nursery_map[word_offset] == NURSERY_MAP_NOT_START) {
+ --word_offset;
+ }
+ if (GC_nursery_map[word_offset] != NURSERY_MAP_PINNED) {
+ GC_nursery_map[word_offset] = NURSERY_MAP_PINNED;
+ ++GC_pinned[blockno];
+ ??Push object at GC_nursery_start + WORDS_TO_BYTES(word_offset)
+ ??onto stack.
+ }
+ }
+ }
+}
+
+/* Caller holds allocation lock. */
+void GC_collect_nursery(void) {
+ int i;
+ ptr_t scan_ptr = 0;
+ ?? old_mark_stack_top;
+ STOP_WORLD;
+ for (i = 0; i < GC_nursery_blocks; ++i) GC_pinned[i] = 0;
+ GC_push_all_roots();
+ old_mark_stack_top = GC_mark_stack_top();
+ GC_pin_mark_stack_objects();
+ START_WORLD;
+}
+
+/* Initialize an allocation state so that it can be used for */
+/* allocation. This implicitly reserves a small section of the */
+/* nursery for use with this allocator. */
+void GC_init_copy_alloc_state(GC_copy_alloc_state *s) {
+ unsigned next_block;
+ ptr_t block_addr;
+ LOCK();
+ next_block = GC_next_nursery_block;
+    while(next_block < GC_nursery_blocks && GC_pinned[next_block]) {
+ ++next_block;
+ }
+ if (next_block < GC_nursery_blocks) {
+ block_addr = GC_nursery_start + NURSERY_BLOCK_SIZE * next_block;
+ GC_next_nursery_block = next_block + 1;
+ BZERO(GC_nursery_map + next_block *
+ (NURSERY_BLOCK_SIZE/NURSERY_WORD_SIZE),
+ NURSERY_BLOCK_SIZE/NURSERY_WORD_SIZE);
+	*s = block_addr;
+ UNLOCK();
+ } else {
+ GC_collect_nursery();
+ GC_next_nursery_block = 0;
+ UNLOCK();
+ get_new_block(s);
+ }
+}
+
+GC_PTR GC_copying_malloc2(GC_copy_descriptor d, GC_copy_alloc_state *s) {
+ size_t sz = GC_SIZE_FROM_DESCRIPTOR(d);
+ ptrdiff_t offset;
+ ptr_t result = *s;
+ ptr_t new = result + sz;
+    if (((word)new & COPY_BLOCK_MASK) <= ((word)result & COPY_BLOCK_MASK)) {
+	GC_init_copy_alloc_state(s);
+	result = *s;
+	new = result + sz;
+	GC_ASSERT(((word)new & COPY_BLOCK_MASK)
+		  > ((word)result & COPY_BLOCK_MASK));
+    }
+    ((struct copy_obj *)result) -> descr = d;
+    ((struct copy_obj *)result) -> forward = 0;
+    offset = (result - GC_nursery_start)/NURSERY_WORD_SIZE;
+    GC_nursery_map[offset] = NURSERY_MAP_START;	/* mark object start */
+    *s = new;					/* bump allocation pointer */
+    return((GC_PTR)result);
+}
+
+GC_PTR GC_copying_malloc(GC_copy_descriptor *d) {
+    /* Unimplemented in this draft. Presumably this should acquire  */
+    /* the allocation lock and then allocate via                    */
+    /* GC_copying_malloc2(d, &global_alloc_state).                  */
+}
diff --git a/nursery.h b/nursery.h
new file mode 100755
index 00000000..d109ff09
--- /dev/null
+++ b/nursery.h
@@ -0,0 +1,90 @@
+
+/*
+ * Copyright (c) 1999 by Silicon Graphics. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/*
+ * THE IMPLEMENTATION OF THIS INTERFACE IS INCOMPLETE.
+ * NONE OF THIS HAS BEEN TESTED. DO NOT USE.
+ *
+ * Comments on the interface are appreciated, especially from
+ * potential users of the interface.
+ *
+ * This is a Bartlett style copying collector for young objects.
+ * We assume for now that all objects allocated through this
+ * mechanism have pointers only in the first BITMAP_BITS words.
+ * (On a 32-bit machine, BITMAP_BITS is 30.)
+ * Objects allocated in this manner should be rarely referenced
+ * by objects not allocated either through this interface, or through
+ * the typed allocation interface.
+ * If this interface is used, we assume that type information provided
+ * through either this or the typed allocation interface is valid
+ * in a stronger sense:
+ *
+ * 1) No pointers are stored in fields not marked as such.
+ * (Otherwise it is only necessary that objects referenced by
+ * fields marked as nonpointers are also reachable via another
+ * path.)
+ * 2) Values stored in pointer fields are either not addresses in
+ * the heap, or they really are pointers. In the latter case, it
+ * is acceptable to move the object they refer to, and to update
+ * the pointer.
+ *
+ * GC_free may not be invoked on objects allocated with GC_copying_malloc.
+ *
+ * No extra space is added to the end of objects allocated through this
+ * interface. If the client needs to maintain pointers past the
+ * end, the size should be explicitly padded.
+ *
+ * We assume that calls to this will usually be compiler generated.
+ * Hence the interface is allowed to be a bit ugly in return for speed.
+ */
+
+#include "gc_copy_descr.h"
+
+/* gc_copy_descr.h must define */
+/* GC_SIZE_FROM_DESCRIPTOR(descr) and */
+/* GC_BIT_MAP_FROM_DESCRIPTOR(descr). */
+/* It may either be the GC supplied version of the header file, or a */
+/* client specific one that derives the information from a client- */
+/* specific type descriptor. */
+
+typedef GC_PTR GC_copy_alloc_state;
+ /* Current allocator state. */
+ /* Multiple allocation states */
+ /* may be used for concurrent */
+ /* allocation, or to enhance */
+ /* locality. */
+ /* Should be treated as opaque. */
+
+/* Allocate a memory block of size given in the descriptor, and with */
+/* pointer layout given by the descriptor. The resulting block is    */
+/* not necessarily cleared, and the client should initialize it      */
+/* immediately.                                                      */
+/* (A concurrent GC may see an uninitialized pointer field. If it */
+/* points outside the nursery, that's fine. If it points inside, it */
+/* may retain an object, and be relocated. But that's also fine, since */
+/* the new value will be immediately overwritten.)                   */
+/* This variant acquires the allocation lock, and uses a default */
+/* global allocation state. */
+GC_PTR GC_copying_malloc(GC_copy_descriptor);
+
+/* A variant of the above that does no locking on the fast path, */
+/* and passes an explicit pointer to an allocation state. */
+/* The allocation state is updated. */
+/* There will eventually need to be a macro or inline function version */
+/* of this. */
+GC_PTR GC_copying_malloc2(GC_copy_descriptor, GC_copy_alloc_state *);
+
+/* Initialize an allocation state so that it can be used for */
+/* allocation. This implicitly reserves a small section of the */
+/* nursery for use with this allocator. */
+void GC_init_copy_alloc_state(GC_copy_alloc_state *);
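+
+/* Usage sketch (hypothetical client code; this interface is         */
+/* untested, and my_descr below stands in for a descriptor obtained  */
+/* in a client-specific way via gc_copy_descr.h):                    */
+/*                                                                   */
+/*     GC_copy_alloc_state state;                                    */
+/*     GC_PTR obj;                                                   */
+/*                                                                   */
+/*     GC_init_copy_alloc_state(&state);                             */
+/*     obj = GC_copying_malloc2(my_descr, &state);                   */
+/*     ... initialize all fields of obj immediately ...              */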
diff --git a/os_dep.c b/os_dep.c
index 7b3ba545..81f74f3a 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -72,7 +72,7 @@
# define NEED_FIND_LIMIT
# endif
-# if defined(LINUX) && (defined(POWERPC) || defined(SPARC))
+# if defined(LINUX) && (defined(POWERPC) || defined(SPARC) || defined(ALPHA))
# define NEED_FIND_LIMIT
# endif
@@ -139,29 +139,20 @@
# define OPT_PROT_EXEC 0
#endif
-#if defined(LINUX) && defined(POWERPC)
+#if defined(LINUX) && (defined(POWERPC) || defined(SPARC) || defined(ALPHA))
+ /* The I386 case can be handled without a search. The Alpha case */
+ /* used to be handled differently as well, but the rules changed */
+ /* for recent Linux versions. This seems to be the easiest way to */
+ /* cover all versions. */
ptr_t GC_data_start;
- void GC_init_linuxppc()
- {
- extern ptr_t GC_find_limit();
- extern char **_environ;
- /* This may need to be environ, without the underscore, for */
- /* some versions. */
- GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
- }
-#endif
+ extern char * GC_copyright[]; /* Any data symbol would do. */
-#if defined(LINUX) && defined(SPARC)
- ptr_t GC_data_start;
-
- void GC_init_linuxsparc()
+ void GC_init_linux_data_start()
{
extern ptr_t GC_find_limit();
- extern char **_environ;
- /* This may need to be environ, without the underscore, for */
- /* some versions. */
- GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
+
+ GC_data_start = GC_find_limit((ptr_t)GC_copyright, FALSE);
}
#endif
@@ -362,7 +353,8 @@ word GC_page_size;
}
# else
-# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
+# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
+ || defined(USE_MUNMAP)
void GC_setpagesize()
{
GC_page_size = GETPAGESIZE();
@@ -441,6 +433,24 @@ ptr_t GC_get_stack_base()
ptr_t GC_get_stack_base()
{
+ struct Process *proc = (struct Process*)SysBase->ThisTask;
+
+ /* Reference: Amiga Guru Book Pages: 42,567,574 */
+ if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS
+ && proc->pr_CLI != NULL) {
+ /* first ULONG is StackSize */
+ /*longPtr = proc->pr_ReturnAddr;
+ size = longPtr[0];*/
+
+ return (char *)proc->pr_ReturnAddr + sizeof(ULONG);
+ } else {
+ return (char *)proc->pr_Task.tc_SPUpper;
+ }
+}
+
+#if 0 /* old version */
+ptr_t GC_get_stack_base()
+{
extern struct WBStartup *_WBenchMsg;
extern long __base;
extern long __stack;
@@ -463,10 +473,9 @@ ptr_t GC_get_stack_base()
}
return (ptr_t)(__base + GC_max(size, __stack));
}
+#endif /* 0 */
-# else
-
-
+# else /* !AMIGA, !OS2, ... */
# ifdef NEED_FIND_LIMIT
/* Some tools to implement HEURISTIC2 */
@@ -486,7 +495,7 @@ ptr_t GC_get_stack_base()
typedef void (*handler)();
# endif
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
static struct sigaction old_segv_act;
# if defined(_sigargs) /* !Irix6.x */
static struct sigaction old_bus_act;
@@ -497,7 +506,7 @@ ptr_t GC_get_stack_base()
void GC_setup_temporary_fault_handler()
{
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
struct sigaction act;
act.sa_handler = GC_fault_handler;
@@ -533,7 +542,7 @@ ptr_t GC_get_stack_base()
void GC_reset_fault_handler()
{
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
(void) sigaction(SIGSEGV, &old_segv_act, 0);
# ifdef _sigargs /* Irix 5.x, not 6.x */
(void) sigaction(SIGBUS, &old_bus_act, 0);
@@ -851,6 +860,72 @@ void GC_register_data_segments()
# else
# ifdef AMIGA
+ void GC_register_data_segments()
+ {
+ struct Process *proc;
+ struct CommandLineInterface *cli;
+ BPTR myseglist;
+ ULONG *data;
+
+ int num;
+
+
+# ifdef __GNUC__
+ ULONG dataSegSize;
+ GC_bool found_segment = FALSE;
+ extern char __data_size[];
+
+       dataSegSize = (ULONG)__data_size + 8;
+       /* We can't reliably take the location of __data_size itself,
+          since it may lie inside the data segment. */
+
+# endif
+
+ proc= (struct Process*)SysBase->ThisTask;
+
+ /* Reference: Amiga Guru Book Pages: 538ff,565,573
+ and XOper.asm */
+ if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS) {
+ if (proc->pr_CLI == NULL) {
+ myseglist = proc->pr_SegList;
+ } else {
+ /* ProcLoaded 'Loaded as a command: '*/
+ cli = BADDR(proc->pr_CLI);
+ myseglist = cli->cli_Module;
+ }
+ } else {
+ ABORT("Not a Process.");
+ }
+
+ if (myseglist == NULL) {
+ ABORT("Arrrgh.. can't find segments, aborting");
+ }
+
+ /* xoper hunks Shell Process */
+
+ num=0;
+ for (data = (ULONG *)BADDR(myseglist); data != NULL;
+ data = (ULONG *)BADDR(data[0])) {
+ if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
+ ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
+# ifdef __GNUC__
+ if (dataSegSize == data[-1]) {
+ found_segment = TRUE;
+ }
+# endif
+ GC_add_roots_inner((char *)&data[1],
+ ((char *)&data[1]) + data[-1], FALSE);
+ }
+ ++num;
+ } /* for */
+# ifdef __GNUC__
+ if (!found_segment) {
+          ABORT("Can't find correct Segments.\nSolution: Use a newer version of ixemul.library");
+ }
+# endif
+ }
+
+#if 0 /* old version */
void GC_register_data_segments()
{
extern struct WBStartup *_WBenchMsg;
@@ -892,6 +967,7 @@ void GC_register_data_segments()
}
}
}
+#endif /* old version */
# else
@@ -932,7 +1008,8 @@ int * etext_addr;
void GC_register_data_segments()
{
-# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS)
+# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
+ && !defined(MACOSX)
# if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
/* As of Solaris 2.3, the Solaris threads implementation */
/* allocates the data structure for the initial thread with */
@@ -946,7 +1023,7 @@ void GC_register_data_segments()
GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
# endif
# endif
-# if !defined(PCR) && defined(NEXT)
+# if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
# endif
# if defined(MACOS)
@@ -1160,6 +1237,95 @@ void GC_win32_free_heap ()
# endif
+#ifdef USE_MUNMAP
+
+/* For now, this only works on some Unix-like systems. If you */
+/* have something else, don't define USE_MUNMAP. */
+/* We assume an ANSI C compiler when this feature is enabled. */
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+/* Compute a page aligned starting address for the unmap */
+/* operation on a block of size bytes starting at start. */
+/* Return 0 if the block is too small to make this feasible. */
+ptr_t GC_unmap_start(ptr_t start, word bytes)
+{
+ ptr_t result = start;
+ /* Round start to next page boundary. */
+ result += GC_page_size - 1;
+ result = (ptr_t)((word)result & ~(GC_page_size - 1));
+ if (result + GC_page_size > start + bytes) return 0;
+ return result;
+}
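+
+/* Example (assuming GC_page_size == 4096): for start == 0x10000ff0   */
+/* and bytes == 0x3000, start rounds up to 0x10001000, and since      */
+/* 0x10001000 + 4096 <= 0x10003ff0 that address is returned; a block  */
+/* that ends before one full page past the boundary yields 0.         */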
+
+/* Compute end address for an unmap operation on the indicated */
+/* block. */
+ptr_t GC_unmap_end(ptr_t start, word bytes)
+{
+ ptr_t end_addr = start + bytes;
+ end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
+ return end_addr;
+}
+
+/* We assume that GC_remap is called on exactly the same range */
+/* as a previous call to GC_unmap. It is safe to consistently */
+/* round the endpoints in both places. */
+void GC_unmap(ptr_t start, word bytes)
+{
+ ptr_t start_addr = GC_unmap_start(start, bytes);
+ ptr_t end_addr = GC_unmap_end(start, bytes);
+ word len = end_addr - start_addr;
+ if (0 == start_addr) return;
+ if (munmap(start_addr, len) != 0) ABORT("munmap failed");
+ GC_unmapped_bytes += len;
+}
+
+
+void GC_remap(ptr_t start, word bytes)
+{
+ static int zero_descr = -1;
+ ptr_t start_addr = GC_unmap_start(start, bytes);
+ ptr_t end_addr = GC_unmap_end(start, bytes);
+ word len = end_addr - start_addr;
+ ptr_t result;
+
+    if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
+    if (-1 == zero_descr) ABORT("Could not open /dev/zero");
+ if (0 == start_addr) return;
+ result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
+ if (result != start_addr) {
+ ABORT("mmap remapping failed");
+ }
+ GC_unmapped_bytes -= len;
+}
+
+/* Two adjacent blocks have already been unmapped and are about to */
+/* be merged. Unmap the whole block. This typically requires */
+/* that we unmap a small section in the middle that was not previously */
+/* unmapped due to alignment constraints. */
+void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
+{
+ ptr_t start1_addr = GC_unmap_start(start1, bytes1);
+ ptr_t end1_addr = GC_unmap_end(start1, bytes1);
+ ptr_t start2_addr = GC_unmap_start(start2, bytes2);
+ ptr_t end2_addr = GC_unmap_end(start2, bytes2);
+ ptr_t start_addr = end1_addr;
+ ptr_t end_addr = start2_addr;
+ word len;
+ GC_ASSERT(start1 + bytes1 == start2);
+ if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
+ if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
+ if (0 == start_addr) return;
+ len = end_addr - start_addr;
+ if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
+ GC_unmapped_bytes += len;
+}
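+
+/* Example (4 KB pages): when merging [0x20000000, 0x20001800) with   */
+/* [0x20001800, 0x20003000), the earlier unmaps could cover only      */
+/* [0x20000000, 0x20001000) and [0x20002000, 0x20003000); this        */
+/* routine unmaps the remaining middle page [0x20001000, 0x20002000). */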
+
+#endif /* USE_MUNMAP */
+
/* Routine for pushing any additional roots. In THREADS */
/* environment, this is also responsible for marking from */
/* thread stacks. In the SRC_M3 case, it also handles */
@@ -1699,7 +1865,7 @@ struct hblk *h;
void GC_dirty_init()
{
-#if defined(SUNOS5SIGS) || defined(IRIX5)
+#if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
struct sigaction act, oldact;
# ifdef IRIX5
act.sa_flags = SA_RESTART;
@@ -2241,7 +2407,11 @@ struct hblk *h;
# if defined (DRSNX)
# include <sys/sparc/frame.h>
# else
-# include <sys/frame.h>
+# if defined(OPENBSD)
+# include <frame.h>
+# else
+# include <sys/frame.h>
+# endif
# endif
# endif
# if NARGS > 6
@@ -2251,6 +2421,15 @@ struct hblk *h;
#ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
+
+#ifdef OPENBSD
+# define FR_SAVFP fr_fp
+# define FR_SAVPC fr_pc
+#else
+# define FR_SAVFP fr_savfp
+# define FR_SAVPC fr_savpc
+#endif
+
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
@@ -2261,11 +2440,11 @@ struct callinfo info[NFRAMES];
frame = (struct frame *) GC_save_regs_in_stack ();
- for (fp = frame -> fr_savfp; fp != 0 && nframes < NFRAMES;
- fp = fp -> fr_savfp, nframes++) {
+ for (fp = frame -> FR_SAVFP; fp != 0 && nframes < NFRAMES;
+ fp = fp -> FR_SAVFP, nframes++) {
register int i;
- info[nframes].ci_pc = fp->fr_savpc;
+ info[nframes].ci_pc = fp->FR_SAVPC;
for (i = 0; i < NARGS; i++) {
info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
}
diff --git a/reclaim.c b/reclaim.c
index 407b4c68..30859469 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -19,7 +19,6 @@
signed_word GC_mem_found = 0;
/* Number of words of memory reclaimed */
-# ifdef FIND_LEAK
static void report_leak(p, sz)
ptr_t p;
word sz;
@@ -39,13 +38,10 @@ word sz;
}
# define FOUND_FREE(hblk, word_no) \
- if (abort_if_found) { \
+ { \
report_leak((ptr_t)hblk + WORDS_TO_BYTES(word_no), \
HDR(hblk) -> hb_sz); \
}
-# else
-# define FOUND_FREE(hblk, word_no)
-# endif
/*
* reclaim phase
@@ -82,10 +78,9 @@ register hdr * hhdr;
* Clears unmarked objects.
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_clear(hbp, hhdr, sz, list, abort_if_found)
+ptr_t GC_reclaim_clear(hbp, hhdr, sz, list)
register struct hblk *hbp; /* ptr to current heap block */
register hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
register word sz;
{
@@ -105,7 +100,6 @@ register word sz;
if( mark_bit_from_hdr(hhdr, word_no) ) {
p += sz;
} else {
- FOUND_FREE(hbp, word_no);
INCR_WORDS(sz);
/* object is available - put on list */
obj_link(p) = list;
@@ -131,10 +125,9 @@ register word sz;
* A special case for 2 word composite objects (e.g. cons cells):
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_clear2(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim_clear2(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -146,7 +139,6 @@ register ptr_t list;
register int i;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
p[start_displ+1] = 0; \
@@ -179,10 +171,9 @@ register ptr_t list;
* Another special case for 4 word composite objects:
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_clear4(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim_clear4(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -193,7 +184,6 @@ register ptr_t list;
register word mark_word;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
p[start_displ+1] = 0; \
@@ -239,10 +229,9 @@ register ptr_t list;
/* The same thing, but don't clear objects: */
/*ARGSUSED*/
-ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list, abort_if_found)
+ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list)
register struct hblk *hbp; /* ptr to current heap block */
register hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
register word sz;
{
@@ -260,7 +249,6 @@ register word sz;
/* go through all words in block */
while( p <= plim ) {
if( !mark_bit_from_hdr(hhdr, word_no) ) {
- FOUND_FREE(hbp, word_no);
INCR_WORDS(sz);
/* object is available - put on list */
obj_link(p) = list;
@@ -275,15 +263,42 @@ register word sz;
return(list);
}
+/* Don't really reclaim objects, just check for unmarked ones: */
+/*ARGSUSED*/
+void GC_reclaim_check(hbp, hhdr, sz)
+register struct hblk *hbp; /* ptr to current heap block */
+register hdr * hhdr;
+register word sz;
+{
+ register int word_no;
+ register word *p, *plim;
+# ifdef GATHERSTATS
+ register int n_words_found = 0;
+# endif
+
+ p = (word *)(hbp->hb_body);
+ word_no = HDR_WORDS;
+ plim = (word *)((((word)hbp) + HBLKSIZE)
+ - WORDS_TO_BYTES(sz));
+
+ /* go through all words in block */
+ while( p <= plim ) {
+ if( !mark_bit_from_hdr(hhdr, word_no) ) {
+ FOUND_FREE(hbp, word_no);
+ }
+ p += sz;
+ word_no += sz;
+ }
+}
+
#ifndef SMALL_CONFIG
/*
* Another special case for 2 word atomic objects:
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_uninit2(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim_uninit2(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -295,7 +310,6 @@ register ptr_t list;
register int i;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
INCR_WORDS(2); \
@@ -327,10 +341,9 @@ register ptr_t list;
* Another special case for 4 word atomic objects:
*/
/*ARGSUSED*/
-ptr_t GC_reclaim_uninit4(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim_uninit4(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -341,7 +354,6 @@ register ptr_t list;
register word mark_word;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
INCR_WORDS(4); \
@@ -382,10 +394,9 @@ register ptr_t list;
/* Finally the one word case, which never requires any clearing: */
/*ARGSUSED*/
-ptr_t GC_reclaim1(hbp, hhdr, list, abort_if_found)
+ptr_t GC_reclaim1(hbp, hhdr, list)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -397,7 +408,6 @@ register ptr_t list;
register int i;
# define DO_OBJ(start_displ) \
if (!(mark_word & ((word)1 << start_displ))) { \
- FOUND_FREE(hbp, p - (word *)hbp + start_displ); \
p[start_displ] = (word)list; \
list = (ptr_t)(p+start_displ); \
INCR_WORDS(1); \
@@ -433,9 +443,9 @@ register ptr_t list;
* If entirely empty blocks are to be completely deallocated, then
* caller should perform that check.
*/
-void GC_reclaim_small_nonempty_block(hbp, abort_if_found)
+void GC_reclaim_small_nonempty_block(hbp, report_if_found)
register struct hblk *hbp; /* ptr to current heap block */
-int abort_if_found; /* Abort if a reclaimable object is found */
+int report_if_found;	/* Report (don't reclaim) inaccessible objects */
{
hdr * hhdr;
register word sz; /* size of objects in current block */
@@ -451,38 +461,40 @@ int abort_if_found; /* Abort if a reclaimable object is found */
flh = &(ok -> ok_freelist[sz]);
GC_write_hint(hbp);
- if (ok -> ok_init) {
+ if (report_if_found) {
+ GC_reclaim_check(hbp, hhdr, sz);
+ } else if (ok -> ok_init) {
switch(sz) {
# ifndef SMALL_CONFIG
case 1:
- *flh = GC_reclaim1(hbp, hhdr, *flh, abort_if_found);
+ *flh = GC_reclaim1(hbp, hhdr, *flh);
break;
case 2:
- *flh = GC_reclaim_clear2(hbp, hhdr, *flh, abort_if_found);
+ *flh = GC_reclaim_clear2(hbp, hhdr, *flh);
break;
case 4:
- *flh = GC_reclaim_clear4(hbp, hhdr, *flh, abort_if_found);
+ *flh = GC_reclaim_clear4(hbp, hhdr, *flh);
break;
# endif
default:
- *flh = GC_reclaim_clear(hbp, hhdr, sz, *flh, abort_if_found);
+ *flh = GC_reclaim_clear(hbp, hhdr, sz, *flh);
break;
}
} else {
switch(sz) {
# ifndef SMALL_CONFIG
case 1:
- *flh = GC_reclaim1(hbp, hhdr, *flh, abort_if_found);
+ *flh = GC_reclaim1(hbp, hhdr, *flh);
break;
case 2:
- *flh = GC_reclaim_uninit2(hbp, hhdr, *flh, abort_if_found);
+ *flh = GC_reclaim_uninit2(hbp, hhdr, *flh);
break;
case 4:
- *flh = GC_reclaim_uninit4(hbp, hhdr, *flh, abort_if_found);
+ *flh = GC_reclaim_uninit4(hbp, hhdr, *flh);
break;
# endif
default:
- *flh = GC_reclaim_uninit(hbp, hhdr, sz, *flh, abort_if_found);
+ *flh = GC_reclaim_uninit(hbp, hhdr, sz, *flh);
break;
}
}
@@ -494,11 +506,12 @@ int abort_if_found; /* Abort if a reclaimable object is found */
* to the heap block free list.
* Otherwise enqueue the block for later processing
* by GC_reclaim_small_nonempty_block.
- * If abort_if_found is TRUE, then process any block immediately.
+ * If report_if_found is TRUE, then process any block immediately, and
+ * simply report free objects; do not actually reclaim them.
*/
-void GC_reclaim_block(hbp, abort_if_found)
+void GC_reclaim_block(hbp, report_if_found)
register struct hblk *hbp; /* ptr to current heap block */
-word abort_if_found; /* Abort if a reclaimable object is found */
+word report_if_found;	/* Report (don't reclaim) inaccessible objects */
{
register hdr * hhdr;
register word sz; /* size of objects in current block */
@@ -511,16 +524,19 @@ word abort_if_found; /* Abort if a reclaimable object is found */
if( sz > MAXOBJSZ ) { /* 1 big object */
if( !mark_bit_from_hdr(hhdr, HDR_WORDS) ) {
- FOUND_FREE(hbp, HDR_WORDS);
-# ifdef GATHERSTATS
+ if (report_if_found) {
+ FOUND_FREE(hbp, HDR_WORDS);
+ } else {
+# ifdef GATHERSTATS
GC_mem_found += sz;
-# endif
- GC_freehblk(hbp);
+# endif
+ GC_freehblk(hbp);
+ }
}
} else {
GC_bool empty = GC_block_empty(hhdr);
- if (abort_if_found) {
- GC_reclaim_small_nonempty_block(hbp, (int)abort_if_found);
+ if (report_if_found) {
+ GC_reclaim_small_nonempty_block(hbp, (int)report_if_found);
} else if (empty) {
# ifdef GATHERSTATS
GC_mem_found += BYTES_TO_WORDS(HBLKSIZE);
@@ -600,11 +616,11 @@ void GC_print_block_list()
#endif /* NO_DEBUGGING */
/*
- * Do the same thing on the entire heap, after first clearing small object
- * free lists (if we are not just looking for leaks).
+ * Perform GC_reclaim_block on the entire heap, after first clearing
+ * small object free lists (if we are not just looking for leaks).
*/
-void GC_start_reclaim(abort_if_found)
-int abort_if_found; /* Abort if a GC_reclaimable object is found */
+void GC_start_reclaim(report_if_found)
+int report_if_found;	/* Report (don't reclaim) inaccessible objects */
{
int kind;
@@ -617,7 +633,7 @@ int abort_if_found; /* Abort if a GC_reclaimable object is found */
register struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
if (rlist == 0) continue; /* This kind not used. */
- if (!abort_if_found) {
+ if (!report_if_found) {
lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJSZ+1]);
for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
*fop = 0;
@@ -637,7 +653,7 @@ int abort_if_found; /* Abort if a GC_reclaimable object is found */
/* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
/* or enqueue the block for later processing. */
- GC_apply_to_all_blocks(GC_reclaim_block, (word)abort_if_found);
+ GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);
}
diff --git a/solaris_threads.c b/solaris_threads.c
index 1f5ebcdc..65b2c651 100644
--- a/solaris_threads.c
+++ b/solaris_threads.c
@@ -616,6 +616,25 @@ GC_thread GC_lookup_thread(thread_t id)
return(p);
}
+# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
+
+word GC_get_orig_stack_size() {
+ struct rlimit rl;
+ static int warned = 0;
+	word result;
+
+ if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
+ result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
+ if (result > MAX_ORIG_STACK_SIZE) {
+ if (!warned) {
+	    WARN("Large stack limit (%ld): only scanning 8 MB", result);
+ warned = 1;
+ }
+ result = MAX_ORIG_STACK_SIZE;
+ }
+ return result;
+}
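+
+/* Example: with HBLKSIZE == 4096, a soft limit of 0x7ff123 bytes is  */
+/* rounded down to 0x7ff000 and returned; RLIM_INFINITY, or anything  */
+/* over 8 MB, is clamped to MAX_ORIG_STACK_SIZE with a one-time       */
+/* warning.                                                           */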
+
/* Notify dirty bit implementation of unused parts of my stack. */
/* Caller holds allocation lock. */
void GC_my_stack_limits()
@@ -628,12 +647,9 @@ void GC_my_stack_limits()
if (stack_size == 0) {
/* original thread */
- struct rlimit rl;
-
- if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
/* Empirically, what should be the stack page with lowest */
/* address is actually inaccessible. */
- stack_size = ((word)rl.rlim_cur & ~(HBLKSIZE-1)) - GC_page_sz;
+ stack_size = GC_get_orig_stack_size() - GC_page_sz;
stack = GC_stackbottom - stack_size + GC_page_sz;
} else {
stack = me -> stack;
@@ -671,8 +687,7 @@ void GC_push_all_stacks()
top = p -> stack + p -> stack_size;
} else {
/* The original stack. */
- if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
- bottom = GC_stackbottom - rl.rlim_cur + GC_page_sz;
+ bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_sz;
top = GC_stackbottom;
}
if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
diff --git a/sparc_mach_dep.s b/sparc_mach_dep.s
index a6a0a241..9831c6ca 100644
--- a/sparc_mach_dep.s
+++ b/sparc_mach_dep.s
@@ -1,4 +1,4 @@
-! SPARCompiler 3.0 and later apparently no loner handles
+! SPARCompiler 3.0 and later apparently no longer handles
! asm outside functions. So we need a separate .s file
! This is only set up for SunOS 5, not SunOS 4.
! Assumes this is called before the stack contents are
@@ -35,4 +35,4 @@ loop:
- \ No newline at end of file
+
diff --git a/sparc_sunos4_mach_dep.s b/sparc_sunos4_mach_dep.s
index 7accadd3..41858073 100644
--- a/sparc_sunos4_mach_dep.s
+++ b/sparc_sunos4_mach_dep.s
@@ -1,4 +1,4 @@
-! SPARCompiler 3.0 and later apparently no loner handles
+! SPARCompiler 3.0 and later apparently no longer handles
! asm outside functions. So we need a separate .s file
! This is only set up for SunOS 4.
! Assumes this is called before the stack contents are
diff --git a/test.c b/test.c
index 0fdb030a..b65632c9 100644
--- a/test.c
+++ b/test.c
@@ -362,14 +362,14 @@ void reverse_test()
d = uncollectable_ints(1, 100);
e = uncollectable_ints(1, 1);
/* Check that realloc updates object descriptors correctly */
- f = (sexpr *)GC_malloc(4 * sizeof(sexpr));
- f = (sexpr *)GC_realloc((GC_PTR)f, 6 * sizeof(sexpr));
+ f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
+ f = (sexpr *)GC_REALLOC((GC_PTR)f, 6 * sizeof(sexpr));
f[5] = ints(1,17);
- g = (sexpr *)GC_malloc(513 * sizeof(sexpr));
- g = (sexpr *)GC_realloc((GC_PTR)g, 800 * sizeof(sexpr));
+ g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
+ g = (sexpr *)GC_REALLOC((GC_PTR)g, 800 * sizeof(sexpr));
g[799] = ints(1,18);
- h = (sexpr *)GC_malloc(1025 * sizeof(sexpr));
- h = (sexpr *)GC_realloc((GC_PTR)h, 2000 * sizeof(sexpr));
+ h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
+ h = (sexpr *)GC_REALLOC((GC_PTR)h, 2000 * sizeof(sexpr));
h[1999] = ints(1,19);
/* Try to force some collections and reuse of small list elements */
for (i = 0; i < 10; i++) {
@@ -610,8 +610,8 @@ thread_key_t fl_key;
void * alloc8bytes()
{
-# ifdef SMALL_CONFIG
- return(GC_malloc(8));
+# if defined(SMALL_CONFIG) || defined(GC_DEBUG)
+ return(GC_MALLOC(8));
# else
void ** my_free_list_ptr;
void * my_free_list;
@@ -930,7 +930,7 @@ void check_heap_stats()
int late_finalize_count = 0;
if (sizeof(char *) > 4) {
- max_heap_sz = 13000000;
+ max_heap_sz = 15000000;
} else {
max_heap_sz = 11000000;
}
diff --git a/version.h b/version.h
index 88858fa4..97ac5f5e 100644
--- a/version.h
+++ b/version.h
@@ -1,6 +1,6 @@
-#define GC_VERSION_MAJOR 4
-#define GC_VERSION_MINOR 14
-#define GC_ALPHA_VERSION GC_NOT_ALPHA
+#define GC_VERSION_MAJOR 5
+#define GC_VERSION_MINOR 0
+#define GC_ALPHA_VERSION 3
# define GC_NOT_ALPHA 0xff