summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHans Boehm <boehm@acm.org>1999-06-30 00:00:00 +0000
committerIvan Maidanski <ivmai@mail.ru>2014-05-17 17:54:12 +0400
commitfcbe8923d01fc6e199407161ed3c9d4dd39582d2 (patch)
tree2559bbdd97185652feecdcf8c12476374d65a478
parent3bd502ac2307ef9ffaec150e653fc2a117a74989 (diff)
downloadbdwgc-fcbe8923d01fc6e199407161ed3c9d4dd39582d2.tar.gz
gc5.0alpha1 tarball importgc5_0alpha1
-rw-r--r--Makefile41
-rw-r--r--README50
-rw-r--r--README.amiga47
-rw-r--r--allchblk.c657
-rw-r--r--alloc.c40
-rw-r--r--cord/cordxtra.c2
-rw-r--r--dbg_mlc.c8
-rw-r--r--dyn_load.c3
-rw-r--r--gc_hdrs.h6
-rw-r--r--gc_priv.h55
-rw-r--r--gcconfig.h48
-rw-r--r--headers.c86
-rw-r--r--include/private/gc_hdrs.h6
-rw-r--r--include/private/gc_priv.h55
-rw-r--r--include/private/gcconfig.h48
-rw-r--r--linux_threads.c5
-rw-r--r--mach_dep.c34
-rw-r--r--malloc.c12
-rw-r--r--mallocx.c12
-rw-r--r--mark.c6
-rw-r--r--misc.c7
-rw-r--r--os_dep.c242
-rw-r--r--solaris_threads.c27
-rw-r--r--sparc_mach_dep.s4
-rw-r--r--sparc_sunos4_mach_dep.s2
-rw-r--r--version.h6
26 files changed, 1191 insertions, 318 deletions
diff --git a/Makefile b/Makefile
index 063d394a..4d0d3529 100644
--- a/Makefile
+++ b/Makefile
@@ -98,6 +98,10 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# fragmentation, but generally better performance for large heaps.
# -DUSE_MMAP use MMAP instead of sbrk to get new memory.
# Works for Solaris and Irix.
+# -DUSE_MUNMAP causes memory to be returned to the OS under the right
+# circumstances. This currently disables VM-based incremental collection.
+# This is currently experimental, and works only under some Unix and
+# Linux versions.
# -DMMAP_STACKS (for Solaris threads) Use mmap from /dev/zero rather than
# GC_scratch_alloc() to get stack memory.
# -DPRINT_BLACK_LIST Whenever a black list entry is added, i.e. whenever
@@ -111,6 +115,11 @@ CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DNO_EXECUTE_PERMISSION -DALL_INT
# large block allocator and/or collecting more frequently.
# If you expect the allocator to promptly use an explicitly expanded
# heap, this is highly recommended.
+# -DGC_ASSERTIONS Enable some internal GC assertion checking. Currently
+# this facility is only used in a few places. It is intended primarily
+# for debugging of the garbage collector itself, but could also
+# occasionally be useful for debugging of client code. Slows down the
+# collector somewhat, but not drastically.
#
@@ -199,19 +208,23 @@ mark.o typd_mlc.o finalize.o: $(srcdir)/gc_mark.h
base_lib gc.a: $(OBJS) dyn_load.o $(UTILS)
echo > base_lib
- rm -f on_sparc_sunos5_1
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_1
+ rm -f dont_ar_1
+ ./if_mach SPARC SUNOS5 touch dont_ar_1
./if_mach SPARC SUNOS5 $(AR) rus gc.a $(OBJS) dyn_load.o
- ./if_not_there on_sparc_sunos5_1 $(AR) ru gc.a $(OBJS) dyn_load.o
- ./if_not_there on_sparc_sunos5_1 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_1
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a $(OBJS) dyn_load.o
+ ./if_not_there dont_ar_1 $(AR) ru gc.a $(OBJS) dyn_load.o
+ ./if_not_there dont_ar_1 $(RANLIB) gc.a || cat /dev/null
# ignore ranlib failure; that usually means it doesn't exist, and isn't needed
cords: $(CORD_OBJS) cord/cordtest $(UTILS)
- rm -f on_sparc_sunos5_3
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_3
+ rm -f dont_ar_3
+ ./if_mach SPARC SUNOS5 touch dont_ar_3
./if_mach SPARC SUNOS5 $(AR) rus gc.a $(CORD_OBJS)
- ./if_not_there on_sparc_sunos5_3 $(AR) ru gc.a $(CORD_OBJS)
- ./if_not_there on_sparc_sunos5_3 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_3
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a $(CORD_OBJS)
+ ./if_not_there dont_ar_3 $(AR) ru gc.a $(CORD_OBJS)
+ ./if_not_there dont_ar_3 $(RANLIB) gc.a || cat /dev/null
gc_cpp.o: $(srcdir)/gc_cpp.cc $(srcdir)/gc_cpp.h $(srcdir)/gc.h Makefile
$(CXX) -c $(CXXFLAGS) $(srcdir)/gc_cpp.cc
@@ -223,11 +236,13 @@ base_lib $(UTILS)
./if_not_there test_cpp $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a `./threadlibs`
c++: gc_cpp.o $(srcdir)/gc_cpp.h test_cpp
- rm -f on_sparc_sunos5_4
- ./if_mach SPARC SUNOS5 touch on_sparc_sunos5_4
+ rm -f dont_ar_4
+ ./if_mach SPARC SUNOS5 touch dont_ar_4
./if_mach SPARC SUNOS5 $(AR) rus gc.a gc_cpp.o
- ./if_not_there on_sparc_sunos5_4 $(AR) ru gc.a gc_cpp.o
- ./if_not_there on_sparc_sunos5_4 $(RANLIB) gc.a || cat /dev/null
+ ./if_mach M68K AMIGA touch dont_ar_4
+ ./if_mach M68K AMIGA $(AR) -vrus gc.a gc_cpp.o
+ ./if_not_there dont_ar_4 $(AR) ru gc.a gc_cpp.o
+ ./if_not_there dont_ar_4 $(RANLIB) gc.a || cat /dev/null
./test_cpp 1
echo > c++
@@ -276,6 +291,7 @@ mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_sgi_mach_dep.s $(srcdir)/mips_ul
./if_mach ALPHA "" $(AS) -o mach_dep.o $(srcdir)/alpha_mach_dep.s
./if_mach SPARC SUNOS5 $(AS) -o mach_dep.o $(srcdir)/sparc_mach_dep.s
./if_mach SPARC SUNOS4 $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
+ ./if_mach SPARC OPENBSD $(AS) -o mach_dep.o $(srcdir)/sparc_sunos4_mach_dep.s
./if_not_there mach_dep.o $(CC) -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
mark_rts.o: $(srcdir)/mark_rts.c if_mach if_not_there $(UTILS)
@@ -313,6 +329,7 @@ cord/de: $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(UTILS)
./if_mach RS6000 "" $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
./if_mach I386 LINUX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses `./threadlibs`
./if_mach ALPHA LINUX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
+ ./if_mach M68K AMIGA $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
./if_not_there cord/de $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES) `./threadlibs`
if_mach: $(srcdir)/if_mach.c $(srcdir)/gcconfig.h
diff --git a/README b/README
index 5c572ce3..4c3dfe2f 100644
--- a/README
+++ b/README
@@ -11,7 +11,7 @@ Permission to modify the code and to distribute modified code is granted,
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
-This is version 4.14 of a conservative garbage collector for C and C++.
+This is version 5.0alpha1 of a conservative garbage collector for C and C++.
You might find a more recent version of this at
@@ -23,7 +23,7 @@ HISTORY -
projects supported in part by the National Science Foundation
and the Defense Advance Research Projects Agency.
Much of the code was rewritten by Hans-J. Boehm at Xerox PARC
-and is now maintained by him at SGI (boehm@sgi.com).
+and is now maintained by him at SGI (boehm@sgi.com or boehm@acm.org).
Some other contributors:
@@ -40,8 +40,8 @@ Robert Brazile (brazile@diamond.bbn.com) originally supplied the ULTRIX code.
Al Dosser (dosser@src.dec.com) and Regis Cridlig (Regis.Cridlig@cl.cam.ac.uk)
subsequently provided updates and information on variation between ULTRIX
systems. Parag Patel (parag@netcom.com) supplied the A/UX code.
-Jesper Peterson(jep@mtiame.mtia.oz.au) and
-Michel Schinz supplied the Amiga port.
+Jesper Peterson(jep@mtiame.mtia.oz.au), Michel Schinz, and
+Martin Tauchmann (martintauchmann@bigfoot.com) supplied the Amiga port.
Thomas Funke (thf@zelator.in-berlin.de(?)) and
Brian D.Carlstrom (bdc@clark.lcs.mit.edu) supplied the NeXT ports.
Douglas Steel (doug@wg.icl.co.uk) provided ICL DRS6000 code.
@@ -612,7 +612,7 @@ reclaimed. Exclusive-or'ing forward and backward links in a list
doesn't cut it.
Some C optimizers may lose the last undisguised pointer to a memory
object as a consequence of clever optimizations. This has almost
-never been observed in practice. Send mail to boehm@sgi.com
+never been observed in practice. Send mail to boehm@acm.org
for suggestions on how to fix your compiler.
This is not a real-time collector. In the standard configuration,
percentage of time required for collection should be constant across
@@ -621,7 +621,7 @@ heap sizes. But collection pauses will increase for larger heaps.
per MB of accessible memory that needs to be scanned. Your mileage
may vary.) The incremental/generational collection facility helps,
but is portable only if "stubborn" allocation is used.
- Please address bug reports to boehm@sgi.com. If you are
+ Please address bug reports to boehm@acm.org. If you are
contemplating a major addition, you might also send mail to ask whether
it's already been done (or whether we tried and discarded it).
@@ -1452,6 +1452,37 @@ Since 4.14alpha1
Since 4.14alpha2
- changed STACKBOTTOM for DJGPP (Thanks to Salvador Eduardo Tropea).
+Since 4.14
+ - Reworked large block allocator. Now uses multiple doubly linked free
+ lists to approximate best fit.
+ - Changed heap expansion heuristic. Entirely free blocks are no longer
+ counted towards the heap size. This seems to have a major impact on
+ heap size stability; the old version could expand the heap way too
+ much in the presence of large block fragmentation.
+ - added -DGC_ASSERTIONS and some simple assertions inside the collector.
+ This is mainly for collector debugging.
+ - added -DUSE_MUNMAP to allow the heap to shrink. Supported on only
+ a few UNIX-like platforms for now.
+ - added GC_dump_regions() for debugging of fragmentation issues.
+ - Changed PowerPC pointer alignment under Linux to 4. (This needs
+ checking by someone who has one. The suggestions came to me via a
+ rather circuitous path.)
+ - Changed the Linux/Alpha port to walk the data segment backwards until
+ it encounters a SIGSEGV. The old way to find the start of the data
+ segment broke with a recent release.
+ - cordxtra.c needed to call GC_REGISTER_FINALIZER instead of
+ GC_register_finalizer, so that it would continue to work with GC_DEBUG.
+ - allochblk sometimes cleared the wrong block for debugging purposes
+ when it dropped blacklisted blocks. This could result in spurious
+ error reports with GC_DEBUG.
+ - added MACOS X Server support. (Thanks to Andrew Stone.)
+ - Changed the Solaris threads code to ignore stack limits > 8 MB with
+ a warning. Empirically, it is not safe to access arbitrary pages
+ in such large stacks. And the dirty bit implementation does not
+ guarantee that none of them will be accessed.
+ - Integrated Martin Tauchmann's Amiga changes.
+ - Integrated James Dominy's OpenBSD/SPARC port.
+
To do:
- Very large root set sizes (> 16 MB or so) could cause the collector
to abort with an unexpected mark stack overflow. (Thanks again to
@@ -1469,9 +1500,4 @@ To do:
blocks reside in the newly allocated heap section, the heuristic for
temporarily ignoring black-listing fails, and the heap grows too much.
(This was observed in only one case, and could be worked around, but ...)
- - I've started work on rewriting the large block allocator to use approximate
- best fit. There are rare cases in which the current allocator results in
- excessive large block fragmentation, even with the 4.13 fixes. This should
- also reduce large block allocation time, whcih has become occasionally
- noticable in 4.13.
-
+ - Some platform specific updates are waiting for 4.15alpha1.
diff --git a/README.amiga b/README.amiga
index 865642be..47b15884 100644
--- a/README.amiga
+++ b/README.amiga
@@ -1,4 +1,51 @@
+===========================================================================
+ Martin Tauchmann's notes (1-Apr-99)
+===========================================================================
+
+Works now, also with the GNU-C compiler V2.7.2.1. <ftp://ftp.unina.it/pub/amiga/geekgadgets/amiga/m68k/snapshots/971125/amiga-bin/>
+Modify the `Makefile`
+CC=cc $(ABI_FLAG)
+to
+CC=gcc $(ABI_FLAG)
+
+TECHNICAL NOTES
+
+- `GC_get_stack_base()`, `GC_register_data_segments()` works now with every
+ C compiler; also Workbench.
+
+- Removed AMIGA_SKIP_SEG, but the Code-Segment must not be scanned by GC.
+
+
+PROBLEMS
+- When the Linker doesn't merge all Code-Segments into a single one. LD of GCC
+ always does it.
+
+- With ixemul.library V47.3, when a GC program is launched from another program
+ (example: `Make` or `if_mach M68K AMIGA gctest`), `GC_register_data_segments()`
+ found the Segment-List of the caller program.
+ Can be fixed, if the run-time initialization code (for C programs, usually *crt0*)
+ support `__data` and `__bss`.
+
+- PowerPC Amiga currently not supported.
+
+- Dynamic libraries (dyn_load.c) not supported.
+
+
+TESTED WITH SOFTWARE
+
+`Optimized Oberon 2 C` (oo2c) <http://cognac.informatik.uni-kl.de/download/index.html>
+
+
+TESTED WITH HARDWARE
+
+MC68030
+
+
+CONTACT
+Please, contact me at <martintauchmann@bigfoot.com>, when you change the
+Amiga port. <http://martintauchmann.home.pages.de>
+
===========================================================================
Michel Schinz's notes
===========================================================================
diff --git a/allchblk.c b/allchblk.c
index ff94b480..d03afa99 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -1,7 +1,7 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998-1999 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -12,7 +12,6 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, August 9, 1995 5:08 pm PDT */
#define DEBUG
#undef DEBUG
@@ -21,39 +20,68 @@
/*
- * allocate/free routines for heap blocks
- * Note that everything called from outside the garbage collector
- * should be prepared to abort at any point as the result of a signal.
+ * Free heap blocks are kept on one of several free lists,
+ * depending on the size of the block. Each free list is doubly linked.
+ * Adjacent free blocks are coalesced.
*/
-/*
- * Free heap blocks are kept on a list sorted by address.
- * The hb_hdr.hbh_sz field of a free heap block contains the length
- * (in bytes) of the entire block.
- * Neighbors are coalesced.
- */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
/* largest block we will allocate starting on a black */
/* listed block. Must be >= HBLKSIZE. */
-struct hblk * GC_hblkfreelist = 0;
-struct hblk *GC_savhbp = (struct hblk *)0; /* heap block preceding next */
- /* block to be examined by */
- /* GC_allochblk. */
+# define UNIQUE_THRESHOLD 32
+ /* Sizes up to this many HBLKs each have their own free list */
+# define HUGE_THRESHOLD 256
+ /* Sizes of at least this many heap blocks are mapped to a */
+ /* single free list. */
+# define FL_COMPRESSION 8
+ /* In between sizes map this many distinct sizes to a single */
+ /* bin. */
+
+# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
+ + UNIQUE_THRESHOLD
+
+struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
+
+/* Map a number of blocks to the appropriate large block free list index. */
+int GC_hblk_fl_from_blocks(blocks_needed)
+word blocks_needed;
+{
+ if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
+ if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
+ return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
+ + UNIQUE_THRESHOLD;
+
+}
+
+# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
+# define PHDR(hhdr) HDR(hhdr -> hb_prev)
+# define NHDR(hhdr) HDR(hhdr -> hb_next)
+
+# ifdef USE_MUNMAP
+# define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
+# else /* !USE_MMAP */
+# define IS_MAPPED(hhdr) 1
+# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
- struct hblk * h = GC_hblkfreelist;
+ struct hblk * h;
word total_free = 0;
- hdr * hhdr = HDR(h);
+ hdr * hhdr;
word sz;
+ int i;
- while (h != 0) {
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
+ while (h != 0) {
+ hhdr = HDR(h);
sz = hhdr -> hb_sz;
- GC_printf2("0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
+ GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
total_free += sz;
if (GC_is_black_listed(h, HBLKSIZE) != 0) {
GC_printf0("start black listed\n");
@@ -63,11 +91,90 @@ void GC_print_hblkfreelist()
GC_printf0("not black listed\n");
}
h = hhdr -> hb_next;
- hhdr = HDR(h);
+ }
+ }
+ if (total_free != GC_large_free_bytes) {
+ GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
+ (unsigned long) GC_large_free_bytes);
}
GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
+/* Return the free list index on which the block described by the header */
+/* appears, or -1 if it appears nowhere. */
+int free_list_index_of(wanted)
+hdr * wanted;
+{
+ struct hblk * h;
+ hdr * hhdr;
+ int i;
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ while (h != 0) {
+ hhdr = HDR(h);
+ if (hhdr == wanted) return i;
+ h = hhdr -> hb_next;
+ }
+ }
+ return -1;
+}
+
+void GC_dump_regions()
+{
+ int i;
+ ptr_t start, end;
+ ptr_t p;
+ size_t bytes;
+ hdr *hhdr;
+ for (i = 0; i < GC_n_heap_sects; ++i) {
+ start = GC_heap_sects[i].hs_start;
+ bytes = GC_heap_sects[i].hs_bytes;
+ end = start + bytes;
+ /* Merge in contiguous sections. */
+ while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
+ ++i;
+ end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
+ }
+ GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
+ for (p = start; p < end;) {
+ hhdr = HDR(p);
+ GC_printf1("\t0x%lx ", (unsigned long)p);
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ GC_printf1("Missing header!!\n", hhdr);
+ p += HBLKSIZE;
+ continue;
+ }
+ if (HBLK_IS_FREE(hhdr)) {
+ int correct_index = GC_hblk_fl_from_blocks(
+ divHBLKSZ(hhdr -> hb_sz));
+ int actual_index;
+
+ GC_printf1("\tfree block of size 0x%lx bytes",
+ (unsigned long)(hhdr -> hb_sz));
+ if (IS_MAPPED(hhdr)) {
+ GC_printf0("\n");
+ } else {
+ GC_printf0("(unmapped)\n");
+ }
+ actual_index = free_list_index_of(hhdr);
+ if (-1 == actual_index) {
+ GC_printf1("\t\tBlock not on free list %ld!!\n",
+ correct_index);
+ } else if (correct_index != actual_index) {
+ GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
+ actual_index, correct_index);
+ }
+ p += hhdr -> hb_sz;
+ } else {
+ GC_printf1("\tused for blocks of size 0x%lx bytes\n",
+ (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
+ p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ }
+ }
+ }
+}
+
# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and */
@@ -100,20 +207,265 @@ unsigned char flags;
return(TRUE);
}
-#ifdef EXACT_FIRST
-# define LAST_TRIP 2
-#else
-# define LAST_TRIP 1
-#endif
+#define FL_UNKNOWN -1
+/*
+ * Remove hhdr from the appropriate free list.
+ * We assume it is on the nth free list, or on the size
+ * appropriate free list if n is FL_UNKNOWN.
+ */
+void GC_remove_from_fl(hhdr, n)
+hdr * hhdr;
+int n;
+{
+ GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+ if (hhdr -> hb_prev == 0) {
+ int index;
+ if (FL_UNKNOWN == n) {
+ index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+ } else {
+ index = n;
+ }
+ GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
+ GC_hblkfreelist[index] = hhdr -> hb_next;
+ } else {
+ PHDR(hhdr) -> hb_next = hhdr -> hb_next;
+ }
+ if (0 != hhdr -> hb_next) {
+ GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
+ NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;
+ }
+}
-word GC_max_hblk_size = HBLKSIZE;
+/*
+ * Return a pointer to the free block ending just before h, if any.
+ */
+struct hblk * GC_free_block_ending_at(h)
+struct hblk *h;
+{
+ struct hblk * p = h - 1;
+ hdr * phdr = HDR(p);
+
+ while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
+ p = FORWARDED_ADDR(p,phdr);
+ phdr = HDR(p);
+ }
+ if (0 != phdr && HBLK_IS_FREE(phdr)) return p;
+ p = GC_prev_block(h - 1);
+ if (0 != p) {
+ phdr = HDR(p);
+ if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
+ return p;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Add hhdr to the appropriate free list.
+ * We maintain individual free lists sorted by address.
+ */
+void GC_add_to_fl(h, hhdr)
+struct hblk *h;
+hdr * hhdr;
+{
+ int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+ struct hblk *second = GC_hblkfreelist[index];
+# ifdef GC_ASSERTIONS
+ struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
+ hdr * nexthdr = HDR(next);
+ struct hblk *prev = GC_free_block_ending_at(h);
+ hdr * prevhdr = HDR(prev);
+ GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
+ GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
+# endif
+ GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+ GC_hblkfreelist[index] = h;
+ hhdr -> hb_next = second;
+ hhdr -> hb_prev = 0;
+ if (0 != second) HDR(second) -> hb_prev = h;
+ GC_invalidate_map(hhdr);
+}
+
+#ifdef USE_MUNMAP
+
+/* Unmap blocks that haven't been recently touched. This is the only */
+/* way blocks are ever unmapped. */
+void GC_unmap_old(void)
+{
+ struct hblk * h;
+ hdr * hhdr;
+ word sz;
+ unsigned short last_rec, threshold;
+ int i;
+# define UNMAP_THRESHOLD 6
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
+ hhdr = HDR(h);
+ if (!IS_MAPPED(hhdr)) continue;
+ threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
+ last_rec = hhdr -> hb_last_reclaimed;
+ if (last_rec > GC_gc_no
+ || last_rec < threshold && threshold < GC_gc_no
+ /* not recently wrapped */) {
+ sz = hhdr -> hb_sz;
+ GC_unmap((ptr_t)h, sz);
+ hhdr -> hb_flags |= WAS_UNMAPPED;
+ }
+ }
+ }
+}
+
+/* Merge all unmapped blocks that are adjacent to other free */
+/* blocks. This may involve remapping, since all blocks are either */
+/* fully mapped or fully unmapped. */
+void GC_merge_unmapped(void)
+{
+ struct hblk * h, *next;
+ hdr * hhdr, *nexthdr;
+ word size, nextsize;
+ int i;
+
+ for (i = 0; i <= N_HBLK_FLS; ++i) {
+ h = GC_hblkfreelist[i];
+ while (h != 0) {
+ hhdr = HDR(h);
+ size = hhdr->hb_sz;
+ next = (struct hblk *)((word)h + size);
+ nexthdr = HDR(next);
+ /* Coalesce with successor, if possible */
+ if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
+ nextsize = nexthdr -> hb_sz;
+ if (IS_MAPPED(hhdr)) {
+ GC_ASSERT(!IS_MAPPED(nexthdr));
+ /* make both consistent, so that we can merge */
+ if (size > nextsize) {
+ GC_remap((ptr_t)next, nextsize);
+ } else {
+ GC_unmap((ptr_t)h, size);
+ hhdr -> hb_flags |= WAS_UNMAPPED;
+ }
+ } else if (IS_MAPPED(nexthdr)) {
+ GC_ASSERT(!IS_MAPPED(hhdr));
+ if (size > nextsize) {
+ GC_unmap((ptr_t)next, nextsize);
+ } else {
+ GC_remap((ptr_t)h, size);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+ } else {
+ /* Unmap any gap in the middle */
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
+ }
+ /* If they are both unmapped, we merge, but leave unmapped. */
+ GC_remove_from_fl(hhdr, i);
+ GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+ hhdr -> hb_sz += nexthdr -> hb_sz;
+ GC_remove_header(next);
+ GC_add_to_fl(h, hhdr);
+ /* Start over at beginning of list */
+ h = GC_hblkfreelist[i];
+ } else /* not mergable with successor */ {
+ h = hhdr -> hb_next;
+ }
+ } /* while (h != 0) ... */
+ } /* for ... */
+}
+
+#endif /* USE_MUNMAP */
+
+/*
+ * Return a pointer to a block starting at h of length bytes.
+ * Memory for the block is mapped.
+ * Remove the block from its free list, and return the remainder (if any)
+ * to its appropriate free list.
+ * May fail by returning 0.
+ * The header for the returned block must be set up by the caller.
+ * If the return value is not 0, then hhdr is the header for it.
+ */
+struct hblk * GC_get_first_part(h, hhdr, bytes, index)
+struct hblk *h;
+hdr * hhdr;
+word bytes;
+int index;
+{
+ word total_size = hhdr -> hb_sz;
+ struct hblk * rest;
+ hdr * rest_hdr;
+
+ GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
+ GC_remove_from_fl(hhdr, index);
+ if (total_size == bytes) return h;
+ rest = (struct hblk *)((word)h + bytes);
+ if (!GC_install_header(rest)) return(0);
+ rest_hdr = HDR(rest);
+ rest_hdr -> hb_sz = total_size - bytes;
+ rest_hdr -> hb_flags = 0;
+# ifdef GC_ASSERTIONS
+ // Mark h not free, to avoid assertion about adjacent free blocks.
+ hhdr -> hb_map = 0;
+# endif
+ GC_add_to_fl(rest, rest_hdr);
+ return h;
+}
+
+/*
+ * H is a free block. N points at an address inside it.
+ * A new header for n has already been set up. Fix up h's header
+ * to reflect the fact that it is being split, move it to the
+ * appropriate free list.
+ * N replaces h in the original free list.
+ *
+ * Nhdr is not completely filled in, since it is about to allocated.
+ * It may in fact end up on the wrong free list for its size.
+ * (Hence adding it to a free list is silly. But this path is hopefully
+ * rare enough that it doesn't matter. The code is cleaner this way.)
+ */
+void GC_split_block(h, hhdr, n, nhdr, index)
+struct hblk *h;
+hdr * hhdr;
+struct hblk *n;
+hdr * nhdr;
+int index; /* Index of free list */
+{
+ word total_size = hhdr -> hb_sz;
+ word h_size = (word)n - (word)h;
+ struct hblk *prev = hhdr -> hb_prev;
+ struct hblk *next = hhdr -> hb_next;
+
+ /* Replace h with n on its freelist */
+ nhdr -> hb_prev = prev;
+ nhdr -> hb_next = next;
+ nhdr -> hb_sz = total_size - h_size;
+ nhdr -> hb_flags = 0;
+ if (0 != prev) {
+ HDR(prev) -> hb_next = n;
+ } else {
+ GC_hblkfreelist[index] = n;
+ }
+ if (0 != next) {
+ HDR(next) -> hb_prev = n;
+ }
+# ifdef GC_ASSERTIONS
+ nhdr -> hb_map = 0; /* Don't fail test for consecutive */
+ /* free blocks in GC_add_to_fl. */
+# endif
+# ifdef USE_MUNMAP
+ hhdr -> hb_last_reclaimed = GC_gc_no;
+# endif
+ hhdr -> hb_sz = h_size;
+ GC_add_to_fl(h, hhdr);
+ GC_invalidate_map(nhdr);
+}
+struct hblk * GC_allochblk_nth();
+
/*
* Allocate (and return pointer to) a heap block
- * for objects of size sz words.
+ * for objects of size sz words, searching the nth free list.
*
* NOTE: We set obj_map field in header correctly.
- * Caller is resposnsible for building an object freelist in block.
+ * Caller is responsible for building an object freelist in block.
*
* We clear the block if it is destined for large objects, and if
* kind requires that newly allocated objects be cleared.
@@ -124,48 +476,42 @@ word sz;
int kind;
unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
{
- register struct hblk *thishbp;
- register hdr * thishdr; /* Header corr. to thishbp */
+ int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
+ int i;
+ for (i = start_list; i <= N_HBLK_FLS; ++i) {
+ struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
+ if (0 != result) return result;
+ }
+ return 0;
+}
+/*
+ * The same, but with search restricted to nth free list.
+ */
+struct hblk *
+GC_allochblk_nth(sz, kind, flags, n)
+word sz;
+int kind;
+unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
+int n;
+{
register struct hblk *hbp;
register hdr * hhdr; /* Header corr. to hbp */
- struct hblk *prevhbp;
- register hdr * phdr; /* Header corr. to prevhbp */
+ register struct hblk *thishbp;
+ register hdr * thishdr; /* Header corr. to hbp */
signed_word size_needed; /* number of bytes in requested objects */
signed_word size_avail; /* bytes available in this block */
- int trip_count = 0;
size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
- if ((word)size_needed > GC_max_hblk_size)
- GC_max_hblk_size = size_needed;
/* search for a big enough block in free list */
- hbp = GC_savhbp;
+ hbp = GC_hblkfreelist[n];
hhdr = HDR(hbp);
- for(;;) {
-
- prevhbp = hbp;
- phdr = hhdr;
- hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
- hhdr = HDR(hbp);
-
- if( prevhbp == GC_savhbp) {
- if (trip_count == LAST_TRIP) return(0);
- ++trip_count;
- }
-
- if( hbp == 0 ) continue;
-
+ for(; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {
size_avail = hhdr->hb_sz;
-# ifdef EXACT_FIRST
- if (trip_count <= 1 && size_avail != size_needed) continue;
-# endif
if (size_avail < size_needed) continue;
# ifdef PRESERVE_LAST
if (size_avail != size_needed
- && !GC_incremental
- && (word)size_needed <= GC_max_hblk_size/2
- && GC_in_last_heap_sect((ptr_t)hbp)
- && GC_should_collect()) {
+ && !GC_incremental && GC_should_collect()) {
continue;
}
# endif
@@ -176,13 +522,14 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
signed_word next_size;
thishbp = hhdr -> hb_next;
- if (thishbp == 0) thishbp = GC_hblkfreelist;
- thishdr = HDR(thishbp);
- next_size = (signed_word)(thishdr -> hb_sz);
- if (next_size < size_avail
+ if (thishbp != 0) {
+ thishdr = HDR(thishbp);
+ next_size = (signed_word)(thishdr -> hb_sz);
+ if (next_size < size_avail
&& next_size >= size_needed
&& !GC_is_black_listed(thishbp, (word)size_needed)) {
continue;
+ }
}
}
if ( !IS_UNCOLLECTABLE(kind) &&
@@ -204,19 +551,21 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
thishbp = lasthbp;
if (size_avail >= size_needed) {
if (thishbp != hbp && GC_install_header(thishbp)) {
+ /* Make sure it's mapped before we mangle it. */
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, size_avail);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+# endif
/* Split the block at thishbp */
thishdr = HDR(thishbp);
- /* GC_invalidate_map not needed, since we will */
- /* allocate this block. */
- thishdr -> hb_next = hhdr -> hb_next;
- thishdr -> hb_sz = size_avail;
- hhdr -> hb_sz = (ptr_t)thishbp - (ptr_t)hbp;
- hhdr -> hb_next = thishbp;
+ GC_split_block(hbp, hhdr, thishbp, thishdr, n);
/* Advance to thishbp */
- prevhbp = hbp;
- phdr = hhdr;
hbp = thishbp;
hhdr = thishdr;
+ /* We must now allocate thishbp, since it may */
+ /* be on the wrong free list. */
}
} else if (size_needed > (signed_word)BL_LIMIT
&& orig_avail - size_needed
@@ -224,11 +573,9 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
/* Punt, since anything else risks unreasonable heap growth. */
WARN("Needed to allocate blacklisted block at 0x%lx\n",
(word)hbp);
- thishbp = hbp;
size_avail = orig_avail;
- } else if (size_avail == 0
- && size_needed == HBLKSIZE
- && prevhbp != 0) {
+ } else if (size_avail == 0 && size_needed == HBLKSIZE
+ && IS_MAPPED(hhdr)) {
# ifndef FIND_LEAK
static unsigned count = 0;
@@ -241,11 +588,14 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
/* Allocate and drop the block in small chunks, to */
/* maximize the chance that we will recover some */
/* later. */
- struct hblk * limit = hbp + (hhdr->hb_sz/HBLKSIZE);
+ word total_size = hhdr -> hb_sz;
+ struct hblk * limit = hbp + divHBLKSZ(total_size);
struct hblk * h;
+ struct hblk * prev = hhdr -> hb_prev;
- GC_words_wasted += hhdr->hb_sz;
- phdr -> hb_next = hhdr -> hb_next;
+ GC_words_wasted += total_size;
+ GC_large_free_bytes -= total_size;
+ GC_remove_from_fl(hhdr, n);
for (h = hbp; h < limit; h++) {
if (h == hbp || GC_install_header(h)) {
hhdr = HDR(h);
@@ -254,63 +604,46 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
PTRFREE, 0); /* Cant fail */
if (GC_debugging_started) {
- BZERO(hbp + HDR_BYTES, HBLKSIZE - HDR_BYTES);
+ BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
}
}
}
/* Restore hbp to point at free block */
- if (GC_savhbp == hbp) GC_savhbp = prevhbp;
- hbp = prevhbp;
- hhdr = phdr;
- if (hbp == GC_savhbp) --trip_count;
+ hbp = prev;
+ if (0 == hbp) {
+ return GC_allochblk_nth(sz, kind, flags, n);
+ }
+ hhdr = HDR(hbp);
}
# endif
}
}
if( size_avail >= size_needed ) {
- /* found a big enough block */
- /* let thishbp --> the block */
- /* set prevhbp, hbp to bracket it */
- thishbp = hbp;
- thishdr = hhdr;
- if( size_avail == size_needed ) {
- hbp = hhdr->hb_next;
- hhdr = HDR(hbp);
- } else {
- hbp = (struct hblk *)
- (((word)thishbp) + size_needed);
- if (!GC_install_header(hbp)) {
- hbp = thishbp;
- continue;
- }
- hhdr = HDR(hbp);
- GC_invalidate_map(hhdr);
- hhdr->hb_next = thishdr->hb_next;
- hhdr->hb_sz = size_avail - size_needed;
- }
- /* remove *thishbp from hblk freelist */
- if( prevhbp == 0 ) {
- GC_hblkfreelist = hbp;
- } else {
- phdr->hb_next = hbp;
- }
- /* save current list search position */
- GC_savhbp = hbp;
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, size_avail);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+# endif
+ /* hbp may be on the wrong freelist; the parameter n */
+ /* is important. */
+ hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
break;
}
}
+
+ if (0 == hbp) return 0;
/* Notify virtual dirty bit implementation that we are about to write. */
- GC_write_hint(thishbp);
- /* This should deal better with large blocks. */
+ GC_write_hint(hbp);
/* Add it to map of valid blocks */
- if (!GC_install_counts(thishbp, (word)size_needed)) return(0);
+ if (!GC_install_counts(hbp, (word)size_needed)) return(0);
/* This leaks memory under very rare conditions. */
/* Set up header */
- if (!setup_header(thishdr, sz, kind, flags)) {
- GC_remove_counts(thishbp, (word)size_needed);
+ if (!setup_header(hhdr, sz, kind, flags)) {
+ GC_remove_counts(hbp, (word)size_needed);
return(0); /* ditto */
}
@@ -327,8 +660,11 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
GC_fail_count = 0;
}
+
+ GC_large_free_bytes -= size_needed;
- return( thishbp );
+ GC_ASSERT(IS_MAPPED(hhdr));
+ return( hbp );
}
struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
@@ -341,75 +677,50 @@ struct hblk * GC_freehblk_ptr = 0; /* Search position hint for GC_freehblk */
* All mark words are assumed to be cleared.
*/
void
-GC_freehblk(p)
-register struct hblk *p;
+GC_freehblk(hbp)
+struct hblk *hbp;
{
-register hdr *phdr; /* Header corresponding to p */
-register struct hblk *hbp, *prevhbp;
-register hdr *hhdr, *prevhdr;
-register signed_word size;
+struct hblk *next, *prev;
+hdr *hhdr, *prevhdr, *nexthdr;
+signed_word size;
- /* GC_savhbp may become invalid due to coalescing. Clear it. */
- GC_savhbp = (struct hblk *)0;
- phdr = HDR(p);
- size = phdr->hb_sz;
- size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
- GC_remove_counts(p, (word)size);
- phdr->hb_sz = size;
- GC_invalidate_map(phdr);
- prevhbp = 0;
-
- /* The following optimization was suggested by David Detlefs. */
- /* Note that the header cannot be NIL, since there cannot be an */
- /* intervening call to GC_freehblk without resetting */
- /* GC_freehblk_ptr. */
- if (GC_freehblk_ptr != 0 &&
- HDR(GC_freehblk_ptr)->hb_map == GC_invalid_map &&
- (ptr_t)GC_freehblk_ptr < (ptr_t)p) {
- hbp = GC_freehblk_ptr;
- } else {
- hbp = GC_hblkfreelist;
- };
hhdr = HDR(hbp);
-
- while( (hbp != 0) && (hbp < p) ) {
- prevhbp = hbp;
- prevhdr = hhdr;
- hbp = hhdr->hb_next;
- hhdr = HDR(hbp);
- }
- GC_freehblk_ptr = prevhbp;
+ size = hhdr->hb_sz;
+ size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
+ GC_remove_counts(hbp, (word)size);
+ hhdr->hb_sz = size;
/* Check for duplicate deallocation in the easy case */
- if (hbp != 0 && (ptr_t)p + size > (ptr_t)hbp
- || prevhbp != 0 && (ptr_t)prevhbp + prevhdr->hb_sz > (ptr_t)p) {
+ if (HBLK_IS_FREE(hhdr)) {
GC_printf1("Duplicate large block deallocation of 0x%lx\n",
- (unsigned long) p);
- GC_printf2("Surrounding free blocks are 0x%lx and 0x%lx\n",
- (unsigned long) prevhbp, (unsigned long) hbp);
+ (unsigned long) hbp);
}
+ GC_ASSERT(IS_MAPPED(hhdr));
+ GC_invalidate_map(hhdr);
+ next = (struct hblk *)((word)hbp + size);
+ nexthdr = HDR(next);
+ prev = GC_free_block_ending_at(hbp);
/* Coalesce with successor, if possible */
- if( (((word)p)+size) == ((word)hbp) ) {
- phdr->hb_next = hhdr->hb_next;
- phdr->hb_sz += hhdr->hb_sz;
- GC_remove_header(hbp);
- } else {
- phdr->hb_next = hbp;
+ if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
+ GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+ hhdr -> hb_sz += nexthdr -> hb_sz;
+ GC_remove_header(next);
+ }
+ /* Coalesce with predecessor, if possible. */
+ if (0 != prev) {
+ prevhdr = HDR(prev);
+ if (IS_MAPPED(prevhdr)) {
+ GC_remove_from_fl(prevhdr, FL_UNKNOWN);
+ prevhdr -> hb_sz += hhdr -> hb_sz;
+ GC_remove_header(hbp);
+ hbp = prev;
+ hhdr = prevhdr;
+ }
}
-
- if( prevhbp == 0 ) {
- GC_hblkfreelist = p;
- } else if( (((word)prevhbp) + prevhdr->hb_sz)
- == ((word)p) ) {
- /* Coalesce with predecessor */
- prevhdr->hb_next = phdr->hb_next;
- prevhdr->hb_sz += phdr->hb_sz;
- GC_remove_header(p);
- } else {
- prevhdr->hb_next = p;
- }
+ GC_large_free_bytes += size;
+ GC_add_to_fl(hbp, hhdr);
}
diff --git a/alloc.c b/alloc.c
index 171dc780..4ff1d9bb 100644
--- a/alloc.c
+++ b/alloc.c
@@ -82,7 +82,7 @@ extern signed_word GC_mem_found; /* Number of reclaimed longwords */
GC_bool GC_dont_expand = 0;
-word GC_free_space_divisor = 4;
+word GC_free_space_divisor = 3;
extern GC_bool GC_collection_in_progress();
/* Collection is in progress, or was abandoned. */
@@ -130,18 +130,22 @@ static word min_words_allocd()
int dummy;
register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
# endif
- register word total_root_size; /* includes double stack size, */
+ word total_root_size; /* includes double stack size, */
/* since the stack is expensive */
/* to scan. */
+ word scan_size; /* Estimate of memory to be scanned */
+ /* during normal GC. */
if (stack_size < 0) stack_size = -stack_size;
total_root_size = 2 * stack_size + GC_root_size;
+ scan_size = BYTES_TO_WORDS(GC_heapsize - GC_large_free_bytes
+ + (GC_large_free_bytes >> 2)
+ /* use a bit more of large empty heap */
+ + total_root_size);
if (GC_incremental) {
- return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
- / (2 * GC_free_space_divisor));
+ return scan_size / (2 * GC_free_space_divisor);
} else {
- return(BYTES_TO_WORDS(GC_heapsize + total_root_size)
- / GC_free_space_divisor);
+ return scan_size / GC_free_space_divisor;
}
}
@@ -549,12 +553,16 @@ void GC_finish_collection()
# ifdef PRINTSTATS
GC_printf2(
- "Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
+ "Immediately reclaimed %ld bytes in heap of size %lu bytes",
(long)WORDS_TO_BYTES(GC_mem_found),
(unsigned long)GC_heapsize);
- GC_printf2("%lu (atomic) + %lu (composite) collectable bytes in use\n",
- (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
- (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
+# ifdef USE_MUNMAP
+ GC_printf1("(%lu unmapped)", GC_unmapped_bytes);
+# endif
+ GC_printf2(
+ "\n%lu (atomic) + %lu (composite) collectable bytes in use\n",
+ (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
+ (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
# endif
GC_n_attempts = 0;
@@ -565,6 +573,9 @@ void GC_finish_collection()
GC_words_wasted = 0;
GC_mem_freed = 0;
+# ifdef USE_MUNMAP
+ GC_unmap_old();
+# endif
# ifdef PRINTTIMES
GET_TIME(done_time);
GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
@@ -608,7 +619,7 @@ void GC_gcollect GC_PROTO(())
word GC_n_heap_sects = 0; /* Number of sections currently in heap. */
/*
- * Use the chunk of memory starting at p of syze bytes as part of the heap.
+ * Use the chunk of memory starting at p of size bytes as part of the heap.
* Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
*/
void GC_add_to_heap(p, bytes)
@@ -616,6 +627,7 @@ struct hblk *p;
word bytes;
{
word words;
+ hdr * phdr;
if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
@@ -630,7 +642,10 @@ word bytes;
GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
GC_n_heap_sects++;
words = BYTES_TO_WORDS(bytes - HDR_BYTES);
- HDR(p) -> hb_sz = words;
+ phdr = HDR(p);
+ phdr -> hb_sz = words;
+ phdr -> hb_map = (char *)1; /* A value != GC_invalid_map */
+ phdr -> hb_flags = 0;
GC_freehblk(p);
GC_heapsize += bytes;
if ((ptr_t)p <= GC_least_plausible_heap_addr
@@ -813,7 +828,6 @@ GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
GC_bool ignore_off_page;
{
-
if (!GC_incremental && !GC_dont_gc && GC_should_collect()) {
GC_notify_full_gc();
GC_gcollect_inner();
diff --git a/cord/cordxtra.c b/cord/cordxtra.c
index b306fbac..a5be10de 100644
--- a/cord/cordxtra.c
+++ b/cord/cordxtra.c
@@ -582,7 +582,7 @@ CORD CORD_from_file_lazy_inner(FILE * f, size_t len)
state -> lf_cache[i] = 0;
}
state -> lf_current = 0;
- GC_register_finalizer(state, CORD_lf_close_proc, 0, 0, 0);
+ GC_REGISTER_FINALIZER(state, CORD_lf_close_proc, 0, 0, 0);
return(CORD_from_fn(CORD_lf_func, state, len));
}
diff --git a/dbg_mlc.c b/dbg_mlc.c
index 81516258..930ab3ef 100644
--- a/dbg_mlc.c
+++ b/dbg_mlc.c
@@ -110,7 +110,7 @@ word integer;
return((ptr_t)result);
}
-/* Check the object with debugging info at p */
+/* Check the object with debugging info at ohdr */
/* return NIL if it's OK. Else return clobbered */
/* address. */
ptr_t GC_check_annotated_obj(ohdr)
@@ -408,7 +408,7 @@ GC_PTR p;
GC_err_printf0(
"GC_debug_free: found previously deallocated (?) object at ");
} else {
- GC_err_printf0("GC_debug_free: found smashed object at ");
+ GC_err_printf0("GC_debug_free: found smashed location at ");
}
GC_print_smashed_obj(p, clobbered);
}
@@ -491,7 +491,7 @@ GC_PTR p;
}
clobbered = GC_check_annotated_obj((oh *)base);
if (clobbered != 0) {
- GC_err_printf0("GC_debug_realloc: found smashed object at ");
+ GC_err_printf0("GC_debug_realloc: found smashed location at ");
GC_print_smashed_obj(p, clobbered);
}
old_sz = ((oh *)base) -> oh_sz;
@@ -528,7 +528,7 @@ word dummy;
if (clobbered != 0) {
GC_err_printf0(
- "GC_check_heap_block: found smashed object at ");
+ "GC_check_heap_block: found smashed location at ");
GC_print_smashed_obj((ptr_t)p, clobbered);
}
}
diff --git a/dyn_load.c b/dyn_load.c
index 56aeb3dd..d3df0a08 100644
--- a/dyn_load.c
+++ b/dyn_load.c
@@ -283,6 +283,9 @@ void GC_register_dynamic_libraries()
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
+# ifdef __GNUC__
+# pragma weak _DYNAMIC
+# endif
extern ElfW(Dyn) _DYNAMIC[];
ElfW(Dyn) *dp;
struct r_debug *r;
diff --git a/gc_hdrs.h b/gc_hdrs.h
index 2f2d1bf9..60dc2ad3 100644
--- a/gc_hdrs.h
+++ b/gc_hdrs.h
@@ -49,14 +49,16 @@ typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
* The bottom level index contains one of three kinds of values:
- * 0 means we're not responsible for this block.
+ * 0 means we're not responsible for this block,
+ * or this is a block other than the first one in a free block.
* 1 < (long)X <= MAX_JUMP means the block starts at least
* X * HBLKSIZE bytes before the current address.
* A valid pointer points to a hdr structure. (The above can't be
* valid pointers due to the GET_MEM return convention.)
*/
struct bi * asc_link; /* All indices are linked in */
- /* ascending order. */
+ /* ascending order... */
+ struct bi * desc_link; /* ... and in descending order. */
word key; /* high order address bits. */
# ifdef HASH_TL
struct bi * hash_link; /* Hash chain link. */
diff --git a/gc_priv.h b/gc_priv.h
index 934075fa..83eb84a8 100644
--- a/gc_priv.h
+++ b/gc_priv.h
@@ -73,7 +73,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define CONST
#endif
-#ifdef AMIGA
+#if 0 /* was once defined for AMIGA */
# define GC_FAR __far
#else
# define GC_FAR
@@ -350,7 +350,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+ GC_page_size) \
+ GC_page_size-1)
# else
-# if defined(AMIGA) || defined(NEXT) || defined(DOS4GW)
+# if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
# define GET_MEM(bytes) HBLKPTR((size_t) \
calloc(1, (size_t)bytes + GC_page_size) \
+ GC_page_size-1)
@@ -823,6 +823,7 @@ struct hblkhdr {
struct hblk * hb_next; /* Link field for hblk free list */
/* and for lists of chunks waiting to be */
/* reclaimed. */
+ struct hblk * hb_prev; /* Backwards link for free list. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
char* hb_map; /* A pointer to a pointer validity map of the block. */
@@ -837,9 +838,20 @@ struct hblkhdr {
# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
/* point to the first page of */
/* this object. */
+# define WAS_UNMAPPED 2 /* This is a free block, which has */
+ /* been unmapped from the address */
+ /* space. */
+ /* GC_remap must be invoked on it */
+ /* before it can be reallocated. */
+ /* Only set with USE_MUNMAP. */
unsigned short hb_last_reclaimed;
/* Value of GC_gc_no when block was */
/* last allocated or swept. May wrap. */
+ /* For a free block, this is maintained */
+ /* only for USE_MUNMAP, and indicates */
+ /* when the header was allocated, or */
+ /* when the size of the block last */
+ /* changed. */
word hb_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
@@ -959,6 +971,9 @@ struct _GC_arrays {
word _max_heapsize;
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
+ word _large_free_bytes;
+ /* Total bytes contained in blocks on large object free */
+ /* list. */
word _words_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
@@ -1005,6 +1020,9 @@ struct _GC_arrays {
/* Number of words in accessible atomic */
/* objects. */
# endif
+# ifdef USE_MUNMAP
+ word _unmapped_bytes;
+# endif
# ifdef MERGE_SIZES
unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
/* Number of words to allocate for a given allocation request in */
@@ -1022,7 +1040,7 @@ struct _GC_arrays {
/* to an object at */
/* block_start+i&~3 - WORDS_TO_BYTES(j). */
/* (If ALL_INTERIOR_POINTERS is defined, then */
- /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* instead ((short *)(hb_map[sz])[i] is j if */
/* block_start+WORDS_TO_BYTES(i) is in the */
/* interior of an object starting at */
/* block_start+WORDS_TO_BYTES(i-j)). */
@@ -1135,6 +1153,7 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
+# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
@@ -1144,6 +1163,9 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
+# ifdef USE_MUNMAP
+# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# endif
# ifdef MSWIN32
# define GC_heap_bases GC_arrays._heap_bases
# endif
@@ -1236,7 +1258,7 @@ extern char * GC_invalid_map;
/* Pointer to the nowhere valid hblk map */
/* Blocks pointing to this map are free. */
-extern struct hblk * GC_hblkfreelist;
+extern struct hblk * GC_hblkfreelist[];
/* List of completely empty heap blocks */
/* Linked through hb_next field of */
/* header structure associated with */
@@ -1311,7 +1333,12 @@ GC_bool GC_should_collect();
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
-struct hblk * GC_next_block(/* struct hblk * h */);
+struct hblk * GC_next_used_block(/* struct hblk * h */);
+ /* Return first in-use block >= h */
+struct hblk * GC_prev_block(/* struct hblk * h */);
+ /* Return last block <= h. Returned block */
+ /* is managed by GC, but may or may not be in */
+ /* use. */
void GC_mark_init();
void GC_clear_marks(); /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state(); /* Tell the marker that marked */
@@ -1608,6 +1635,15 @@ extern void (*GC_print_heap_obj)(/* ptr_t p */);
/* detailed description of the object */
/* referred to by p. */
+/* Memory unmapping: */
+#ifdef USE_MUNMAP
+ void GC_unmap_old(void);
+ void GC_merge_unmapped(void);
+ void GC_unmap(ptr_t start, word bytes);
+ void GC_remap(ptr_t start, word bytes);
+ void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
+#endif
+
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
@@ -1690,4 +1726,13 @@ void GC_err_puts(/* char *s */);
/* newlines, don't ... */
+# ifdef GC_ASSERTIONS
+# define GC_ASSERT(expr) if(!(expr)) {\
+ GC_err_printf2("Assertion failure: %s:%ld\n", \
+ __FILE__, (unsigned long)__LINE__); \
+ ABORT("assertion failure"); }
+# else
+# define GC_ASSERT(expr)
+# endif
+
# endif /* GC_PRIVATE_H */
diff --git a/gcconfig.h b/gcconfig.h
index b1a9dc36..94407e15 100644
--- a/gcconfig.h
+++ b/gcconfig.h
@@ -43,6 +43,11 @@
# define OPENBSD
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(sparc)
+# define SPARC
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
@@ -100,7 +105,8 @@
# endif
# define mach_type_known
# endif
-# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux)
+# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
+ && !defined(__OpenBSD__)
# define SPARC
# define DRSNX
# define mach_type_known
@@ -129,7 +135,7 @@
# define HP_PA
# define mach_type_known
# endif
-# if defined(LINUX) && defined(i386)
+# if defined(LINUX) && (defined(i386) || defined(__i386__))
# define I386
# define mach_type_known
# endif
@@ -153,9 +159,11 @@
# endif
# define mach_type_known
# endif
-# if defined(_AMIGA)
-# define M68K
+# if defined(_AMIGA) && !defined(AMIGA)
# define AMIGA
+# endif
+# ifdef AMIGA
+# define M68K
# define mach_type_known
# endif
# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
@@ -168,6 +176,11 @@
# define MACOS
# define mach_type_known
# endif
+# if defined(macosx)
+# define MACOSX
+# define POWERPC
+# define mach_type_known
+# endif
# if defined(NeXT) && defined(mc68000)
# define M68K
# define NEXT
@@ -486,8 +499,8 @@
# ifdef POWERPC
# define MACH_TYPE "POWERPC"
-# define ALIGNMENT 2
# ifdef MACOS
+# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
@@ -497,14 +510,24 @@
# define DATAEND /* not needed */
# endif
# ifdef LINUX
+# define ALIGNMENT 4 /* Guess. Can someone verify? */
+ /* This was 2, but that didn't sound right. */
# define OS_TYPE "LINUX"
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
+ /* Stack usually starts at 0x80000000 */
# define DATASTART GC_data_start
extern int _end;
# define DATAEND (&_end)
# endif
+# ifdef MACOSX
+# define ALIGNMENT 4
+# define OS_TYPE "MACOSX"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0xc0000000)
+# define DATAEND /* not needed */
+# endif
# endif
# ifdef VAX
@@ -603,6 +626,11 @@
# define SVR4
# define STACKBOTTOM ((ptr_t) 0xf0000000)
# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define STACKBOTTOM ((ptr_t) 0xf8000000)
+# define DATASTART ((ptr_t)(&etext))
+# endif
# endif
# ifdef I386
@@ -909,9 +937,13 @@
# define CPP_WORDSZ 64
# define STACKBOTTOM ((ptr_t) 0x120000000)
# ifdef __ELF__
+# if 0
+ /* __data_start apparently disappeared in some recent releases. */
extern int __data_start;
# define DATASTART &__data_start
-# define DYNAMIC_LOADING
+# endif
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
@@ -1021,6 +1053,10 @@
# undef MPROTECT_VDB
# endif
+# ifdef USE_MUNMAP
+# undef MPROTECT_VDB /* Can't deal with address space holes. */
+# endif
+
# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
# define DEFAULT_VDB
# endif
diff --git a/headers.c b/headers.c
index fae683a6..9564a6a5 100644
--- a/headers.c
+++ b/headers.c
@@ -25,6 +25,12 @@
# include "gc_priv.h"
bottom_index * GC_all_bottom_indices = 0;
+ /* Pointer to first (lowest addr) */
+ /* bottom_index. */
+
+bottom_index * GC_all_bottom_indices_end = 0;
+ /* Pointer to last (highest addr) */
+ /* bottom_index. */
/* Non-macro version of header location routine */
hdr * GC_find_header(h)
@@ -137,16 +143,17 @@ void GC_init_headers()
/* Make sure that there is a bottom level index block for address addr */
/* Return FALSE on failure. */
static GC_bool get_index(addr)
-register word addr;
+word addr;
{
- register word hi =
- (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
- register bottom_index * r;
- register bottom_index * p;
- register bottom_index ** prev;
+ word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ bottom_index * r;
+ bottom_index * p;
+ bottom_index ** prev;
+ bottom_index *pi;
+
# ifdef HASH_TL
- register unsigned i = TL_HASH(hi);
- register bottom_index * old;
+ unsigned i = TL_HASH(hi);
+ bottom_index * old;
old = p = GC_top_index[i];
while(p != GC_all_nils) {
@@ -164,11 +171,21 @@ register word addr;
if (r == 0) return(FALSE);
GC_top_index[hi] = r;
BZERO(r, sizeof (bottom_index));
-# endif
+# endif
r -> key = hi;
/* Add it to the list of bottom indices */
- prev = &GC_all_bottom_indices;
- while ((p = *prev) != 0 && p -> key < hi) prev = &(p -> asc_link);
+ prev = &GC_all_bottom_indices; /* pointer to p */
+ pi = 0; /* bottom_index preceding p */
+ while ((p = *prev) != 0 && p -> key < hi) {
+ pi = p;
+ prev = &(p -> asc_link);
+ }
+ r -> desc_link = pi;
+ if (0 == p) {
+ GC_all_bottom_indices_end = r;
+ } else {
+ p -> desc_link = r;
+ }
r -> asc_link = p;
*prev = r;
return(TRUE);
@@ -185,6 +202,9 @@ register struct hblk * h;
if (!get_index((word) h)) return(FALSE);
result = alloc_hdr();
SET_HDR(h, result);
+# ifdef USE_MUNMAP
+ result -> hb_last_reclaimed = GC_gc_no;
+# endif
return(result != 0);
}
@@ -261,7 +281,7 @@ word client_data;
/* Get the next valid block whose address is at least h */
/* Return 0 if there is none. */
-struct hblk * GC_next_block(h)
+struct hblk * GC_next_used_block(h)
struct hblk * h;
{
register bottom_index * bi;
@@ -276,15 +296,16 @@ struct hblk * h;
}
while(bi != 0) {
while (j < BOTTOM_SZ) {
- if (IS_FORWARDING_ADDR_OR_NIL(bi -> index[j])) {
+ hdr * hhdr = bi -> index[j];
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
j++;
} else {
- if (bi->index[j]->hb_map != GC_invalid_map) {
+ if (hhdr->hb_map != GC_invalid_map) {
return((struct hblk *)
(((bi -> key << LOG_BOTTOM_SZ) + j)
<< LOG_HBLKSIZE));
} else {
- j += divHBLKSZ(bi->index[j] -> hb_sz);
+ j += divHBLKSZ(hhdr -> hb_sz);
}
}
}
@@ -293,3 +314,38 @@ struct hblk * h;
}
return(0);
}
+
+/* Get the last (highest address) block whose address is */
+/* at most h. Return 0 if there is none. */
+/* Unlike the above, this may return a free block. */
+struct hblk * GC_prev_block(h)
+struct hblk * h;
+{
+ register bottom_index * bi;
+ register signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
+
+ GET_BI(h, bi);
+ if (bi == GC_all_nils) {
+ register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
+ bi = GC_all_bottom_indices_end;
+ while (bi != 0 && bi -> key > hi) bi = bi -> desc_link;
+ j = BOTTOM_SZ - 1;
+ }
+ while(bi != 0) {
+ while (j >= 0) {
+ hdr * hhdr = bi -> index[j];
+ if (0 == hhdr) {
+ --j;
+ } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ j -= (signed_word)hhdr;
+ } else {
+ return((struct hblk *)
+ (((bi -> key << LOG_BOTTOM_SZ) + j)
+ << LOG_HBLKSIZE));
+ }
+ }
+ j = BOTTOM_SZ - 1;
+ bi = bi -> desc_link;
+ }
+ return(0);
+}
diff --git a/include/private/gc_hdrs.h b/include/private/gc_hdrs.h
index 2f2d1bf9..60dc2ad3 100644
--- a/include/private/gc_hdrs.h
+++ b/include/private/gc_hdrs.h
@@ -49,14 +49,16 @@ typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
* The bottom level index contains one of three kinds of values:
- * 0 means we're not responsible for this block.
+ * 0 means we're not responsible for this block,
+ * or this is a block other than the first one in a free block.
* 1 < (long)X <= MAX_JUMP means the block starts at least
* X * HBLKSIZE bytes before the current address.
* A valid pointer points to a hdr structure. (The above can't be
* valid pointers due to the GET_MEM return convention.)
*/
struct bi * asc_link; /* All indices are linked in */
- /* ascending order. */
+ /* ascending order... */
+ struct bi * desc_link; /* ... and in descending order. */
word key; /* high order address bits. */
# ifdef HASH_TL
struct bi * hash_link; /* Hash chain link. */
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index 934075fa..83eb84a8 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -73,7 +73,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define CONST
#endif
-#ifdef AMIGA
+#if 0 /* was once defined for AMIGA */
# define GC_FAR __far
#else
# define GC_FAR
@@ -350,7 +350,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+ GC_page_size) \
+ GC_page_size-1)
# else
-# if defined(AMIGA) || defined(NEXT) || defined(DOS4GW)
+# if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
# define GET_MEM(bytes) HBLKPTR((size_t) \
calloc(1, (size_t)bytes + GC_page_size) \
+ GC_page_size-1)
@@ -823,6 +823,7 @@ struct hblkhdr {
struct hblk * hb_next; /* Link field for hblk free list */
/* and for lists of chunks waiting to be */
/* reclaimed. */
+ struct hblk * hb_prev; /* Backwards link for free list. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
char* hb_map; /* A pointer to a pointer validity map of the block. */
@@ -837,9 +838,20 @@ struct hblkhdr {
# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
/* point to the first page of */
/* this object. */
+# define WAS_UNMAPPED 2 /* This is a free block, which has */
+ /* been unmapped from the address */
+ /* space. */
+ /* GC_remap must be invoked on it */
+ /* before it can be reallocated. */
+ /* Only set with USE_MUNMAP. */
unsigned short hb_last_reclaimed;
/* Value of GC_gc_no when block was */
/* last allocated or swept. May wrap. */
+ /* For a free block, this is maintained */
+ /* only for USE_MUNMAP, and indicates */
+ /* when the header was allocated, or */
+ /* when the size of the block last */
+ /* changed. */
word hb_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
@@ -959,6 +971,9 @@ struct _GC_arrays {
word _max_heapsize;
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
+ word _large_free_bytes;
+ /* Total bytes contained in blocks on large object free */
+ /* list. */
word _words_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
@@ -1005,6 +1020,9 @@ struct _GC_arrays {
/* Number of words in accessible atomic */
/* objects. */
# endif
+# ifdef USE_MUNMAP
+ word _unmapped_bytes;
+# endif
# ifdef MERGE_SIZES
unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
/* Number of words to allocate for a given allocation request in */
@@ -1022,7 +1040,7 @@ struct _GC_arrays {
/* to an object at */
/* block_start+i&~3 - WORDS_TO_BYTES(j). */
/* (If ALL_INTERIOR_POINTERS is defined, then */
- /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* instead ((short *)(hb_map[sz])[i] is j if */
/* block_start+WORDS_TO_BYTES(i) is in the */
/* interior of an object starting at */
/* block_start+WORDS_TO_BYTES(i-j)). */
@@ -1135,6 +1153,7 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
+# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
@@ -1144,6 +1163,9 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
+# ifdef USE_MUNMAP
+# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# endif
# ifdef MSWIN32
# define GC_heap_bases GC_arrays._heap_bases
# endif
@@ -1236,7 +1258,7 @@ extern char * GC_invalid_map;
/* Pointer to the nowhere valid hblk map */
/* Blocks pointing to this map are free. */
-extern struct hblk * GC_hblkfreelist;
+extern struct hblk * GC_hblkfreelist[];
/* List of completely empty heap blocks */
/* Linked through hb_next field of */
/* header structure associated with */
@@ -1311,7 +1333,12 @@ GC_bool GC_should_collect();
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
-struct hblk * GC_next_block(/* struct hblk * h */);
+struct hblk * GC_next_used_block(/* struct hblk * h */);
+ /* Return first in-use block >= h */
+struct hblk * GC_prev_block(/* struct hblk * h */);
+ /* Return last block <= h. Returned block */
+ /* is managed by GC, but may or may not be in */
+ /* use. */
void GC_mark_init();
void GC_clear_marks(); /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state(); /* Tell the marker that marked */
@@ -1608,6 +1635,15 @@ extern void (*GC_print_heap_obj)(/* ptr_t p */);
/* detailed description of the object */
/* referred to by p. */
+/* Memory unmapping: */
+#ifdef USE_MUNMAP
+ void GC_unmap_old(void);
+ void GC_merge_unmapped(void);
+ void GC_unmap(ptr_t start, word bytes);
+ void GC_remap(ptr_t start, word bytes);
+ void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
+#endif
+
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
@@ -1690,4 +1726,13 @@ void GC_err_puts(/* char *s */);
/* newlines, don't ... */
+# ifdef GC_ASSERTIONS
+# define GC_ASSERT(expr) if(!(expr)) {\
+ GC_err_printf2("Assertion failure: %s:%ld\n", \
+ __FILE__, (unsigned long)__LINE__); \
+ ABORT("assertion failure"); }
+# else
+# define GC_ASSERT(expr)
+# endif
+
# endif /* GC_PRIVATE_H */
diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
index b1a9dc36..94407e15 100644
--- a/include/private/gcconfig.h
+++ b/include/private/gcconfig.h
@@ -43,6 +43,11 @@
# define OPENBSD
# define mach_type_known
# endif
+# if defined(__OpenBSD__) && defined(sparc)
+# define SPARC
+# define OPENBSD
+# define mach_type_known
+# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
@@ -100,7 +105,8 @@
# endif
# define mach_type_known
# endif
-# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux)
+# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
+ && !defined(__OpenBSD__)
# define SPARC
# define DRSNX
# define mach_type_known
@@ -129,7 +135,7 @@
# define HP_PA
# define mach_type_known
# endif
-# if defined(LINUX) && defined(i386)
+# if defined(LINUX) && (defined(i386) || defined(__i386__))
# define I386
# define mach_type_known
# endif
@@ -153,9 +159,11 @@
# endif
# define mach_type_known
# endif
-# if defined(_AMIGA)
-# define M68K
+# if defined(_AMIGA) && !defined(AMIGA)
# define AMIGA
+# endif
+# ifdef AMIGA
+# define M68K
# define mach_type_known
# endif
# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
@@ -168,6 +176,11 @@
# define MACOS
# define mach_type_known
# endif
+# if defined(macosx)
+# define MACOSX
+# define POWERPC
+# define mach_type_known
+# endif
# if defined(NeXT) && defined(mc68000)
# define M68K
# define NEXT
@@ -486,8 +499,8 @@
# ifdef POWERPC
# define MACH_TYPE "POWERPC"
-# define ALIGNMENT 2
# ifdef MACOS
+# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
@@ -497,14 +510,24 @@
# define DATAEND /* not needed */
# endif
# ifdef LINUX
+# define ALIGNMENT 4 /* Guess. Can someone verify? */
+ /* This was 2, but that didn't sound right. */
# define OS_TYPE "LINUX"
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
+ /* Stack usually starts at 0x80000000 */
# define DATASTART GC_data_start
extern int _end;
# define DATAEND (&_end)
# endif
+# ifdef MACOSX
+# define ALIGNMENT 4
+# define OS_TYPE "MACOSX"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0xc0000000)
+# define DATAEND /* not needed */
+# endif
# endif
# ifdef VAX
@@ -603,6 +626,11 @@
# define SVR4
# define STACKBOTTOM ((ptr_t) 0xf0000000)
# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define STACKBOTTOM ((ptr_t) 0xf8000000)
+# define DATASTART ((ptr_t)(&etext))
+# endif
# endif
# ifdef I386
@@ -909,9 +937,13 @@
# define CPP_WORDSZ 64
# define STACKBOTTOM ((ptr_t) 0x120000000)
# ifdef __ELF__
+# if 0
+ /* __data_start apparently disappeared in some recent releases. */
extern int __data_start;
# define DATASTART &__data_start
-# define DYNAMIC_LOADING
+# endif
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
@@ -1021,6 +1053,10 @@
# undef MPROTECT_VDB
# endif
+# ifdef USE_MUNMAP
+# undef MPROTECT_VDB /* Can't deal with address space holes. */
+# endif
+
# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
# define DEFAULT_VDB
# endif
diff --git a/linux_threads.c b/linux_threads.c
index 4bcdd3a1..e780a8ad 100644
--- a/linux_threads.c
+++ b/linux_threads.c
@@ -118,12 +118,13 @@ GC_linux_thread_top_of_stack() relies on implementation details of
LinuxThreads, namely that thread stacks are allocated on 2M boundaries
and grow to no more than 2M.
To make sure that we're using LinuxThreads and not some other thread
-package, we generate a dummy reference to `__pthread_initial_thread_bos',
+package, we generate a dummy reference to `__pthread_kill_other_threads_np'
+(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.
*/
extern char * __pthread_initial_thread_bos;
-char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;
+char **dummy_var_to_force_linux_threads = &__pthread_kill_other_threads_np;
#define LINUX_THREADS_STACK_SIZE (2 * 1024 * 1024)
diff --git a/mach_dep.c b/mach_dep.c
index 23e270e3..53698604 100644
--- a/mach_dep.c
+++ b/mach_dep.c
@@ -20,7 +20,11 @@
# define _longjmp(b,v) longjmp(b,v)
# endif
# ifdef AMIGA
-# include <dos.h>
+# ifndef __GNUC__
+# include <dos/dos.h>
+# else
+# include <machine/reg.h>
+# endif
# endif
#if defined(__MWERKS__) && !defined(POWERPC)
@@ -126,9 +130,28 @@ void GC_push_regs()
asm("addq.w &0x4,%sp"); /* put stack back where it was */
# endif /* M68K HP */
-# ifdef AMIGA
- /* AMIGA - could be replaced by generic code */
- /* a0, a1, d0 and d1 are caller save */
+# if defined(M68K) && defined(AMIGA)
+ /* AMIGA - could be replaced by generic code */
+ /* a0, a1, d0 and d1 are caller save */
+
+# ifdef __GNUC__
+ asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
+
+ asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
+ /* Skip frame pointer and stack pointer */
+ asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
+
+ asm("addq.w &0x4,%sp"); /* put stack back where it was */
+# else /* !__GNUC__ */
GC_push_one(getreg(REG_A2));
GC_push_one(getreg(REG_A3));
GC_push_one(getreg(REG_A4));
@@ -141,7 +164,8 @@ void GC_push_regs()
GC_push_one(getreg(REG_D5));
GC_push_one(getreg(REG_D6));
GC_push_one(getreg(REG_D7));
-# endif
+# endif /* !__GNUC__ */
+# endif /* AMIGA */
# if defined(M68K) && defined(MACOS)
# if defined(THINK_C)
diff --git a/malloc.c b/malloc.c
index 37da584c..66e62d29 100644
--- a/malloc.c
+++ b/malloc.c
@@ -93,8 +93,16 @@ register ptr_t *opp;
if(GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
- while ((h = GC_allochblk(lw, k, 0)) == 0
- && GC_collect_or_expand(n_blocks, FALSE));
+ h = GC_allochblk(lw, k, 0);
+# ifdef USE_MUNMAP
+ if (0 == h) {
+ GC_merge_unmapped();
+ h = GC_allochblk(lw, k, 0);
+ }
+# endif
+ while (0 == h && GC_collect_or_expand(n_blocks, FALSE)) {
+ h = GC_allochblk(lw, k, 0);
+ }
if (h == 0) {
op = 0;
} else {
diff --git a/mallocx.c b/mallocx.c
index b1450215..8c07fa98 100644
--- a/mallocx.c
+++ b/mallocx.c
@@ -57,8 +57,16 @@ register int k;
if(GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
- while ((h = GC_allochblk(lw, k, IGNORE_OFF_PAGE)) == 0
- && GC_collect_or_expand(n_blocks, TRUE));
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+# ifdef USE_MUNMAP
+ if (0 == h) {
+ GC_merge_unmapped();
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+ }
+# endif
+ while (0 == h && GC_collect_or_expand(n_blocks, TRUE)) {
+ h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+ }
if (h == 0) {
op = 0;
} else {
diff --git a/mark.c b/mark.c
index c827af5c..15c48762 100644
--- a/mark.c
+++ b/mark.c
@@ -1102,7 +1102,7 @@ struct hblk *h;
{
register hdr * hhdr;
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
GC_push_marked(h, hhdr);
@@ -1118,7 +1118,7 @@ struct hblk *h;
if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
for (;;) {
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
# ifdef STUBBORN_ALLOC
@@ -1147,7 +1147,7 @@ struct hblk *h;
register hdr * hhdr = HDR(h);
for (;;) {
- h = GC_next_block(h);
+ h = GC_next_used_block(h);
if (h == 0) return(0);
hhdr = HDR(h);
if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
diff --git a/misc.c b/misc.c
index 7779c43c..edcac90f 100644
--- a/misc.c
+++ b/misc.c
@@ -427,11 +427,8 @@ void GC_init_inner()
# ifdef MSWIN32
GC_init_win32();
# endif
-# if defined(LINUX) && defined(POWERPC)
- GC_init_linuxppc();
-# endif
-# if defined(LINUX) && defined(SPARC)
- GC_init_linuxsparc();
+# if defined(LINUX) && (defined(POWERPC) || defined(ALPHA) || defined(SPARC))
+ GC_init_linux_data_start();
# endif
# ifdef SOLARIS_THREADS
GC_thr_init();
diff --git a/os_dep.c b/os_dep.c
index 7b3ba545..e08a936a 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -139,29 +139,20 @@
# define OPT_PROT_EXEC 0
#endif
-#if defined(LINUX) && defined(POWERPC)
+#if defined(LINUX) && (defined(POWERPC) || defined(SPARC) || defined(ALPHA))
+ /* The I386 case can be handled without a search. The Alpha case */
+ /* used to be handled differently as well, but the rules changed */
+ /* for recent Linux versions. This seems to be the easiest way to */
+ /* cover all versions. */
ptr_t GC_data_start;
- void GC_init_linuxppc()
- {
- extern ptr_t GC_find_limit();
- extern char **_environ;
- /* This may need to be environ, without the underscore, for */
- /* some versions. */
- GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
- }
-#endif
+ extern char * GC_copyright[]; /* Any data symbol would do. */
-#if defined(LINUX) && defined(SPARC)
- ptr_t GC_data_start;
-
- void GC_init_linuxsparc()
+ void GC_init_linux_data_start()
{
extern ptr_t GC_find_limit();
- extern char **_environ;
- /* This may need to be environ, without the underscore, for */
- /* some versions. */
- GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
+
+ GC_data_start = GC_find_limit((ptr_t)GC_copyright, FALSE);
}
#endif
@@ -362,7 +353,8 @@ word GC_page_size;
}
# else
-# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
+# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
+ || defined(USE_MUNMAP)
void GC_setpagesize()
{
GC_page_size = GETPAGESIZE();
@@ -441,6 +433,24 @@ ptr_t GC_get_stack_base()
ptr_t GC_get_stack_base()
{
+ struct Process *proc = (struct Process*)SysBase->ThisTask;
+
+ /* Reference: Amiga Guru Book Pages: 42,567,574 */
+ if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS
+ && proc->pr_CLI != NULL) {
+ /* first ULONG is StackSize */
+ /*longPtr = proc->pr_ReturnAddr;
+ size = longPtr[0];*/
+
+ return (char *)proc->pr_ReturnAddr + sizeof(ULONG);
+ } else {
+ return (char *)proc->pr_Task.tc_SPUpper;
+ }
+}
+
+#if 0 /* old version */
+ptr_t GC_get_stack_base()
+{
extern struct WBStartup *_WBenchMsg;
extern long __base;
extern long __stack;
@@ -463,10 +473,9 @@ ptr_t GC_get_stack_base()
}
return (ptr_t)(__base + GC_max(size, __stack));
}
+#endif /* 0 */
-# else
-
-
+# else /* !AMIGA, !OS2, ... */
# ifdef NEED_FIND_LIMIT
/* Some tools to implement HEURISTIC2 */
@@ -486,7 +495,7 @@ ptr_t GC_get_stack_base()
typedef void (*handler)();
# endif
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
static struct sigaction old_segv_act;
# if defined(_sigargs) /* !Irix6.x */
static struct sigaction old_bus_act;
@@ -497,7 +506,7 @@ ptr_t GC_get_stack_base()
void GC_setup_temporary_fault_handler()
{
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
struct sigaction act;
act.sa_handler = GC_fault_handler;
@@ -533,7 +542,7 @@ ptr_t GC_get_stack_base()
void GC_reset_fault_handler()
{
-# if defined(SUNOS5SIGS) || defined(IRIX5)
+# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
(void) sigaction(SIGSEGV, &old_segv_act, 0);
# ifdef _sigargs /* Irix 5.x, not 6.x */
(void) sigaction(SIGBUS, &old_bus_act, 0);
@@ -851,6 +860,72 @@ void GC_register_data_segments()
# else
# ifdef AMIGA
+ void GC_register_data_segments()
+ {
+ struct Process *proc;
+ struct CommandLineInterface *cli;
+ BPTR myseglist;
+ ULONG *data;
+
+ int num;
+
+
+# ifdef __GNUC__
+ ULONG dataSegSize;
+ GC_bool found_segment = FALSE;
+ extern char __data_size[];
+
+ dataSegSize=__data_size+8;
+ /* Can`t find the Location of __data_size, because
+ it`s possible that is it, inside the segment. */
+
+# endif
+
+ proc= (struct Process*)SysBase->ThisTask;
+
+ /* Reference: Amiga Guru Book Pages: 538ff,565,573
+ and XOper.asm */
+ if (proc->pr_Task.tc_Node.ln_Type==NT_PROCESS) {
+ if (proc->pr_CLI == NULL) {
+ myseglist = proc->pr_SegList;
+ } else {
+ /* ProcLoaded 'Loaded as a command: '*/
+ cli = BADDR(proc->pr_CLI);
+ myseglist = cli->cli_Module;
+ }
+ } else {
+ ABORT("Not a Process.");
+ }
+
+ if (myseglist == NULL) {
+ ABORT("Arrrgh.. can't find segments, aborting");
+ }
+
+ /* xoper hunks Shell Process */
+
+ num=0;
+ for (data = (ULONG *)BADDR(myseglist); data != NULL;
+ data = (ULONG *)BADDR(data[0])) {
+ if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
+ ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
+# ifdef __GNUC__
+ if (dataSegSize == data[-1]) {
+ found_segment = TRUE;
+ }
+# endif
+ GC_add_roots_inner((char *)&data[1],
+ ((char *)&data[1]) + data[-1], FALSE);
+ }
+ ++num;
+ } /* for */
+# ifdef __GNUC__
+ if (!found_segment) {
+ ABORT("Can`t find correct Segments.\nSolution: Use an newer version of ixemul.library");
+ }
+# endif
+ }
+
+#if 0 /* old version */
void GC_register_data_segments()
{
extern struct WBStartup *_WBenchMsg;
@@ -892,6 +967,7 @@ void GC_register_data_segments()
}
}
}
+#endif /* old version */
# else
@@ -932,7 +1008,8 @@ int * etext_addr;
void GC_register_data_segments()
{
-# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS)
+# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
+ && !defined(MACOSX)
# if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
/* As of Solaris 2.3, the Solaris threads implementation */
/* allocates the data structure for the initial thread with */
@@ -946,7 +1023,7 @@ void GC_register_data_segments()
GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
# endif
# endif
-# if !defined(PCR) && defined(NEXT)
+# if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
# endif
# if defined(MACOS)
@@ -1160,6 +1237,95 @@ void GC_win32_free_heap ()
# endif
+#ifdef USE_MUNMAP
+
+/* For now, this only works on some Unix-like systems. If you */
+/* have something else, don't define USE_MUNMAP. */
+/* We assume ANSI C to support this feature. */
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+/* Compute a page aligned starting address for the unmap */
+/* operation on a block of size bytes starting at start. */
+/* Return 0 if the block is too small to make this feasible. */
+ptr_t GC_unmap_start(ptr_t start, word bytes)
+{
+ ptr_t result = start;
+ /* Round start to next page boundary. */
+ result += GC_page_size - 1;
+ result = (ptr_t)((word)result & ~(GC_page_size - 1));
+ if (result + GC_page_size > start + bytes) return 0;
+ return result;
+}
+
+/* Compute end address for an unmap operation on the indicated */
+/* block. */
+ptr_t GC_unmap_end(ptr_t start, word bytes)
+{
+ ptr_t end_addr = start + bytes;
+ end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
+ return end_addr;
+}
+
+/* We assume that GC_remap is called on exactly the same range */
+/* as a previous call to GC_unmap. It is safe to consistently */
+/* round the endpoints in both places. */
+void GC_unmap(ptr_t start, word bytes)
+{
+ ptr_t start_addr = GC_unmap_start(start, bytes);
+ ptr_t end_addr = GC_unmap_end(start, bytes);
+ word len = end_addr - start_addr;
+ if (0 == start_addr) return;
+ if (munmap(start_addr, len) != 0) ABORT("munmap failed");
+ GC_unmapped_bytes += len;
+}
+
+
+void GC_remap(ptr_t start, word bytes)
+{
+ static int zero_descr = -1;
+ ptr_t start_addr = GC_unmap_start(start, bytes);
+ ptr_t end_addr = GC_unmap_end(start, bytes);
+ word len = end_addr - start_addr;
+ ptr_t result;
+
+ if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
+ if (0 == start_addr) return;
+ result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
+ if (result != start_addr) {
+ ABORT("mmap remapping failed");
+ }
+ GC_unmapped_bytes -= len;
+}
+
+/* Two adjacent blocks have already been unmapped and are about to */
+/* be merged. Unmap the whole block. This typically requires */
+/* that we unmap a small section in the middle that was not previously */
+/* unmapped due to alignment constraints. */
+void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
+{
+ ptr_t start1_addr = GC_unmap_start(start1, bytes1);
+ ptr_t end1_addr = GC_unmap_end(start1, bytes1);
+ ptr_t start2_addr = GC_unmap_start(start2, bytes2);
+ ptr_t end2_addr = GC_unmap_end(start2, bytes2);
+ ptr_t start_addr = end1_addr;
+ ptr_t end_addr = start2_addr;
+ word len;
+ GC_ASSERT(start1 + bytes1 == start2);
+ if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
+ if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
+ if (0 == start_addr) return;
+ len = end_addr - start_addr;
+ if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
+ GC_unmapped_bytes += len;
+}
+
+#endif /* USE_MUNMAP */
+
/* Routine for pushing any additional roots. In THREADS */
/* environment, this is also responsible for marking from */
/* thread stacks. In the SRC_M3 case, it also handles */
@@ -1699,7 +1865,7 @@ struct hblk *h;
void GC_dirty_init()
{
-#if defined(SUNOS5SIGS) || defined(IRIX5)
+#if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
struct sigaction act, oldact;
# ifdef IRIX5
act.sa_flags = SA_RESTART;
@@ -2241,6 +2407,11 @@ struct hblk *h;
# if defined (DRSNX)
# include <sys/sparc/frame.h>
# else
+# if defined(OPENBSD)
+# include <frame.h>
+# else
+# include <sys/frame.h>
+# endif
# include <sys/frame.h>
# endif
# endif
@@ -2251,6 +2422,15 @@ struct hblk *h;
#ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
+
+#ifdef OPENBSD
+# define FR_SAVFP fr_fp
+# define FR_SAVPC fr_pc
+#else
+# define FR_SAVFP fr_savfp
+# define FR_SAVPC fr_savpc
+#endif
+
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
@@ -2261,11 +2441,11 @@ struct callinfo info[NFRAMES];
frame = (struct frame *) GC_save_regs_in_stack ();
- for (fp = frame -> fr_savfp; fp != 0 && nframes < NFRAMES;
- fp = fp -> fr_savfp, nframes++) {
+ for (fp = frame -> FR_SAVFP; fp != 0 && nframes < NFRAMES;
+ fp = fp -> FR_SAVFP, nframes++) {
register int i;
- info[nframes].ci_pc = fp->fr_savpc;
+ info[nframes].ci_pc = fp->FR_SAVPC;
for (i = 0; i < NARGS; i++) {
info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
}
diff --git a/solaris_threads.c b/solaris_threads.c
index 1f5ebcdc..65b2c651 100644
--- a/solaris_threads.c
+++ b/solaris_threads.c
@@ -616,6 +616,25 @@ GC_thread GC_lookup_thread(thread_t id)
return(p);
}
+# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
+
+word GC_get_orig_stack_size() {
+ struct rlimit rl;
+ static int warned = 0;
+ int result;
+
+ if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
+ result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
+ if (result > MAX_ORIG_STACK_SIZE) {
+ if (!warned) {
+ WARN("Large stack limit(%ld): only scanning 8 MB", result);
+ warned = 1;
+ }
+ result = MAX_ORIG_STACK_SIZE;
+ }
+ return result;
+}
+
/* Notify dirty bit implementation of unused parts of my stack. */
/* Caller holds allocation lock. */
void GC_my_stack_limits()
@@ -628,12 +647,9 @@ void GC_my_stack_limits()
if (stack_size == 0) {
/* original thread */
- struct rlimit rl;
-
- if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
/* Empirically, what should be the stack page with lowest */
/* address is actually inaccessible. */
- stack_size = ((word)rl.rlim_cur & ~(HBLKSIZE-1)) - GC_page_sz;
+ stack_size = GC_get_orig_stack_size() - GC_page_sz;
stack = GC_stackbottom - stack_size + GC_page_sz;
} else {
stack = me -> stack;
@@ -671,8 +687,7 @@ void GC_push_all_stacks()
top = p -> stack + p -> stack_size;
} else {
/* The original stack. */
- if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
- bottom = GC_stackbottom - rl.rlim_cur + GC_page_sz;
+ bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_sz;
top = GC_stackbottom;
}
if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
diff --git a/sparc_mach_dep.s b/sparc_mach_dep.s
index a6a0a241..9831c6ca 100644
--- a/sparc_mach_dep.s
+++ b/sparc_mach_dep.s
@@ -1,4 +1,4 @@
-! SPARCompiler 3.0 and later apparently no loner handles
+! SPARCompiler 3.0 and later apparently no longer handles
! asm outside functions. So we need a separate .s file
! This is only set up for SunOS 5, not SunOS 4.
! Assumes this is called before the stack contents are
@@ -35,4 +35,4 @@ loop:
- \ No newline at end of file
+
diff --git a/sparc_sunos4_mach_dep.s b/sparc_sunos4_mach_dep.s
index 7accadd3..41858073 100644
--- a/sparc_sunos4_mach_dep.s
+++ b/sparc_sunos4_mach_dep.s
@@ -1,4 +1,4 @@
-! SPARCompiler 3.0 and later apparently no loner handles
+! SPARCompiler 3.0 and later apparently no longer handles
! asm outside functions. So we need a separate .s file
! This is only set up for SunOS 4.
! Assumes this is called before the stack contents are
diff --git a/version.h b/version.h
index 88858fa4..8466409c 100644
--- a/version.h
+++ b/version.h
@@ -1,6 +1,6 @@
-#define GC_VERSION_MAJOR 4
-#define GC_VERSION_MINOR 14
-#define GC_ALPHA_VERSION GC_NOT_ALPHA
+#define GC_VERSION_MAJOR 5
+#define GC_VERSION_MINOR 0
+#define GC_ALPHA_VERSION 1
# define GC_NOT_ALPHA 0xff