-rw-r--r--  MacOS.c                          5
-rw-r--r--  Mac_files/MacOS_Test_config.h   19
-rw-r--r--  Mac_files/MacOS_config.h        13
-rw-r--r--  Makefile                        18
-rw-r--r--  Makefile.dj                     16
-rw-r--r--  README                          48
-rw-r--r--  README.Mac                      13
-rw-r--r--  README.linux                    32
-rw-r--r--  README.sgi                      16
-rw-r--r--  README.solaris2                 10
-rw-r--r--  README.win32                     5
-rw-r--r--  allchblk.c                      30
-rw-r--r--  alloc.c                         31
-rw-r--r--  blacklst.c                      54
-rw-r--r--  checksums.c                      4
-rw-r--r--  config.h                        57
-rw-r--r--  cord/README                      4
-rw-r--r--  cord/gc.h                       43
-rw-r--r--  dbg_mlc.c                       72
-rw-r--r--  dyn_load.c                       8
-rw-r--r--  gc.h                            43
-rw-r--r--  gc_alloc.h                       8
-rw-r--r--  gc_mark.h                       26
-rw-r--r--  gc_priv.h                      213
-rw-r--r--  headers.c                       20
-rw-r--r--  include/gc.h                    43
-rw-r--r--  include/gc_alloc.h               8
-rw-r--r--  include/private/config.h        57
-rw-r--r--  include/private/gc_priv.h      213
-rw-r--r--  irix_threads.c                  52
-rw-r--r--  linux_threads.c                642
-rw-r--r--  mach_dep.c                       6
-rw-r--r--  malloc.c                         2
-rw-r--r--  mallocx.c                        2
-rw-r--r--  mark.c                          72
-rw-r--r--  mark_rts.c                       8
-rw-r--r--  misc.c                          55
-rw-r--r--  new_hblk.c                       2
-rw-r--r--  obj_map.c                        2
-rw-r--r--  os_dep.c                       113
-rw-r--r--  pcr_interface.c                  6
-rw-r--r--  ptr_chck.c                       4
-rw-r--r--  reclaim.c                       22
-rw-r--r--  solaris_pthreads.c               1
-rw-r--r--  solaris_threads.c               14
-rw-r--r--  solaris_threads.h                2
-rw-r--r--  stubborn.c                       6
-rw-r--r--  test.c                          31
-rw-r--r--  test_cpp.cc                     21
-rw-r--r--  threadlibs.c                     2
-rw-r--r--  typd_mlc.c                       9
-rw-r--r--  version.h                        2
52 files changed, 1704 insertions, 501 deletions
diff --git a/MacOS.c b/MacOS.c
index 7855c935..420ea500 100644
--- a/MacOS.c
+++ b/MacOS.c
@@ -8,7 +8,8 @@
11/22/94 pcb StripAddress the temporary memory handle for 24-bit mode.
11/30/94 pcb Tracking all memory usage so we can deallocate it all at once.
- 02/10/96 pcb Added routine to perform a final collection when unloading shared library.
+ 02/10/96 pcb Added routine to perform a final collection when
+unloading shared library.
by Patrick C. Beard.
*/
@@ -127,7 +128,7 @@ void GC_MacFreeTemporaryMemory()
}
theTemporaryMemory = NULL;
-# if !defined(SHARED_LIBRARY_BUILD)
+# if !defined(SILENT) && !defined(SHARED_LIBRARY_BUILD)
fprintf(stdout, "[total memory used: %ld bytes.]\n",
totalMemoryUsed);
fprintf(stdout, "[total collections: %ld.]\n", GC_gc_no);
diff --git a/Mac_files/MacOS_Test_config.h b/Mac_files/MacOS_Test_config.h
index 94db03f0..c95f4bb2 100644
--- a/Mac_files/MacOS_Test_config.h
+++ b/Mac_files/MacOS_Test_config.h
@@ -14,11 +14,14 @@
/* Boehm, November 17, 1995 12:05 pm PST */
#ifdef __MWERKS__
-#if defined(__powerc)
-#include <MacHeadersPPC>
-#else
-#include <MacHeaders68K>
+
+// for CodeWarrior Pro with Metrowerks Standard Library (MSL).
+// #define MSL_USE_PRECOMPILED_HEADERS 0
+#include <ansi_prefix.mac.h>
+#ifndef __STDC__
+#define __STDC__ 0
#endif
+
#endif
// these are defined again in gc_priv.h.
@@ -26,10 +29,10 @@
#undef FALSE
#define ALL_INTERIOR_POINTERS // follows interior pointers.
-//#define SILENT // want collection messages.
+//#define SILENT // want collection messages.
//#define DONT_ADD_BYTE_AT_END // no padding.
-//#define SMALL_CONFIG // whether to a smaller heap.
-#define NO_SIGNALS // signals aren't real on the Macintosh.
+//#define SMALL_CONFIG // whether to a smaller heap.
+#define NO_SIGNALS // signals aren't real on the Macintosh.
#define USE_TEMPORARY_MEMORY // use Macintosh temporary memory.
// CFLAGS= -O -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DSILENT
@@ -85,4 +88,4 @@
// since some ports use malloc or calloc to obtain system memory.
// (Probably works for UNIX, and win32.)
// -DNO_DEBUG removes GC_dump and the debugging routines it calls.
-// Reduces code size slightly at the expense of debuggability. \ No newline at end of file
+// Reduces code size slightly at the expense of debuggability.
diff --git a/Mac_files/MacOS_config.h b/Mac_files/MacOS_config.h
index 838be591..93c3c97a 100644
--- a/Mac_files/MacOS_config.h
+++ b/Mac_files/MacOS_config.h
@@ -12,13 +12,16 @@
/* Boehm, November 17, 1995 12:10 pm PST */
#ifdef __MWERKS__
-#if defined(__powerc)
-#include <MacHeadersPPC>
-#else
-#include <MacHeaders68K>
-#endif
+
+// for CodeWarrior Pro with Metrowerks Standard Library (MSL).
+// #define MSL_USE_PRECOMPILED_HEADERS 0
+#include <ansi_prefix.mac.h>
+#ifndef __STDC__
+#define __STDC__ 0
#endif
+#endif /* __MWERKS__ */
+
// these are defined again in gc_priv.h.
#undef TRUE
#undef FALSE
diff --git a/Makefile b/Makefile
index 8e531188..056df2f6 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@
# and runs some tests of collector and cords. Does not add cords or
# c++ interface to gc.a
# cord/de - builds dumb editor based on cords.
-ABI_FLAG=
+ABI_FLAG=
CC=cc $(ABI_FLAG)
CXX=CC $(ABI_FLAG)
AS=as $(ABI_FLAG)
@@ -16,7 +16,7 @@ AS=as $(ABI_FLAG)
# Under Irix 6, you will have to specify the ABI for as if you specify
# it for the C compiler.
-CFLAGS= -O -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DNO_EXECUTE_PERMISSION -DSILENT
+CFLAGS= -O -DATOMIC_UNCOLLECTABLE -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DNO_EXECUTE_PERMISSION -DSILENT
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
@@ -95,6 +95,16 @@ CFLAGS= -O -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DNO_EXECUTE_PERMISSION -DSILENT
# Works for Solaris and Irix.
# -DMMAP_STACKS (for Solaris threads) Use mmap from /dev/zero rather than
# GC_scratch_alloc() to get stack memory.
+# -DPRINT_BLACK_LIST Whenever a black list entry is added, i.e. whenever
+# the garbage collector detects a value that looks almost, but not quite,
+# like a pointer, print both the address containing the value, and the
+# value of the near-bogus-pointer. Can be used to identify regions of
+# memory that are likely to contribute misidentified pointers.
+# -DOLD_BLOCK_ALLOC Use the old, possibly faster, large block
+# allocation strategy. The new strategy tries harder to minimize
+# fragmentation, sometimes at the expense of spending more time in the
+# large block allocator and/or collecting more frequently.
+#
@@ -114,9 +124,9 @@ RANLIB= ranlib
srcdir = .
VPATH = $(srcdir)
-OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o irix_threads.o typd_mlc.o ptr_chck.o mallocx.o solaris_pthreads.o
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o irix_threads.o linux_threads.o typd_mlc.o ptr_chck.o mallocx.o solaris_pthreads.o
-CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c irix_threads.c typd_mlc.c ptr_chck.c mallocx.c solaris_pthreads.c
+CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c irix_threads.c linux_threads.c typd_mlc.c ptr_chck.c mallocx.c solaris_pthreads.c
CORD_SRCS= cord/cordbscs.c cord/cordxtra.c cord/cordprnt.c cord/de.c cord/cordtest.c cord/cord.h cord/ec.h cord/private/cord_pos.h cord/de_win.c cord/de_win.h cord/de_cmds.h cord/de_win.ICO cord/de_win.RC cord/SCOPTIONS.amiga cord/SMakefile.amiga
diff --git a/Makefile.dj b/Makefile.dj
index 9187f73a..979ac6f3 100644
--- a/Makefile.dj
+++ b/Makefile.dj
@@ -147,7 +147,7 @@ SPECIALCFLAGS =
# not time-critical anyway.
# Set SPECIALCFLAGS to -q nodirect_code on Encore.
-all: gc.a gctest
+all: gc.a gctest$(EXE_SUFFIX)
pcr: PCR-Makefile gc_private.h gc_hdrs.h gc.h config.h mach_dep.o $(SRCS)
make -f PCR-Makefile depend
@@ -194,19 +194,19 @@ base_lib $(UTILS)
-$(RM) test_cpp test_cpp$(EXE_SUFFIX)
./if_mach HP_PA "" $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a -ldld
./if_not_there test_cpp$(EXE_SUFFIX) $(CXXLD) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a
+ $(RM) test_cpp
c++: gc_cpp.o $(srcdir)/gc_cpp.h test_cpp
-$(RM) on_sparc_sunos5
$(AR) ru gc.a gc_cpp.o
$(RANLIB) gc.a
- ./test_cpp 1
+ ./test_cpp$(EXE_SUFFIX) 1
echo > c++
dyn_load_sunos53.o: dyn_load.c
$(CC) $(CFLAGS) -DSUNOS53_SHARED_LIB -c $(srcdir)/dyn_load.c -o $@
mach_dep.o: $(srcdir)/mach_dep.c
-# $(srcdir)/mips_mach_dep.s $(srcdir)/rs6000_mach_dep.s if_mach if_not_there
-$(RM) mach_dep.o
$(CC) -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
@@ -251,15 +251,20 @@ cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES)
if_mach$(EXE_SUFFIX): $(srcdir)/if_mach.c $(srcdir)/config.h
$(CC) $(CFLAGS) -o if_mach $(srcdir)/if_mach.c
+ -$(RM) if_mach
threadlibs$(EXE_SUFFIX): $(srcdir)/threadlibs.c $(srcdir)/config.h Makefile
$(CC) $(CFLAGS) -o threadlibs $(srcdir)/threadlibs.c
+ -$(RM) threadlibs
if_not_there$(EXE_SUFFIX): $(srcdir)/if_not_there.c
$(CC) $(CFLAGS) -o if_not_there $(srcdir)/if_not_there.c
+ -$(RM) if_not_there
clean:
- -$(RM) gc.a *.o gctest gctest_dyn_link test_cpp \
+ -$(RM) gc.a *.o
+ -$(RM) *.o
+ -$(RM) gctest gctest_dyn_link test_cpp \
setjmp_test mon.out gmon.out a.out core if_not_there if_mach \
$(CORD_OBJS) cordtest cord/cordtest de cord/de
-$(RM) gctest$(EXE_SUFFIX) gctest_dyn_link$(EXE_SUFFIX) test_cpp$(EXE_SUFFIX) \
@@ -270,6 +275,7 @@ clean:
gctest$(EXE_SUFFIX): test.o gc.a
-$(RM) gctest$(EXE_SUFFIX)
$(CC) $(CFLAGS) -o gctest test.o gc.a
+ $(RM) gctest
# If an optimized setjmp_test generates a segmentation fault,
# odds are your compiler is broken. Gctest may still work.
@@ -278,6 +284,7 @@ setjmp_test$(EXE_SUFFIX): $(srcdir)/setjmp_t.c $(srcdir)/gc.h \
if_mach$(EXE_SUFFIX) if_not_there$(EXE_SUFFIX)
-$(RM) setjmp_test$(EXE_SUFFIX)
$(CC) $(CFLAGS) -o setjmp_test $(srcdir)/setjmp_t.c
+ $(RM) setjmp_test
test: KandRtest cord/cordtest$(EXE_SUFFIX)
./cord/cordtest$(EXE_SUFFIX)
@@ -287,4 +294,3 @@ KandRtest: setjmp_test$(EXE_SUFFIX) gctest$(EXE_SUFFIX)
./setjmp_test$(EXE_SUFFIX)
./gctest$(EXE_SUFFIX)
-
diff --git a/README b/README
index 94604cf2..98fce1ee 100644
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
-Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -11,7 +11,11 @@ Permission to modify the code and to distribute modified code is granted,
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
-This is version 4.13alpha1 of a conservative garbage collector for C and C++.
+This is version 4.13alpha2 of a conservative garbage collector for C and C++.
+
+You might find a more recent version of this at
+
+http://reality.sgi.com/boehm/gc.html
HISTORY -
@@ -144,18 +148,19 @@ See README.debugging for details.
seen by the garbage collector. Thus objects pointed to only from such a
region may be prematurely deallocated. It is thus suggested that the
standard "malloc" be used only for memory regions, such as I/O buffers, that
-are guaranteed not to contain pointers. Pointers in C language automatic,
-static, or register variables, are correctly recognized. (Note that
-GC_malloc_uncollectable has semantics similar to standard malloc,
-but allocates objects that are traced by the collector.)
+are guaranteed not to contain pointers to garbage collectable memory.
+Pointers in C language automatic, static, or register variables,
+are correctly recognized. (Note that GC_malloc_uncollectable has semantics
+similar to standard malloc, but allocates objects that are traced by the
+collector.)
The collector does not always know how to find pointers in data
areas that are associated with dynamic libraries. This is easy to
remedy IF you know how to find those data areas on your operating
system (see GC_add_roots). Code for doing this under SunOS, IRIX 5.X and 6.X,
-HP/UX, Alpha OSF/1 and win32 is included and used by default. (See
-README.win32 for win32 details.) On other systems pointers from dynamic library
-data areas may not be considered by the collector.
+HP/UX, Alpha OSF/1, Linux, and win32 is included and used by default. (See
+README.win32 for win32 details.) On other systems pointers from dynamic
+library data areas may not be considered by the collector.
Note that the garbage collector does not need to be informed of shared
read-only data. However if the shared library mechanism can introduce
@@ -1345,8 +1350,27 @@ Since 4.12:
os_dep.c code for dealing with various Linux versions.
- Added workaround for Irix pthreads sigaction bug and possible signal
misdirection problems.
+Since alpha1:
+ - Changed RS6000 STACKBOTTOM.
+ - Integrated Patrick Beard's Mac changes.
+ - Alpha1 didn't compile on Irix m.n, m < 6.
+ - Replaced Makefile.dj with a new one from Gary Leavens.
+ - Added Andrew Stitcher's changes to support SCO OpenServer.
+ - Added PRINT_BLACK_LIST, to allow debugging of high densities of false
+ pointers.
+ - Added code to debug allocator to keep track of return address
+ in GC_malloc caller, thus giving a bit more context.
+ - Changed default behavior of large block allocator to more
+ aggressively avoid fragmentation. This is likely to slow down the
+ collector when it succeeds at reducing space cost.
+ - Integrated Fergus Henderson's CYGWIN32 changes. They are untested,
+ but needed for newer versions.
+ - USE_MMAP had some serious bugs. This caused the collector to fail
+ consistently on Solaris with -DSMALL_CONFIG.
+ - Added Linux threads support, thanks largely to Fergus Henderson.
To do:
+ - I have a backlog of unintegrated contributed platform-specific changes.
- Very large root set sizes (> 16 MB or so) could cause the collector
to abort with an unexpected mark stack overflow. (Thanks again to
Peter Chubb.) NOT YET FIXED. Workaround is to increase the initial
@@ -1358,11 +1382,5 @@ To do:
off DYNAMIC_LOADING in the collector as a workaround. It may also
be possible to conditionally intercept mmap and use GC_exclude_static_roots.
The real fix is to walk rld data structures, which looks possible.
- - SGI pthreads and incremental collection don't mix yet. This actually
- now appears to work under Irix 6.5, but is not enabled by default.
- Integrate MIT and DEC pthreads ports.
- - The Irix pthreads locking mechanism leaves something to be desired.
- It should eventually resort to nanosleep with exponential backoff.
- There seem to be associated performance problems with
- pthreads + incremental GC.
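To illustrate the malloc/GC_malloc_uncollectable distinction described earlier
in this README, here is a hedged sketch; only the GC_* calls come from gc.h,
and the struct and variable names are invented for the example:

#include <stdlib.h>
#include "gc.h"

struct node { struct node * next; int value; };

int main(void)
{
    struct node ** hidden;
    struct node ** root;

    /* Risky: memory from the system malloc is not scanned, so if this     */
    /* block holds the only reference to a collectable node, the node      */
    /* may be reclaimed prematurely.                                       */
    hidden = (struct node **)malloc(sizeof(struct node *));
    *hidden = (struct node *)GC_malloc(sizeof(struct node));

    /* Safer: an uncollectable object is never reclaimed automatically,    */
    /* and unlike malloc memory it is traced, so the node it references    */
    /* stays live.  It can eventually be released with GC_free.            */
    root = (struct node **)GC_malloc_uncollectable(sizeof(struct node *));
    *root = (struct node *)GC_malloc(sizeof(struct node));
    (*root)->value = 1;

    GC_gcollect();
    return 0;
}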
diff --git a/README.Mac b/README.Mac
index 538ad73c..6d2fa743 100644
--- a/README.Mac
+++ b/README.Mac
@@ -1,3 +1,14 @@
+Patrick Beard's Notes for building GC v4.12 with CodeWarrior Pro 2:
+----------------------------------------------------------------------------
+The current build environment for the collector is CodeWarrior Pro 2.
+Projects for CodeWarrior Pro 2 (and for quite a few older versions)
+are distributed in the file Mac_projects.sit.hqx. The project file
+:Mac_projects:gc.prj builds static library versions of the collector.
+:Mac_projects:gctest.prj builds the GC test suite.
+
+Configuring the collector is still done by editing the files
+:Mac_files:MacOS_config.h and :Mac_files:MacOS_Test_config.h.
+
Lars Farm's suggestions on building the collector:
----------------------------------------------------------------------------
Garbage Collection on MacOS - a manual 'MakeFile'
@@ -240,7 +251,7 @@ prefix:
#include <ansi_prefix.mac.h>
#undef NDEBUG
-#define ALL_INTERIOR_POINTERS /* for GC_priv.h
+#define ALL_INTERIOR_POINTERS /* for GC_priv.h */
---- ( cut here ) ----
3) Test that the C++ interface 'gc_cpp.cc/h' works with 'test_cpp.cc'.
diff --git a/README.linux b/README.linux
index 9ec4161f..ffe735bc 100644
--- a/README.linux
+++ b/README.linux
@@ -5,3 +5,35 @@ Incremental GC is supported.
Dynamic libraries are supported on an ELF system. A static executable
should be linked with the gcc option "-Wl,-defsym,_DYNAMIC=0".
+
+The collector appears to work with Linux threads. We have seen
+intermittent hangs in sem_wait. So far we have been unable to reproduce
+these unless the process was being debugged or traced. Thus it's
+possible that the only real issue is that the debugger loses
+signals on rare occasions.
+
+The garbage collector uses SIGPWR and SIGXCPU if it is used with
+Linux threads. These should not be touched by the client program.
+
+To use threads, you need to abide by the following requirements:
+
+1) You need to use LinuxThreads (which are included in libc6).
+
+ The collector relies on some implementation details of the LinuxThreads
+ package. It is unlikely that this code will work on other
+ pthread implementations (in particular it will *not* work with
+ MIT pthreads).
+
+2) You must compile the collector with -DLINUX_THREADS and -D_REENTRANT
+ specified in the Makefile.
+
+3) Every file that makes thread calls should define LINUX_THREADS and
+ _REENTRANT and then include gc.h. Gc.h redefines some of the
+ pthread primitives as macros which also provide the collector with
+ information it requires.
+
+4) Currently dlopen() is probably not safe. The collector must traverse
+ the list of libraries maintained by the runtime loader. That can
+ probably be in an inconsistent state when a thread calling the loader
+ is stopped for GC. (It's possible that this is fixable in the
+ same way it is handled for SOLARIS_THREADS, with GC_dlopen.)
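A minimal client translation unit obeying rules 2) and 3) might look like the
following sketch; the file contents and the suggested link line are
illustrative, not part of the distribution:

#define LINUX_THREADS
#define _REENTRANT
#include <pthread.h>
#include "gc.h"       /* redefines the pthread primitives as GC_ wrappers */
#include <stdio.h>

static void * worker(void * arg)
{
    /* Collectable memory; the thread itself is known to the collector    */
    /* because the pthread_create call below is remapped by gc.h.         */
    int * counters = (int *)GC_malloc(10 * sizeof(int));
    (void)arg;
    counters[0] = 42;
    return counters;
}

int main(void)
{
    pthread_t t;
    void * result;

    pthread_create(&t, 0, worker, 0);
    pthread_join(t, &result);
    printf("first counter: %d\n", *(int *)result);
    return 0;
}

Something along the lines of "cc -DLINUX_THREADS -D_REENTRANT example.c gc.a
-lpthread" should build it; threadlibs.c in this distribution is used by the
Makefile to report the thread libraries to link against.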
diff --git a/README.sgi b/README.sgi
index f21eeb7c..186e4977 100644
--- a/README.sgi
+++ b/README.sgi
@@ -11,8 +11,7 @@ version of malloc is linked in.
Sproc threads are not supported in this version, though there may exist other
ports.
-Pthreads are somewhat supported without incremental collection. This
-requires that:
+Pthreads support is provided. This requires that:
1) You compile the collector with -DIRIX_THREADS specified in the Makefile.
@@ -27,13 +26,12 @@ will run on other pthreads platforms. But please tell me if it does.)
include gc.h. Gc.h redefines some of the pthread primitives as macros which
also provide the collector with information it requires.
-4) For the time being, you should not use dlopen.
+4) pthread_cond_wait and pthread_cond_timed_wait should be prepared for
+premature wakeups. (I believe the pthreads and related standards require this
+anyway. Irix pthreads often terminate a wait if a signal arrives.
+The garbage collector uses signals to stop threads.)
-5) pthread_cond_wait and pthread_cond_timed_wait should be prepared for premature
-wakeups. (I believe the pthreads and realted standards require this anyway.
-Irix pthreads often terminate a wait if a signal arrives. The garbage collector
-uses signals to stop threads.)
-
-6) It is expensive to stop a thread waiting in IO at the time the request is
+5) It is expensive to stop a thread waiting in IO at the time the request is
initiated. Applications with many such threads may not exhibit acceptable
performance with the collector. (Increasing the heap size may help.)
+
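The premature-wakeup caveat in item 4) is just the standard condition variable
idiom: because the collector's signals can cut a wait short, the wait must be
wrapped in a loop that re-tests the predicate. A sketch with invented names:

#include <pthread.h>

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ready = PTHREAD_COND_INITIALIZER;
static int data_available = 0;

void consumer_wait(void)
{
    pthread_mutex_lock(&lock);
    /* A single return from pthread_cond_wait does not mean the         */
    /* condition holds; the wait may have been cut short by a signal.   */
    while (!data_available) {
        pthread_cond_wait(&ready, &lock);
    }
    data_available = 0;
    pthread_mutex_unlock(&lock);
}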
diff --git a/README.solaris2 b/README.solaris2
index 1edd6e66..9ef4648d 100644
--- a/README.solaris2
+++ b/README.solaris2
@@ -50,6 +50,16 @@ GC_malloc, it is necessary to call GC_thr_init explicitly before forking the
first thread. (This avoids a deadlock arising from calling GC_thr_init
with the allocation lock held.)
+It appears that there is a problem in using gc_cpp.h in conjunction with
+Solaris threads and Sun's C++ runtime. Apparently the overloaded new operator
+is invoked by some iostream initialization code before threads are correctly
+initialized. As a result, a call to thr_self() in garbage collector
+initialization segfaults. Currently the only known workaround is to not
+invoke the garbage collector from a user defined global operator new, or to
+have it invoke the garbage-collector's allocators only after main has started.
+(Note that the latter requires a moderately expensive test in operator
+delete.)
+
Hans-J. Boehm
(The above contains my personal opinions, which are probably not shared
by anyone else.)
diff --git a/README.win32 b/README.win32
index 95bf50fe..76c4c6e3 100644
--- a/README.win32
+++ b/README.win32
@@ -43,7 +43,10 @@ to be i386.)
For GNU-win32, use the regular makefile, possibly after uncommenting
the line "include Makefile.DLLs". The latter should be necessary only
-if you want to package the collector as a DLL.
+if you want to package the collector as a DLL. The GNU-win32 port is
+believed to work only for b18, not b19, probably due to linker changes
+in b19. This is probably fixable with a different definition of
+DATASTART and DATAEND in config.h.
For Borland tools, use BCC_MAKEFILE. Note that
Borland's compiler defaults to 1 byte alignment in structures (-a1),
diff --git a/allchblk.c b/allchblk.c
index 3179c934..7a5a3a1c 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -1,6 +1,7 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -72,7 +73,7 @@ void GC_print_hblkfreelist()
/* Initialize hdr for a block containing the indicated size and */
/* kind of objects. */
/* Return FALSE on failure. */
-static bool setup_header(hhdr, sz, kind, flags)
+static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz; /* object size in words */
int kind;
@@ -99,6 +100,12 @@ unsigned char flags;
return(TRUE);
}
+#ifdef EXACT_FIRST
+# define LAST_TRIP 2
+#else
+# define LAST_TRIP 1
+#endif
+
/*
* Allocate (and return pointer to) a heap block
* for objects of size sz words.
@@ -123,7 +130,7 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
register hdr * phdr; /* Header corr. to prevhbp */
signed_word size_needed; /* number of bytes in requested objects */
signed_word size_avail; /* bytes available in this block */
- bool first_time = TRUE;
+ int trip_count = 0;
size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
@@ -137,16 +144,25 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
hbp = (prevhbp == 0? GC_hblkfreelist : phdr->hb_next);
hhdr = HDR(hbp);
- if( prevhbp == GC_savhbp && !first_time) {
- return(0);
+ if( prevhbp == GC_savhbp) {
+ if (trip_count == LAST_TRIP) return(0);
+ ++trip_count;
}
- first_time = FALSE;
-
if( hbp == 0 ) continue;
size_avail = hhdr->hb_sz;
+# ifdef EXACT_FIRST
+ if (trip_count <= 1 && size_avail != size_needed) continue;
+# endif
if (size_avail < size_needed) continue;
+# ifdef PRESERVE_LAST
+ if (size_avail != size_needed
+ && !GC_incremental
+ && GC_in_last_heap_sect(hbp) && GC_should_collect()) {
+ continue;
+ }
+# endif
/* If the next heap block is obviously better, go on. */
/* This prevents us from disassembling a single large block */
/* to get tiny blocks. */
@@ -240,7 +256,7 @@ unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
if (GC_savhbp == hbp) GC_savhbp = prevhbp;
hbp = prevhbp;
hhdr = phdr;
- if (hbp == GC_savhbp) first_time = TRUE;
+ if (hbp == GC_savhbp) --trip_count;
}
# endif
}
diff --git a/alloc.c b/alloc.c
index b7cb6f54..d60544a3 100644
--- a/alloc.c
+++ b/alloc.c
@@ -78,11 +78,11 @@ char * GC_copyright[] =
extern signed_word GC_mem_found; /* Number of reclaimed longwords */
/* after garbage collection */
-bool GC_dont_expand = 0;
+GC_bool GC_dont_expand = 0;
word GC_free_space_divisor = 4;
-extern bool GC_collection_in_progress();
+extern GC_bool GC_collection_in_progress();
int GC_never_stop_func GC_PROTO((void)) { return(0); }
@@ -189,7 +189,7 @@ void GC_clear_a_few_frames()
}
/* Have we allocated enough to amortize a collection? */
-bool GC_should_collect()
+GC_bool GC_should_collect()
{
return(GC_adj_words_allocd() >= min_words_allocd());
}
@@ -249,7 +249,7 @@ void GC_maybe_gc()
* Stop the world garbage collection. Assumes lock held, signals disabled.
* If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
*/
-bool GC_try_to_collect_inner(stop_func)
+GC_bool GC_try_to_collect_inner(stop_func)
GC_stop_func stop_func;
{
if (GC_collection_in_progress()) {
@@ -355,7 +355,7 @@ int GC_collect_a_little GC_PROTO(())
* Otherwise we may fail and return FALSE if this takes too long.
* Increment GC_gc_no if we succeed.
*/
-bool GC_stopped_mark(stop_func)
+GC_bool GC_stopped_mark(stop_func)
GC_stop_func stop_func;
{
register int i;
@@ -610,6 +610,21 @@ word bytes;
}
}
+#ifdef PRESERVE_LAST
+GC_bool GC_in_last_heap_sect(p)
+ptr_t p;
+{
+ struct HeapSect * last_heap_sect = &(GC_heap_sects[GC_n_heap_sects-1]);
+ ptr_t start = last_heap_sect -> hs_start;
+ ptr_t end;
+
+ if (p < start) return FALSE;
+ end = start + last_heap_sect -> hs_bytes;
+ if (p >= end) return FALSE;
+ return TRUE;
+}
+#endif
+
# if !defined(NO_DEBUGGING)
void GC_print_heap_sects()
{
@@ -667,7 +682,7 @@ GC_word GC_max_retries = 0;
* Tiny values of n are rounded up.
* Returns FALSE on failure.
*/
-bool GC_expand_hp_inner(n)
+GC_bool GC_expand_hp_inner(n)
word n;
{
word bytes;
@@ -749,9 +764,9 @@ unsigned GC_fail_count = 0;
/* How many consecutive GC/expansion failures? */
/* Reset by GC_allochblk. */
-bool GC_collect_or_expand(needed_blocks, ignore_off_page)
+GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
-bool ignore_off_page;
+GC_bool ignore_off_page;
{
if (!GC_incremental && !GC_dont_gc && GC_should_collect()) {
diff --git a/blacklst.c b/blacklst.c
index 5917eb7a..44455e5b 100644
--- a/blacklst.c
+++ b/blacklst.c
@@ -52,6 +52,28 @@ word GC_black_list_spacing = MINHINCR*HBLKSIZE; /* Initial rough guess */
void GC_clear_bl();
+void GC_default_print_heap_obj_proc(p)
+ptr_t p;
+{
+ ptr_t base = GC_base(p);
+
+ GC_err_printf2("start: 0x%lx, appr. length: %ld", base, GC_size(base));
+}
+
+void (*GC_print_heap_obj)(/* char * s, ptr_t p */) =
+ GC_default_print_heap_obj_proc;
+
+void GC_print_source_ptr(ptr_t p)
+{
+ ptr_t base = GC_base(p);
+ if (0 == base) {
+ GC_err_printf0("in root set");
+ } else {
+ GC_err_printf0("in object at ");
+ (*GC_print_heap_obj)(base);
+ }
+}
+
void GC_bl_init()
{
# ifndef ALL_INTERIOR_POINTERS
@@ -132,7 +154,12 @@ void GC_unpromote_black_lists()
/* P is not a valid pointer reference, but it falls inside */
/* the plausible heap bounds. */
/* Add it to the normal incomplete black list if appropriate. */
-void GC_add_to_black_list_normal(p)
+#ifdef PRINT_BLACK_LIST
+ void GC_add_to_black_list_normal(p, source)
+ ptr_t source;
+#else
+ void GC_add_to_black_list_normal(p)
+#endif
word p;
{
if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
@@ -140,10 +167,13 @@ word p;
register int index = PHT_HASH(p);
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
-# ifdef PRINTBLACKLIST
+# ifdef PRINT_BLACK_LIST
if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
- GC_printf1("Black listing (normal) 0x%lx\n",
- (unsigned long) p);
+ GC_err_printf2(
+ "Black listing (normal) 0x%lx referenced from 0x%lx ",
+ (unsigned long) p, (unsigned long) source);
+ GC_print_source_ptr(source);
+ GC_err_puts("\n");
}
# endif
set_pht_entry_from_index(GC_incomplete_normal_bl, index);
@@ -154,16 +184,24 @@ word p;
# endif
/* And the same for false pointers from the stack. */
-void GC_add_to_black_list_stack(p)
+#ifdef PRINT_BLACK_LIST
+ void GC_add_to_black_list_stack(p, source)
+ ptr_t source;
+#else
+ void GC_add_to_black_list_stack(p)
+#endif
word p;
{
register int index = PHT_HASH(p);
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
-# ifdef PRINTBLACKLIST
+# ifdef PRINT_BLACK_LIST
if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
- GC_printf1("Black listing (stack) 0x%lx\n",
- (unsigned long)p);
+ GC_err_printf2(
+ "Black listing (stack) 0x%lx referenced from 0x%lx ",
+ (unsigned long)p, (unsigned long)source);
+ GC_print_source_ptr(source);
+ GC_err_puts("\n");
}
# endif
set_pht_entry_from_index(GC_incomplete_stack_bl, index);
diff --git a/checksums.c b/checksums.c
index 98ef08fe..212655f4 100644
--- a/checksums.c
+++ b/checksums.c
@@ -25,7 +25,7 @@
# define OFFSET 0x10000
typedef struct {
- bool new_valid;
+ GC_bool new_valid;
word old_sum;
word new_sum;
struct hblk * block; /* Block to which this refers + OFFSET */
@@ -50,7 +50,7 @@ struct hblk *h;
# ifdef STUBBORN_ALLOC
/* Check whether a stubborn object from the given block appears on */
/* the appropriate free list. */
-bool GC_on_free_list(h)
+GC_bool GC_on_free_list(h)
struct hblk *h;
{
register hdr * hhdr = HDR(h);
diff --git a/config.h b/config.h
index d5d618c5..fc8004ce 100644
--- a/config.h
+++ b/config.h
@@ -102,7 +102,11 @@
# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
/* The above test may need refinement */
# define I386
-# define SCO
+# if defined(_SCO_ELF)
+# define SCO_ELF
+# else
+# define SCO
+# endif
# define mach_type_known
# endif
# if defined(_AUX_SOURCE)
@@ -195,7 +199,9 @@
# endif
# if defined(__DJGPP__)
# define I386
-# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# ifndef DJGPP
+# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# endif
# define mach_type_known
# endif
# if defined(__CYGWIN32__)
@@ -570,9 +576,20 @@
+((word)&etext & 0xfff))
# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
# endif
+# ifdef SCO_ELF
+# define OS_TYPE "SCO_ELF"
+ extern int etext;
+# define DATASTART ((ptr_t)(&etext))
+# define STACKBOTTOM ((ptr_t) 0x08048000)
+# define DYNAMIC_LOADING
+# define ELF_CLASS ELFCLASS32
+# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
# define STACKBOTTOM ((ptr_t)0xc0000000)
+ /* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
+ /* Probably needs to be more flexible, but I don't yet */
+ /* fully understand how flexible. */
# define MPROTECT_VDB
# ifdef __ELF__
# define DYNAMIC_LOADING
@@ -605,11 +622,22 @@
# endif
# endif
# ifdef CYGWIN32
-# define OS_TYPE "CYGWIN32"
- extern int _bss_start__;
-# define DATASTART ((ptr_t)&_bss_start__)
- extern int _data_end__;
-# define DATAEND ((ptr_t)&_data_end__)
+ extern int _data_start__;
+ extern int _data_end__;
+ extern int _bss_start__;
+ extern int _bss_end__;
+ /* For binutils 2.9.1, we have */
+ /* DATASTART = _data_start__ */
+ /* DATAEND = _bss_end__ */
+ /* whereas for some earlier versions it was */
+ /* DATASTART = _bss_start__ */
+ /* DATAEND = _data_end__ */
+ /* To get it right for both, we take the */
+ /* minimum/maximum of the two. */
+# define MAX(x,y) ((x) > (y) ? (x) : (y))
+# define MIN(x,y) ((x) < (y) ? (x) : (y))
+# define DATASTART ((ptr_t) MIN(_data_start__, _bss_start__))
+# define DATAEND ((ptr_t) MAX(_data_end__, _bss_end__))
# undef STACK_GRAN
# define STACK_GRAN 0x10000
# define HEURISTIC1
@@ -704,7 +732,7 @@
extern int _fdata;
# define DATASTART ((ptr_t)(&_fdata))
# ifdef USE_MMAP
-# define HEAP_START (ptr_t)0x40000000
+# define HEAP_START (ptr_t)0x30000000
# else
# define HEAP_START DATASTART
# endif
@@ -726,9 +754,7 @@
# endif
# ifdef IRIX5
# define OS_TYPE "IRIX5"
-# ifndef IRIX_THREADS
-# define MPROTECT_VDB
-# endif
+# define MPROTECT_VDB
# ifdef _MIPS_SZPTR
# define CPP_WORDSZ _MIPS_SZPTR
# define ALIGNMENT (_MIPS_SZPTR/8)
@@ -748,7 +774,7 @@
# define ALIGNMENT 4
# define DATASTART ((ptr_t)0x20000000)
extern int errno;
-# define STACKBOTTOM ((ptr_t)((ulong)&errno + 2*sizeof(int)))
+# define STACKBOTTOM ((ptr_t)((ulong)&errno))
# define DYNAMIC_LOADING
/* For really old versions of AIX, this may have to be removed. */
# endif
@@ -918,10 +944,15 @@
# if defined(IRIX_THREADS) && !defined(IRIX5)
--> inconsistent configuration
# endif
+# if defined(LINUX_THREADS) && !defined(LINUX)
+--> inconsistent configuration
+# endif
# if defined(SOLARIS_THREADS) && !defined(SUNOS5)
--> inconsistent configuration
# endif
-# if defined(PCR) || defined(SRC_M3) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(PCR) || defined(SRC_M3) || \
+ defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
+ defined(IRIX_THREADS) || defined(LINUX_THREADS)
# define THREADS
# endif
diff --git a/cord/README b/cord/README
index 865725ee..62101452 100644
--- a/cord/README
+++ b/cord/README
@@ -9,10 +9,10 @@ Permission to modify the code and to distribute modified code is granted,
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
-Please send bug reports to Hans-J. Boehm (boehm@parc.xerox.com).
+Please send bug reports to Hans-J. Boehm (boehm@sgi.com).
This is a string package that uses a tree-based representation.
-See gc.h for a description of the functions provided. Ec.h describes
+See cord.h for a description of the functions provided. Ec.h describes
"extensible cords", which are essentially output streams that write
to a cord. These allow for efficient construction of cords without
requiring a bound on the size of a cord.
diff --git a/cord/gc.h b/cord/gc.h
index 23037246..09c8ca81 100644
--- a/cord/gc.h
+++ b/cord/gc.h
@@ -293,37 +293,48 @@ GC_API int GC_collect_a_little GC_PROTO((void));
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
+#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
+# define GC_ADD_CALLER
+# define GC_RETURN_ADDR (GC_word)__return_address
+#endif
+
+#ifdef GC_ADD_CALLER
+# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
+# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
+#else
+# define GC_EXTRAS __FILE__, __LINE__
+# define GC_EXTRA_PARAMS char * descr_string, int descr_int
+#endif
+
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
- char * descr_string, int descr_int));
+ GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
-# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
-# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
- __FILE__, __LINE__)
-# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
- __LINE__)
+ GC_EXTRAS)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
- __LINE__)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS);
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
@@ -632,7 +643,7 @@ GC_API void (*GC_is_visible_print_proc)
# endif /* SOLARIS_THREADS */
-#ifdef IRIX_THREADS
+#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
@@ -647,9 +658,9 @@ GC_API void (*GC_is_visible_print_proc)
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join
-#endif /* IRIX_THREADS */
+#endif /* IRIX_THREADS || LINUX_THREADS */
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
+#if defined(THREADS) && !defined(SRC_M3)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
@@ -658,7 +669,7 @@ GC_PTR GC_malloc_many(size_t lb);
/* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
-#endif /* SOLARIS_THREADS */
+#endif /* THREADS && !SRC_M3 */
/*
* If you are planning on putting
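GC_malloc_many, declared above, returns a whole list of objects linked through
their first word. A hedged sketch of a (typically per-thread) free list built
on it follows; the helper name and the way the link word is cleared are
illustrative:

#include "gc.h"

#define NODE_BYTES 32

static void * free_list = 0;   /* objects linked through their first word */

void * fast_alloc(void)
{
    void * result;

    if (free_list == 0) {
        /* One allocation-lock acquisition refills the whole list. */
        free_list = GC_malloc_many(NODE_BYTES);
        if (free_list == 0) return 0;        /* out of memory */
    }
    result = free_list;
    free_list = *(void **)result;            /* follow the first-word link */
    *(void **)result = 0;                    /* clear the link before use  */
    return result;
}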
diff --git a/dbg_mlc.c b/dbg_mlc.c
index 445f123f..6c4b2ba5 100644
--- a/dbg_mlc.c
+++ b/dbg_mlc.c
@@ -28,7 +28,7 @@
typedef struct {
char * oh_string; /* object descriptor string */
word oh_int; /* object descriptor integers */
-# ifdef SAVE_CALL_CHAIN
+# ifdef NEED_CALLINFO
struct callinfo oh_ci[NFRAMES];
# endif
word oh_sz; /* Original malloc arg. */
@@ -43,17 +43,22 @@ typedef struct {
#ifdef SAVE_CALL_CHAIN
-# define ADD_CALL_CHAIN(base) GC_save_callers(((oh *)(base)) -> oh_ci)
+# define ADD_CALL_CHAIN(base, ra) GC_save_callers(((oh *)(base)) -> oh_ci)
# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
#else
-# define ADD_CALL_CHAIN(base)
+# ifdef GC_ADD_CALLER
+# define ADD_CALL_CHAIN(base, ra) ((oh *)(base)) -> oh_ci[0].ci_pc = (ra)
+# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
+# else
+# define ADD_CALL_CHAIN(base, ra)
# define PRINT_CALL_CHAIN(base)
+# endif
#endif
/* Check whether object with base pointer p has debugging info */
/* p is assumed to point to a legitimate object in our part */
/* of the heap. */
-bool GC_has_debug_info(p)
+GC_bool GC_has_debug_info(p)
ptr_t p;
{
register oh * ohdr = (oh *)p;
@@ -135,6 +140,17 @@ ptr_t p;
(unsigned long)(ohdr -> oh_sz));
PRINT_CALL_CHAIN(ohdr);
}
+
+void GC_debug_print_heap_obj_proc(p)
+ptr_t p;
+{
+ if (GC_has_debug_info(p)) {
+ GC_print_obj(p);
+ } else {
+ GC_default_print_heap_obj_proc(p);
+ }
+}
+
void GC_print_smashed_obj(p, clobbered_addr)
ptr_t p, clobbered_addr;
{
@@ -163,6 +179,7 @@ void GC_check_heap_proc();
void GC_start_debugging()
{
GC_check_heap = GC_check_heap_proc;
+ GC_print_heap_obj = GC_debug_print_heap_obj_proc;
GC_debugging_started = TRUE;
GC_register_displacement((word)sizeof(oh));
}
@@ -178,13 +195,24 @@ void GC_start_debugging()
GC_register_displacement((word)sizeof(oh) + offset);
}
+# ifdef GC_ADD_CALLER
+# define EXTRA_ARGS word ra, char * s, int i
+# define OPT_RA ra,
+# else
+# define EXTRA_ARGS char * s, int i
+# define OPT_RA
+# endif
+
# ifdef __STDC__
- GC_PTR GC_debug_malloc(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc(lb, s, i)
size_t lb;
char * s;
int i;
+# ifdef GC_ADD_CALLER
+ --> GC_ADD_CALLER not implemented for K&R C
+# endif
# endif
{
GC_PTR result = GC_malloc(lb + DEBUG_BYTES);
@@ -199,13 +227,13 @@ void GC_start_debugging()
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
#ifdef STUBBORN_ALLOC
# ifdef __STDC__
- GC_PTR GC_debug_malloc_stubborn(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_stubborn(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc_stubborn(lb, s, i)
size_t lb;
@@ -225,7 +253,7 @@ void GC_start_debugging()
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
@@ -272,7 +300,7 @@ GC_PTR p;
#endif /* STUBBORN_ALLOC */
# ifdef __STDC__
- GC_PTR GC_debug_malloc_atomic(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_atomic(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc_atomic(lb, s, i)
size_t lb;
@@ -292,12 +320,12 @@ GC_PTR p;
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
# ifdef __STDC__
- GC_PTR GC_debug_malloc_uncollectable(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_uncollectable(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc_uncollectable(lb, s, i)
size_t lb;
@@ -317,13 +345,13 @@ GC_PTR p;
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
#ifdef ATOMIC_UNCOLLECTABLE
# ifdef __STDC__
- GC_PTR GC_debug_malloc_atomic_uncollectable(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_atomic_uncollectable(size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_malloc_atomic_uncollectable(lb, s, i)
size_t lb;
@@ -344,7 +372,7 @@ GC_PTR p;
if (!GC_debugging_started) {
GC_start_debugging();
}
- ADD_CALL_CHAIN(result);
+ ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
#endif /* ATOMIC_UNCOLLECTABLE */
@@ -387,7 +415,7 @@ GC_PTR p;
# else
{
register hdr * hhdr = HDR(p);
- bool uncollectable = FALSE;
+ GC_bool uncollectable = FALSE;
if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
uncollectable = TRUE;
@@ -403,7 +431,7 @@ GC_PTR p;
}
# ifdef __STDC__
- GC_PTR GC_debug_realloc(GC_PTR p, size_t lb, char *s, int i)
+ GC_PTR GC_debug_realloc(GC_PTR p, size_t lb, EXTRA_ARGS)
# else
GC_PTR GC_debug_realloc(p, lb, s, i)
GC_PTR p;
@@ -419,7 +447,7 @@ GC_PTR p;
register size_t old_sz;
register hdr * hhdr;
- if (p == 0) return(GC_debug_malloc(lb, s, i));
+ if (p == 0) return(GC_debug_malloc(lb, OPT_RA s, i));
if (base == 0) {
GC_err_printf1(
"Attempt to reallocate invalid pointer %lx\n", (unsigned long)p);
@@ -435,21 +463,21 @@ GC_PTR p;
switch (hhdr -> hb_obj_kind) {
# ifdef STUBBORN_ALLOC
case STUBBORN:
- result = GC_debug_malloc_stubborn(lb, s, i);
+ result = GC_debug_malloc_stubborn(lb, OPT_RA s, i);
break;
# endif
case NORMAL:
- result = GC_debug_malloc(lb, s, i);
+ result = GC_debug_malloc(lb, OPT_RA s, i);
break;
case PTRFREE:
- result = GC_debug_malloc_atomic(lb, s, i);
+ result = GC_debug_malloc_atomic(lb, OPT_RA s, i);
break;
case UNCOLLECTABLE:
- result = GC_debug_malloc_uncollectable(lb, s, i);
+ result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
break;
# ifdef ATOMIC_UNCOLLECTABLE
case AUNCOLLECTABLE:
- result = GC_debug_malloc_atomic_uncollectable(lb, s, i);
+ result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
break;
# endif
default:
diff --git a/dyn_load.c b/dyn_load.c
index b19cddac..0785a6e9 100644
--- a/dyn_load.c
+++ b/dyn_load.c
@@ -48,7 +48,7 @@
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
!defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
!defined(HP_PA) && (!defined(LINUX) && !defined(__ELF__)) && \
- !defined(RS6000)
+ !defined(RS6000) && !defined(SCO_ELF)
--> We only know how to find data segments of dynamic libraries for the
--> above. Additional SVR4 variants might not be too
--> hard to add.
@@ -260,7 +260,7 @@ void GC_register_dynamic_libraries()
# endif /* !USE_PROC ... */
# endif /* SUNOS */
-#if defined(LINUX) && defined(__ELF__)
+#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF)
/* Dynamic loading code for Linux running ELF. Somewhat tested on
* Linux/x86, untested but hopefully should work on Linux/Alpha.
@@ -468,7 +468,7 @@ void GC_register_dynamic_libraries()
/* that could possibly have been written to. */
DWORD GC_allocation_granularity;
- extern bool GC_is_heap_base (ptr_t p);
+ extern GC_bool GC_is_heap_base (ptr_t p);
# ifdef WIN32_THREADS
extern void GC_get_next_stack(char *start, char **lo, char **hi);
@@ -503,7 +503,7 @@ void GC_register_dynamic_libraries()
# endif
}
- extern bool GC_win32s;
+ extern GC_bool GC_win32s;
void GC_register_dynamic_libraries()
{
diff --git a/gc.h b/gc.h
index 23037246..09c8ca81 100644
--- a/gc.h
+++ b/gc.h
@@ -293,37 +293,48 @@ GC_API int GC_collect_a_little GC_PROTO((void));
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
+#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
+# define GC_ADD_CALLER
+# define GC_RETURN_ADDR (GC_word)__return_address
+#endif
+
+#ifdef GC_ADD_CALLER
+# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
+# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
+#else
+# define GC_EXTRAS __FILE__, __LINE__
+# define GC_EXTRA_PARAMS char * descr_string, int descr_int
+#endif
+
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
- char * descr_string, int descr_int));
+ GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
-# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
-# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
- __FILE__, __LINE__)
-# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
- __LINE__)
+ GC_EXTRAS)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
- __LINE__)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS);
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
@@ -632,7 +643,7 @@ GC_API void (*GC_is_visible_print_proc)
# endif /* SOLARIS_THREADS */
-#ifdef IRIX_THREADS
+#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
@@ -647,9 +658,9 @@ GC_API void (*GC_is_visible_print_proc)
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join
-#endif /* IRIX_THREADS */
+#endif /* IRIX_THREADS || LINUX_THREADS */
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
+#if defined(THREADS) && !defined(SRC_M3)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
@@ -658,7 +669,7 @@ GC_PTR GC_malloc_many(size_t lb);
/* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
-#endif /* SOLARIS_THREADS */
+#endif /* THREADS && !SRC_M3 */
/*
* If you are planning on putting
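A hedged sketch of the debugging macros above: with GC_DEBUG defined before
including gc.h, GC_MALLOC expands to GC_debug_malloc(sz, GC_EXTRAS), so each
object records __FILE__ and __LINE__ (and a return address where GC_ADD_CALLER
is defined), and GC_gcollect will check such objects for overwrites. The
program itself is invented:

#define GC_DEBUG
#include "gc.h"
#include <string.h>

int main(void)
{
    char * buf = (char *)GC_MALLOC(16);     /* records file/line (and caller) */

    strcpy(buf, "hello");                   /* writing past the 16 bytes would */
                                            /* be reported as a smashed object */
    buf = (char *)GC_REALLOC(buf, 32);
    GC_gcollect();                          /* checks debugged objects for overwrites */
    return 0;
}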
diff --git a/gc_alloc.h b/gc_alloc.h
index 645748b0..1d912db2 100644
--- a/gc_alloc.h
+++ b/gc_alloc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -369,6 +369,12 @@ __GC_SPECIALIZE(unsigned, single_client_alloc)
__GC_SPECIALIZE(float, single_client_alloc)
__GC_SPECIALIZE(double, single_client_alloc)
+#ifdef __STL_USE_STD_ALLOCATORS
+
+???copy stuff from stl_alloc.h or remove it to a different file ???
+
+#endif /* __STL_USE_STD_ALLOCATORS */
+
#endif /* _SGI_SOURCE */
#endif /* GC_ALLOC_H */
diff --git a/gc_mark.h b/gc_mark.h
index 72c38546..ade98a91 100644
--- a/gc_mark.h
+++ b/gc_mark.h
@@ -124,9 +124,20 @@ mse * GC_signal_mark_stack_overflow();
} \
}
-/* Push the contenst of current onto the mark stack if it is a valid */
+#ifdef PRINT_BLACK_LIST
+# define GC_FIND_START(current, hhdr, source) \
+ GC_find_start(current, hhdr, source)
+#else
+# define GC_FIND_START(current, hhdr, source) \
+ GC_find_start(current, hhdr)
+#endif
+
+/* Push the contents of current onto the mark stack if it is a valid */
/* ptr to a currently unmarked object. Mark it. */
-# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit) \
+/* If we assumed a standard-conforming compiler, we could probably */
+/* generate the exit_label transparently. */
+# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
+ source, exit_label) \
{ \
register int displ; /* Displacement in block; first bytes, then words */ \
register hdr * hhdr; \
@@ -134,14 +145,14 @@ mse * GC_signal_mark_stack_overflow();
\
GET_HDR(current,hhdr); \
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
- current = GC_find_start(current, hhdr); \
- if (current == 0) continue; \
+ current = GC_FIND_START(current, hhdr, (word)source); \
+ if (current == 0) goto exit_label; \
hhdr = HDR(current); \
} \
displ = HBLKDISPL(current); \
map_entry = MAP_ENTRY((hhdr -> hb_map), displ); \
if (map_entry == OBJ_INVALID) { \
- GC_ADD_TO_BLACK_LIST_NORMAL(current); continue; \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); goto exit_label; \
} \
displ = BYTES_TO_WORDS(displ); \
displ -= map_entry; \
@@ -153,12 +164,13 @@ mse * GC_signal_mark_stack_overflow();
\
if (mark_word & mark_bit) { \
/* Mark bit is already set */ \
- continue; \
+ goto exit_label; \
} \
*mark_word_addr = mark_word | mark_bit; \
} \
PUSH_OBJ(((word *)(HBLKPTR(current)) + displ), hhdr, \
mark_stack_top, mark_stack_limit) \
+ exit_label: ; \
}
@@ -205,7 +217,7 @@ mse * GC_signal_mark_stack_overflow();
} \
}
-extern bool GC_mark_stack_too_small;
+extern GC_bool GC_mark_stack_too_small;
/* We need a larger mark stack. May be */
/* set by client supplied mark routines.*/
diff --git a/gc_priv.h b/gc_priv.h
index a70032a8..96ba1da1 100644
--- a/gc_priv.h
+++ b/gc_priv.h
@@ -49,22 +49,7 @@ typedef GC_signed_word signed_word;
# include "gc_hdrs.h"
# endif
-# if !defined(bool) && !defined(__cplusplus)
- typedef int bool;
- /* This is problematic with C++ implementations that do not define bool. */
- /* By now they should. */
-# else
-# if defined(_SGI_SOURCE) && !defined(_BOOL)
- typedef int bool;
-# endif
-# if defined(__SUNPRO_CC) && __SUNPRO_CC <= 0x420
- typedef int bool;
-# endif
-# if defined(__cplusplus) && defined(_MSC_VER) && _MSC_VER <= 1020
- /* Visual C++ 4.2 does not have bool type. */
- typedef int bool;
-# endif
-# endif
+typedef int GC_bool;
# define TRUE 1
# define FALSE 0
@@ -148,16 +133,10 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
/* whether the objects are atomic or composite, and */
/* whether or not the block was found to be empty */
- /* duing the reclaim phase. Typically generates */
+ /* during the reclaim phase. Typically generates */
/* about one screenful per garbage collection. */
#undef PRINTBLOCKS
-#define PRINTBLACKLIST /* Print black listed blocks, i.e. values that */
- /* cause the allocator to avoid allocating certain */
- /* blocks in order to avoid introducing "false */
- /* hits". */
-#undef PRINTBLACKLIST
-
#ifdef SILENT
# ifdef PRINTSTATS
# undef PRINTSTATS
@@ -191,6 +170,15 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* May save significant amounts of space for obj_map */
/* entries. */
+#ifndef OLD_BLOCK_ALLOC
+ /* Macros controlling large block allocation strategy. */
+# define EXACT_FIRST /* Make a complete pass through the large object */
+ /* free list before splitting a block */
+# define PRESERVE_LAST /* Do not divide last allocated heap segment */
+ /* unless we would otherwise need to expand the */
+ /* heap. */
+#endif
+
/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
# define MERGE_SIZES
@@ -203,6 +191,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# ifndef LARGE_CONFIG
# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+ /* Must be multiple of largest page size. */
# define MAXHINCR 512 /* Maximum heap increment, in blocks */
# else
# define MINHINCR 64
@@ -230,6 +219,8 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* */
/*********************************/
+#ifdef SAVE_CALL_CHAIN
+
/*
* Number of frames and arguments to save in objects allocated by
* debugging allocator.
@@ -238,12 +229,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* alignment reasons. */
# define NARGS 2 /* Number of arguments to save for each call. */
-
-#ifdef SAVE_CALL_CHAIN
- struct callinfo {
- word ci_pc;
- word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
- };
+# define NEED_CALLINFO
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
@@ -251,6 +237,27 @@ void GC_save_callers (/* struct callinfo info[NFRAMES] */);
void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+#else
+
+# ifdef GC_ADD_CALLER
+# define NFRAMES 1
+# define NARGS 0
+# define NEED_CALLINFO
+# endif
+
+#endif
+
+#ifdef NEED_CALLINFO
+ struct callinfo {
+ word ci_pc;
+# if NARGS > 0
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+# endif
+# if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
+ /* Likely alignment problem. */
+ word ci_dummy;
+# endif
+ };
#endif
@@ -327,6 +334,8 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
/* HBLKSIZE aligned allocation. 0 is taken to mean failure */
/* space is assumed to be cleared. */
+/* In the case of USE_MMAP, the argument must also be a */
+/* physical page size. */
# ifdef PCR
char * real_malloc();
# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
@@ -422,12 +431,58 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define LOCK() mutex_lock(&GC_allocate_ml);
# define UNLOCK() mutex_unlock(&GC_allocate_ml);
# endif
+# ifdef LINUX_THREADS
+# include <pthread.h>
+# ifdef __i386__
+ inline static GC_test_and_set(volatile unsigned int *addr) {
+ int oldval;
+ /* Note: the "xchg" instruction does not need a "lock" prefix */
+ __asm__ __volatile__("xchgl %0, %1"
+ : "=r"(oldval), "=m"(*(addr))
+ : "0"(1), "m"(*(addr)));
+ return oldval;
+ }
+# else
+ -- > Need implementation of GC_test_and_set()
+# endif
+# define GC_clear(addr) (*(addr) = 0)
+
+ extern volatile unsigned int GC_allocate_lock;
+ /* This is not a mutex because mutexes that obey the (optional) */
+ /* POSIX scheduling rules are subject to convoys in high contention */
+ /* applications. This is basically a spin lock. */
+ extern pthread_t GC_lock_holder;
+ extern void GC_lock(void);
+ /* Allocation lock holder. Only set if acquired by client through */
+ /* GC_call_with_alloc_lock. */
+# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
+# define NO_THREAD (pthread_t)(-1)
+# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
+# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
+# ifdef UNDEFINED
+# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
+# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# else
+# define LOCK() \
+ { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
+# define UNLOCK() \
+ GC_clear(&GC_allocate_lock)
+# endif
+ extern GC_bool GC_collecting;
+# define ENTER_GC() \
+ { \
+ GC_collecting = 1; \
+ }
+# define EXIT_GC() GC_collecting = 0;
+# endif /* LINUX_THREADS */
# ifdef IRIX_THREADS
# include <pthread.h>
# include <mutex.h>
# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
-# define __test_and_set(l,v) test_and_set(l,v)
+# define GC_test_and_set(addr, v) test_and_set(addr,v)
+# else
+# define GC_test_and_set(addr, v) __test_and_set(addr,v)
# endif
extern unsigned long GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
@@ -445,20 +500,20 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# else
-# define LOCK() { if (__test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
+# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
# define UNLOCK() __lock_release(&GC_allocate_lock)
# else
# define UNLOCK() GC_allocate_lock = 0
# endif
# endif
- extern bool GC_collecting;
+ extern GC_bool GC_collecting;
# define ENTER_GC() \
{ \
GC_collecting = 1; \
}
# define EXIT_GC() GC_collecting = 0;
-# endif
+# endif /* IRIX_THREADS */
# ifdef WIN32_THREADS
# include <windows.h>
GC_API CRITICAL_SECTION GC_allocate_ml;
@@ -511,7 +566,8 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# else
# if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) \
|| defined(MSWIN32) || defined(MACOS) || defined(DJGPP) \
- || defined(NO_SIGNALS) || defined(IRIX_THREADS)
+ || defined(NO_SIGNALS) || defined(IRIX_THREADS) \
+ || defined(LINUX_THREADS)
/* Also useful for debugging. */
/* Should probably use thr_sigsetmask for SOLARIS_THREADS. */
# define DISABLE_SIGNALS()
@@ -538,7 +594,8 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
PCR_allSigsBlocked, \
PCR_waitForever);
# else
-# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
+ || defined(IRIX_THREADS) || defined(LINUX_THREADS)
void GC_stop_world();
void GC_start_world();
# define STOP_WORLD() GC_stop_world()
@@ -1030,11 +1087,11 @@ extern struct obj_kind {
/* swept. */
word ok_descriptor; /* Descriptor template for objects in this */
/* block. */
- bool ok_relocate_descr;
+ GC_bool ok_relocate_descr;
/* Add object size in bytes to descriptor */
/* template to obtain descriptor. Otherwise */
/* template is used as is. */
- bool ok_init; /* Clear objects before putting them on the free list. */
+ GC_bool ok_init; /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];
/* Predefined kinds: */
# define PTRFREE 0
@@ -1080,14 +1137,15 @@ extern struct hblk * GC_hblkfreelist;
/* header structure associated with */
/* block. */
-extern bool GC_is_initialized; /* GC_init() has been run. */
+extern GC_bool GC_is_initialized; /* GC_init() has been run. */
-extern bool GC_objects_are_marked; /* There are marked objects in */
+extern GC_bool GC_objects_are_marked; /* There are marked objects in */
/* the heap. */
-extern int GC_incremental; /* Using incremental/generational collection. */
+extern GC_bool GC_incremental; /* Using incremental/generational collection. */
-extern bool GC_dirty_maintained;/* Dirty bits are being maintained, */
+extern GC_bool GC_dirty_maintained;
+ /* Dirty bits are being maintained, */
/* either for incremental collection, */
/* or to limit the root set. */
@@ -1097,7 +1155,7 @@ extern bool GC_dirty_maintained;/* Dirty bits are being maintained, */
extern word GC_root_size; /* Total size of registered root sections */
-extern bool GC_debugging_started; /* GC_debug_malloc has been called. */
+extern GC_bool GC_debugging_started; /* GC_debug_malloc has been called. */
extern ptr_t GC_least_plausible_heap_addr;
extern ptr_t GC_greatest_plausible_heap_addr;
@@ -1116,7 +1174,7 @@ extern ptr_t GC_greatest_plausible_heap_addr;
/* object are used. */
-/* Mark bit perations */
+/* Mark bit operations */
/*
* Retrieve, set, clear the mark bit corresponding
@@ -1136,6 +1194,13 @@ extern ptr_t GC_greatest_plausible_heap_addr;
/* Important internal collector routines */
+ptr_t GC_approx_sp();
+
+GC_bool GC_should_collect();
+#ifdef PRESERVE_LAST
+ GC_bool GC_in_last_heap_sect(/* ptr_t */);
+ /* In last added heap section? If so, avoid breaking up. */
+#endif
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
@@ -1150,8 +1215,8 @@ void GC_invalidate_mark_state(); /* Tell the marker that marked */
void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
/* Return after about one pages worth of */
/* work. */
-bool GC_mark_stack_empty();
-bool GC_mark_some(); /* Perform about one pages worth of marking */
+GC_bool GC_mark_stack_empty();
+GC_bool GC_mark_some(); /* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
@@ -1165,7 +1230,7 @@ void GC_push_dirty(/*b,t*/); /* Push all possibly changed */
/* subintervals of [b,t) onto */
/* mark stack. */
#ifndef SMALL_CONFIG
- void GC_push_conditional(/* ptr_t b, ptr_t t, bool all*/);
+ void GC_push_conditional(/* ptr_t b, ptr_t t, GC_bool all*/);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
@@ -1173,7 +1238,7 @@ void GC_push_dirty(/*b,t*/); /* Push all possibly changed */
/* on the third arg. */
void GC_push_all_stack(/*b,t*/); /* As above, but consider */
/* interior pointers as valid */
-void GC_push_roots(/* bool all */); /* Push all or dirty roots. */
+void GC_push_roots(/* GC_bool all */); /* Push all or dirty roots. */
extern void (*GC_push_other_roots)();
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
@@ -1210,12 +1275,12 @@ struct hblk * GC_push_next_marked(/* h */);
/* Ditto, but also mark from clean pages. */
struct hblk * GC_push_next_marked_uncollectable(/* h */);
/* Ditto, but mark only from uncollectable pages. */
-bool GC_stopped_mark(); /* Stop world and mark from all roots */
+GC_bool GC_stopped_mark(); /* Stop world and mark from all roots */
/* and rescuers. */
void GC_clear_hdr_marks(/* hhdr */); /* Clear the mark bits in a header */
void GC_set_hdr_marks(/* hhdr */); /* Set the mark bits in a header */
void GC_add_roots_inner();
-bool GC_is_static_root(/* ptr_t p */);
+GC_bool GC_is_static_root(/* ptr_t p */);
/* Is the address p in one of the registered static */
/* root sections? */
void GC_register_dynamic_libraries();
@@ -1228,15 +1293,27 @@ void GC_register_data_segments();
/* Black listing: */
void GC_bl_init();
# ifndef ALL_INTERIOR_POINTERS
- void GC_add_to_black_list_normal(/* bits */);
+ void GC_add_to_black_list_normal(/* bits, maybe source */);
/* Register bits as a possible future false */
/* reference from the heap or static data */
-# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_normal(bits)
+# ifdef PRINT_BLACK_LIST
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_normal(bits, source)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_normal(bits)
+# endif
# else
-# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_stack(bits)
+# ifdef PRINT_BLACK_LIST
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_stack(bits, source)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_stack(bits)
+# endif
# endif
-void GC_add_to_black_list_stack(/* bits */);
+void GC_add_to_black_list_stack(/* bits, maybe source */);
struct hblk * GC_is_black_listed(/* h, len */);
/* If there are likely to be false references */
/* to a block starting at h of the indicated */
@@ -1265,7 +1342,7 @@ void GC_invalidate_map(/* hdr */);
/* with the block. This identifies */
/* the block as invalid to the mark */
/* routines. */
-bool GC_add_map_entry(/*sz*/);
+GC_bool GC_add_map_entry(/*sz*/);
/* Add a heap block map for objects of */
/* size sz to obj_map. */
/* Return FALSE on failure. */
@@ -1289,7 +1366,7 @@ void GC_freehblk(); /* Deallocate a heap block and mark it */
/* Misc GC: */
void GC_init_inner();
-bool GC_expand_hp_inner();
+GC_bool GC_expand_hp_inner();
void GC_start_reclaim(/*abort_if_found*/);
/* Restore unmarked objects to free */
/* lists, or (if abort_if_found is */
@@ -1305,12 +1382,12 @@ void GC_reclaim_or_delete_all();
/* Arrange for all reclaim lists to be */
/* empty. Judiciously choose between */
/* sweeping and discarding each page. */
-bool GC_reclaim_all(/* GC_stop_func f*/);
+GC_bool GC_reclaim_all(/* GC_stop_func f*/);
/* Reclaim all blocks. Abort (in a */
/* consistent state) if f returns TRUE. */
-bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
-bool GC_never_stop_func(); /* Returns FALSE. */
-bool GC_try_to_collect_inner(/* GC_stop_func f */);
+GC_bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
+GC_bool GC_never_stop_func(); /* Returns FALSE. */
+GC_bool GC_try_to_collect_inner(/* GC_stop_func f */);
/* Collect; caller must have acquired */
/* lock and disabled signals. */
/* Collection is aborted if f returns */
@@ -1320,7 +1397,7 @@ bool GC_try_to_collect_inner(/* GC_stop_func f */);
(void) GC_try_to_collect_inner(GC_never_stop_func)
void GC_finish_collection(); /* Finish collection. Mark bits are */
/* consistent and lock is still held. */
-bool GC_collect_or_expand(/* needed_blocks */);
+GC_bool GC_collect_or_expand(/* needed_blocks */);
/* Collect or expand heap in an attempt */
/* to make the indicated number of free */
/* blocks available. Should be called */
@@ -1364,10 +1441,10 @@ ptr_t GC_allocobj(/* sz_inn_words, kind */);
/* head. */
void GC_init_headers();
-bool GC_install_header(/*h*/);
+GC_bool GC_install_header(/*h*/);
/* Install a header for block h. */
/* Return FALSE on failure. */
-bool GC_install_counts(/*h, sz*/);
+GC_bool GC_install_counts(/*h, sz*/);
/* Set up forwarding counts for block */
/* h of size sz. */
/* Return FALSE on failure. */
@@ -1394,13 +1471,17 @@ extern void (*GC_check_heap)();
/* Check that all objects in the heap with */
/* debugging info are intact. Print */
/* descriptions of any that are not. */
+extern void (*GC_print_heap_obj)(/* ptr_t p */);
+ /* If possible print s followed by a more */
+ /* detailed description of the object */
+ /* referred to by p. */
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
-bool GC_page_was_dirty(/* struct hblk * h */);
+GC_bool GC_page_was_dirty(/* struct hblk * h */);
/* Read retrieved dirty bits. */
-bool GC_page_was_ever_dirty(/* struct hblk * h */);
+GC_bool GC_page_was_ever_dirty(/* struct hblk * h */);
/* Could the page contain valid heap pointers? */
void GC_is_fresh(/* struct hblk * h, word number_of_blocks */);
/* Assert the region currently contains no */
@@ -1410,13 +1491,13 @@ void GC_write_hint(/* struct hblk * h */);
void GC_dirty_init();
/* Slow/general mark bit manipulation: */
-bool GC_is_marked();
+GC_bool GC_is_marked();
void GC_clear_mark_bit();
void GC_set_mark_bit();
/* Stubborn objects: */
void GC_read_changed(); /* Analogous to GC_read_dirty */
-bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
+GC_bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
void GC_clean_changing_list(); /* Collect obsolete changing list entries */
void GC_stubborn_init();
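The LINUX_THREADS section above guards allocation with a plain spin lock built on an atomic exchange rather than a pthread mutex, precisely to dodge the convoy behaviour the comments mention. A minimal, self-contained sketch of that pattern follows; the identifier names are illustrative, not the collector's own, and the real GC_lock() additionally spins and backs off before sleeping:

    /* 0 means free, 1 means held.  xchg is implicitly locked on x86,        */
    /* so no "lock" prefix is needed on the instruction.                     */
    static volatile unsigned int allocate_lock = 0;

    static inline int test_and_set(volatile unsigned int *addr)
    {
        int oldval;
        __asm__ __volatile__("xchgl %0, %1"
                             : "=r" (oldval), "=m" (*addr)
                             : "0" (1), "m" (*addr));
        return oldval;          /* previous value: 0 iff we just acquired it */
    }

    #define LOCK()   { while (test_and_set(&allocate_lock)) { /* spin */ } }
    #define UNLOCK() (allocate_lock = 0)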
diff --git a/headers.c b/headers.c
index a7b6030a..b5cc1af8 100644
--- a/headers.c
+++ b/headers.c
@@ -71,7 +71,12 @@ register word bytes;
if (bytes_to_get <= bytes) {
/* Undo the damage, and get memory directly */
- ptr_t result = (ptr_t)GET_MEM(bytes);
+ bytes_to_get = bytes;
+# ifdef USE_MMAP
+ bytes_to_get += GC_page_size - 1;
+ bytes_to_get &= ~(GC_page_size - 1);
+# endif
+ result = (ptr_t)GET_MEM(bytes_to_get);
scratch_free_ptr -= bytes;
GC_scratch_last_end_ptr = result + bytes;
return(result);
@@ -82,7 +87,12 @@ register word bytes;
GC_printf0("Out of memory - trying to allocate less\n");
# endif
scratch_free_ptr -= bytes;
- return((ptr_t)GET_MEM(bytes));
+ bytes_to_get = bytes;
+# ifdef USE_MMAP
+ bytes_to_get += GC_page_size - 1;
+ bytes_to_get &= ~(GC_page_size - 1);
+# endif
+ return((ptr_t)GET_MEM(bytes_to_get));
}
scratch_free_ptr = result;
GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
@@ -127,7 +137,7 @@ void GC_init_headers()
/* Make sure that there is a bottom level index block for address addr */
/* Return FALSE on failure. */
-static bool get_index(addr)
+static GC_bool get_index(addr)
register word addr;
{
register word hi =
@@ -168,7 +178,7 @@ register word addr;
/* Install a header for block h. */
/* The header is uninitialized. */
/* Returns FALSE on failure. */
-bool GC_install_header(h)
+GC_bool GC_install_header(h)
register struct hblk * h;
{
hdr * result;
@@ -180,7 +190,7 @@ register struct hblk * h;
}
/* Set up forwarding counts for block h of size sz */
-bool GC_install_counts(h, sz)
+GC_bool GC_install_counts(h, sz)
register struct hblk * h;
register word sz; /* bytes */
{
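The USE_MMAP branches above round each scratch request up to a whole number of pages before handing it to GET_MEM. A minimal sketch of that rounding, assuming, as the collector does, that the page size is a power of two:

    #include <stddef.h>

    /* Round bytes up to the next multiple of page_size (a power of two).   */
    static size_t round_up_to_page(size_t bytes, size_t page_size)
    {
        bytes += page_size - 1;        /* step past the next boundary ...    */
        bytes &= ~(page_size - 1);     /* ... then clear the low-order bits  */
        return bytes;
    }

    /* round_up_to_page(5000, 4096) == 8192; round_up_to_page(4096, 4096) == 4096. */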
diff --git a/include/gc.h b/include/gc.h
index 23037246..09c8ca81 100644
--- a/include/gc.h
+++ b/include/gc.h
@@ -293,37 +293,48 @@ GC_API int GC_collect_a_little GC_PROTO((void));
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
+#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
+# define GC_ADD_CALLER
+# define GC_RETURN_ADDR (GC_word)__return_address
+#endif
+
+#ifdef GC_ADD_CALLER
+# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
+# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
+#else
+# define GC_EXTRAS __FILE__, __LINE__
+# define GC_EXTRA_PARAMS char * descr_string, int descr_int
+#endif
+
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
GC_API GC_PTR GC_debug_malloc
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
- GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+ GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
- char * descr_string, int descr_int));
+ GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
-# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
-# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
- __FILE__, __LINE__)
-# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
- __LINE__)
+ GC_EXTRAS)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
- __LINE__)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
@@ -632,7 +643,7 @@ GC_API void (*GC_is_visible_print_proc)
# endif /* SOLARIS_THREADS */
-#ifdef IRIX_THREADS
+#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
@@ -647,9 +658,9 @@ GC_API void (*GC_is_visible_print_proc)
# define pthread_sigmask GC_pthread_sigmask
# define pthread_join GC_pthread_join
-#endif /* IRIX_THREADS */
+#endif /* IRIX_THREADS || LINUX_THREADS */
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
+#if defined(THREADS) && !defined(SRC_M3)
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
@@ -658,7 +669,7 @@ GC_PTR GC_malloc_many(size_t lb);
/* in returned list. */
extern void GC_thr_init(); /* Needed for Solaris/X86 */
-#endif /* SOLARIS_THREADS */
+#endif /* THREADS && !SRC_M3 */
/*
* If you are planning on putting
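With the GC_EXTRAS / GC_EXTRA_PARAMS machinery above, the debug macros forward call-site information automatically: GC_MALLOC(sz) expands to GC_debug_malloc(sz, __FILE__, __LINE__), and gains a leading GC_RETURN_ADDR argument when GC_ADD_CALLER is in effect. A small client-side sketch, assuming only the gc.h names shown above:

    #define GC_DEBUG            /* must be defined before the include        */
    #include "gc.h"

    struct node { struct node *next; int value; };

    struct node *make_node(int v)
    {
        /* Records file and line so GC_gcollect() can report overwrites.    */
        struct node *n = GC_MALLOC(sizeof *n);
        n->value = v;
        n->next  = 0;
        return n;
    }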
diff --git a/include/gc_alloc.h b/include/gc_alloc.h
index 645748b0..1d912db2 100644
--- a/include/gc_alloc.h
+++ b/include/gc_alloc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -369,6 +369,12 @@ __GC_SPECIALIZE(unsigned, single_client_alloc)
__GC_SPECIALIZE(float, single_client_alloc)
__GC_SPECIALIZE(double, single_client_alloc)
+#ifdef __STL_USE_STD_ALLOCATORS
+
+???copy stuff from stl_alloc.h or remove it to a different file ???
+
+#endif /* __STL_USE_STD_ALLOCATORS */
+
#endif /* _SGI_SOURCE */
#endif /* GC_ALLOC_H */
diff --git a/include/private/config.h b/include/private/config.h
index d5d618c5..fc8004ce 100644
--- a/include/private/config.h
+++ b/include/private/config.h
@@ -102,7 +102,11 @@
# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
/* The above test may need refinement */
# define I386
-# define SCO
+# if defined(_SCO_ELF)
+# define SCO_ELF
+# else
+# define SCO
+# endif
# define mach_type_known
# endif
# if defined(_AUX_SOURCE)
@@ -195,7 +199,9 @@
# endif
# if defined(__DJGPP__)
# define I386
-# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# ifndef DJGPP
+# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# endif
# define mach_type_known
# endif
# if defined(__CYGWIN32__)
@@ -570,9 +576,20 @@
+((word)&etext & 0xfff))
# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
# endif
+# ifdef SCO_ELF
+# define OS_TYPE "SCO_ELF"
+ extern int etext;
+# define DATASTART ((ptr_t)(&etext))
+# define STACKBOTTOM ((ptr_t) 0x08048000)
+# define DYNAMIC_LOADING
+# define ELF_CLASS ELFCLASS32
+# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
# define STACKBOTTOM ((ptr_t)0xc0000000)
+ /* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
+ /* Probably needs to be more flexible, but I don't yet */
+ /* fully understand how flexible. */
# define MPROTECT_VDB
# ifdef __ELF__
# define DYNAMIC_LOADING
@@ -605,11 +622,22 @@
# endif
# endif
# ifdef CYGWIN32
-# define OS_TYPE "CYGWIN32"
- extern int _bss_start__;
-# define DATASTART ((ptr_t)&_bss_start__)
- extern int _data_end__;
-# define DATAEND ((ptr_t)&_data_end__)
+ extern int _data_start__;
+ extern int _data_end__;
+ extern int _bss_start__;
+ extern int _bss_end__;
+ /* For binutils 2.9.1, we have */
+ /* DATASTART = _data_start__ */
+ /* DATAEND = _bss_end__ */
+ /* whereas for some earlier versions it was */
+ /* DATASTART = _bss_start__ */
+ /* DATAEND = _data_end__ */
+ /* To get it right for both, we take the */
+ /* minimum/maximum of the two. */
+# define MAX(x,y) ((x) > (y) ? (x) : (y))
+# define MIN(x,y) ((x) < (y) ? (x) : (y))
+# define DATASTART ((ptr_t) MIN(_data_start__, _bss_start__))
+# define DATAEND ((ptr_t) MAX(_data_end__, _bss_end__))
# undef STACK_GRAN
# define STACK_GRAN 0x10000
# define HEURISTIC1
@@ -704,7 +732,7 @@
extern int _fdata;
# define DATASTART ((ptr_t)(&_fdata))
# ifdef USE_MMAP
-# define HEAP_START (ptr_t)0x40000000
+# define HEAP_START (ptr_t)0x30000000
# else
# define HEAP_START DATASTART
# endif
@@ -726,9 +754,7 @@
# endif
# ifdef IRIX5
# define OS_TYPE "IRIX5"
-# ifndef IRIX_THREADS
-# define MPROTECT_VDB
-# endif
+# define MPROTECT_VDB
# ifdef _MIPS_SZPTR
# define CPP_WORDSZ _MIPS_SZPTR
# define ALIGNMENT (_MIPS_SZPTR/8)
@@ -748,7 +774,7 @@
# define ALIGNMENT 4
# define DATASTART ((ptr_t)0x20000000)
extern int errno;
-# define STACKBOTTOM ((ptr_t)((ulong)&errno + 2*sizeof(int)))
+# define STACKBOTTOM ((ptr_t)((ulong)&errno))
# define DYNAMIC_LOADING
/* For really old versions of AIX, this may have to be removed. */
# endif
@@ -918,10 +944,15 @@
# if defined(IRIX_THREADS) && !defined(IRIX5)
--> inconsistent configuration
# endif
+# if defined(LINUX_THREADS) && !defined(LINUX)
+--> inconsistent configuration
+# endif
# if defined(SOLARIS_THREADS) && !defined(SUNOS5)
--> inconsistent configuration
# endif
-# if defined(PCR) || defined(SRC_M3) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(PCR) || defined(SRC_M3) || \
+ defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
+ defined(IRIX_THREADS) || defined(LINUX_THREADS)
# define THREADS
# endif
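The CYGWIN32 change above widens the static root segment to cover both binutils layouts by taking the extremes of the data and bss boundary symbols. A small sketch of the same idea using explicit address comparisons; the symbol names match the diff, while the helper macros are illustrative:

    extern int _data_start__, _data_end__, _bss_start__, _bss_end__;

    #define MIN_ADDR(x, y) ((char *)(x) < (char *)(y) ? (char *)(x) : (char *)(y))
    #define MAX_ADDR(x, y) ((char *)(x) > (char *)(y) ? (char *)(x) : (char *)(y))

    /* Scan from the lower of the two start symbols to the higher of the    */
    /* two end symbols, so either linker layout is fully covered.           */
    #define DATA_SEG_START MIN_ADDR(&_data_start__, &_bss_start__)
    #define DATA_SEG_END   MAX_ADDR(&_data_end__,   &_bss_end__)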
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index a70032a8..96ba1da1 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -49,22 +49,7 @@ typedef GC_signed_word signed_word;
# include "gc_hdrs.h"
# endif
-# if !defined(bool) && !defined(__cplusplus)
- typedef int bool;
- /* This is problematic with C++ implementations that do not define bool. */
- /* By now they should. */
-# else
-# if defined(_SGI_SOURCE) && !defined(_BOOL)
- typedef int bool;
-# endif
-# if defined(__SUNPRO_CC) && __SUNPRO_CC <= 0x420
- typedef int bool;
-# endif
-# if defined(__cplusplus) && defined(_MSC_VER) && _MSC_VER <= 1020
- /* Visual C++ 4.2 does not have bool type. */
- typedef int bool;
-# endif
-# endif
+typedef int GC_bool;
# define TRUE 1
# define FALSE 0
@@ -148,16 +133,10 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
/* whether the objects are atomic or composite, and */
/* whether or not the block was found to be empty */
- /* duing the reclaim phase. Typically generates */
+ /* during the reclaim phase. Typically generates */
/* about one screenful per garbage collection. */
#undef PRINTBLOCKS
-#define PRINTBLACKLIST /* Print black listed blocks, i.e. values that */
- /* cause the allocator to avoid allocating certain */
- /* blocks in order to avoid introducing "false */
- /* hits". */
-#undef PRINTBLACKLIST
-
#ifdef SILENT
# ifdef PRINTSTATS
# undef PRINTSTATS
@@ -191,6 +170,15 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* May save significant amounts of space for obj_map */
/* entries. */
+#ifndef OLD_BLOCK_ALLOC
+ /* Macros controlling large block allocation strategy. */
+# define EXACT_FIRST /* Make a complete pass through the large object */
+ /* free list before splitting a block */
+# define PRESERVE_LAST /* Do not divide last allocated heap segment */
+ /* unless we would otherwise need to expand the */
+ /* heap. */
+#endif
+
/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
# define MERGE_SIZES
@@ -203,6 +191,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# ifndef LARGE_CONFIG
# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+ /* Must be multiple of largest page size. */
# define MAXHINCR 512 /* Maximum heap increment, in blocks */
# else
# define MINHINCR 64
@@ -230,6 +219,8 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* */
/*********************************/
+#ifdef SAVE_CALL_CHAIN
+
/*
* Number of frames and arguments to save in objects allocated by
* debugging allocator.
@@ -238,12 +229,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* alignment reasons. */
# define NARGS 2 /* Number of arguments to save for each call. */
-
-#ifdef SAVE_CALL_CHAIN
- struct callinfo {
- word ci_pc;
- word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
- };
+# define NEED_CALLINFO
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
@@ -251,6 +237,27 @@ void GC_save_callers (/* struct callinfo info[NFRAMES] */);
void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+#else
+
+# ifdef GC_ADD_CALLER
+# define NFRAMES 1
+# define NARGS 0
+# define NEED_CALLINFO
+# endif
+
+#endif
+
+#ifdef NEED_CALLINFO
+ struct callinfo {
+ word ci_pc;
+# if NARGS > 0
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+# endif
+# if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
+ /* Likely alignment problem. */
+ word ci_dummy;
+# endif
+ };
#endif
@@ -327,6 +334,8 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
/* HBLKSIZE aligned allocation. 0 is taken to mean failure */
/* space is assumed to be cleared. */
+/* In the case of USE_MMAP, the argument must also be a */
+/* physical page size. */
# ifdef PCR
char * real_malloc();
# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
@@ -422,12 +431,58 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define LOCK() mutex_lock(&GC_allocate_ml);
# define UNLOCK() mutex_unlock(&GC_allocate_ml);
# endif
+# ifdef LINUX_THREADS
+# include <pthread.h>
+# ifdef __i386__
+ inline static GC_test_and_set(volatile unsigned int *addr) {
+ int oldval;
+ /* Note: the "xchg" instruction does not need a "lock" prefix */
+ __asm__ __volatile__("xchgl %0, %1"
+ : "=r"(oldval), "=m"(*(addr))
+ : "0"(1), "m"(*(addr)));
+ return oldval;
+ }
+# else
+ --> Need implementation of GC_test_and_set()
+# endif
+# define GC_clear(addr) (*(addr) = 0)
+
+ extern volatile unsigned int GC_allocate_lock;
+ /* This is not a mutex because mutexes that obey the (optional) */
+ /* POSIX scheduling rules are subject to convoys in high contention */
+ /* applications. This is basically a spin lock. */
+ extern pthread_t GC_lock_holder;
+ extern void GC_lock(void);
+ /* Allocation lock holder. Only set if acquired by client through */
+ /* GC_call_with_alloc_lock. */
+# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
+# define NO_THREAD (pthread_t)(-1)
+# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
+# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
+# ifdef UNDEFINED
+# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
+# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# else
+# define LOCK() \
+ { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
+# define UNLOCK() \
+ GC_clear(&GC_allocate_lock)
+# endif
+ extern GC_bool GC_collecting;
+# define ENTER_GC() \
+ { \
+ GC_collecting = 1; \
+ }
+# define EXIT_GC() GC_collecting = 0;
+# endif /* LINUX_THREADS */
# ifdef IRIX_THREADS
# include <pthread.h>
# include <mutex.h>
# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
-# define __test_and_set(l,v) test_and_set(l,v)
+# define GC_test_and_set(addr, v) test_and_set(addr,v)
+# else
+# define GC_test_and_set(addr, v) __test_and_set(addr,v)
# endif
extern unsigned long GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
@@ -445,20 +500,20 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# else
-# define LOCK() { if (__test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
+# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
# define UNLOCK() __lock_release(&GC_allocate_lock)
# else
# define UNLOCK() GC_allocate_lock = 0
# endif
# endif
- extern bool GC_collecting;
+ extern GC_bool GC_collecting;
# define ENTER_GC() \
{ \
GC_collecting = 1; \
}
# define EXIT_GC() GC_collecting = 0;
-# endif
+# endif /* IRIX_THREADS */
# ifdef WIN32_THREADS
# include <windows.h>
GC_API CRITICAL_SECTION GC_allocate_ml;
@@ -511,7 +566,8 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# else
# if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) \
|| defined(MSWIN32) || defined(MACOS) || defined(DJGPP) \
- || defined(NO_SIGNALS) || defined(IRIX_THREADS)
+ || defined(NO_SIGNALS) || defined(IRIX_THREADS) \
+ || defined(LINUX_THREADS)
/* Also useful for debugging. */
/* Should probably use thr_sigsetmask for SOLARIS_THREADS. */
# define DISABLE_SIGNALS()
@@ -538,7 +594,8 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
PCR_allSigsBlocked, \
PCR_waitForever);
# else
-# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
+ || defined(IRIX_THREADS) || defined(LINUX_THREADS)
void GC_stop_world();
void GC_start_world();
# define STOP_WORLD() GC_stop_world()
@@ -1030,11 +1087,11 @@ extern struct obj_kind {
/* swept. */
word ok_descriptor; /* Descriptor template for objects in this */
/* block. */
- bool ok_relocate_descr;
+ GC_bool ok_relocate_descr;
/* Add object size in bytes to descriptor */
/* template to obtain descriptor. Otherwise */
/* template is used as is. */
- bool ok_init; /* Clear objects before putting them on the free list. */
+ GC_bool ok_init; /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];
/* Predefined kinds: */
# define PTRFREE 0
@@ -1080,14 +1137,15 @@ extern struct hblk * GC_hblkfreelist;
/* header structure associated with */
/* block. */
-extern bool GC_is_initialized; /* GC_init() has been run. */
+extern GC_bool GC_is_initialized; /* GC_init() has been run. */
-extern bool GC_objects_are_marked; /* There are marked objects in */
+extern GC_bool GC_objects_are_marked; /* There are marked objects in */
/* the heap. */
-extern int GC_incremental; /* Using incremental/generational collection. */
+extern GC_bool GC_incremental; /* Using incremental/generational collection. */
-extern bool GC_dirty_maintained;/* Dirty bits are being maintained, */
+extern GC_bool GC_dirty_maintained;
+ /* Dirty bits are being maintained, */
/* either for incremental collection, */
/* or to limit the root set. */
@@ -1097,7 +1155,7 @@ extern bool GC_dirty_maintained;/* Dirty bits are being maintained, */
extern word GC_root_size; /* Total size of registered root sections */
-extern bool GC_debugging_started; /* GC_debug_malloc has been called. */
+extern GC_bool GC_debugging_started; /* GC_debug_malloc has been called. */
extern ptr_t GC_least_plausible_heap_addr;
extern ptr_t GC_greatest_plausible_heap_addr;
@@ -1116,7 +1174,7 @@ extern ptr_t GC_greatest_plausible_heap_addr;
/* object are used. */
-/* Mark bit perations */
+/* Mark bit operations */
/*
* Retrieve, set, clear the mark bit corresponding
@@ -1136,6 +1194,13 @@ extern ptr_t GC_greatest_plausible_heap_addr;
/* Important internal collector routines */
+ptr_t GC_approx_sp();
+
+GC_bool GC_should_collect();
+#ifdef PRESERVE_LAST
+ GC_bool GC_in_last_heap_sect(/* ptr_t */);
+ /* In last added heap section? If so, avoid breaking up. */
+#endif
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
@@ -1150,8 +1215,8 @@ void GC_invalidate_mark_state(); /* Tell the marker that marked */
void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
/* Return after about one pages worth of */
/* work. */
-bool GC_mark_stack_empty();
-bool GC_mark_some(); /* Perform about one pages worth of marking */
+GC_bool GC_mark_stack_empty();
+GC_bool GC_mark_some(); /* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
@@ -1165,7 +1230,7 @@ void GC_push_dirty(/*b,t*/); /* Push all possibly changed */
/* subintervals of [b,t) onto */
/* mark stack. */
#ifndef SMALL_CONFIG
- void GC_push_conditional(/* ptr_t b, ptr_t t, bool all*/);
+ void GC_push_conditional(/* ptr_t b, ptr_t t, GC_bool all*/);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
@@ -1173,7 +1238,7 @@ void GC_push_dirty(/*b,t*/); /* Push all possibly changed */
/* on the third arg. */
void GC_push_all_stack(/*b,t*/); /* As above, but consider */
/* interior pointers as valid */
-void GC_push_roots(/* bool all */); /* Push all or dirty roots. */
+void GC_push_roots(/* GC_bool all */); /* Push all or dirty roots. */
extern void (*GC_push_other_roots)();
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
@@ -1210,12 +1275,12 @@ struct hblk * GC_push_next_marked(/* h */);
/* Ditto, but also mark from clean pages. */
struct hblk * GC_push_next_marked_uncollectable(/* h */);
/* Ditto, but mark only from uncollectable pages. */
-bool GC_stopped_mark(); /* Stop world and mark from all roots */
+GC_bool GC_stopped_mark(); /* Stop world and mark from all roots */
/* and rescuers. */
void GC_clear_hdr_marks(/* hhdr */); /* Clear the mark bits in a header */
void GC_set_hdr_marks(/* hhdr */); /* Set the mark bits in a header */
void GC_add_roots_inner();
-bool GC_is_static_root(/* ptr_t p */);
+GC_bool GC_is_static_root(/* ptr_t p */);
/* Is the address p in one of the registered static */
/* root sections? */
void GC_register_dynamic_libraries();
@@ -1228,15 +1293,27 @@ void GC_register_data_segments();
/* Black listing: */
void GC_bl_init();
# ifndef ALL_INTERIOR_POINTERS
- void GC_add_to_black_list_normal(/* bits */);
+ void GC_add_to_black_list_normal(/* bits, maybe source */);
/* Register bits as a possible future false */
/* reference from the heap or static data */
-# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_normal(bits)
+# ifdef PRINT_BLACK_LIST
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_normal(bits, source)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_normal(bits)
+# endif
# else
-# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_stack(bits)
+# ifdef PRINT_BLACK_LIST
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_stack(bits, source)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ GC_add_to_black_list_stack(bits)
+# endif
# endif
-void GC_add_to_black_list_stack(/* bits */);
+void GC_add_to_black_list_stack(/* bits, maybe source */);
struct hblk * GC_is_black_listed(/* h, len */);
/* If there are likely to be false references */
/* to a block starting at h of the indicated */
@@ -1265,7 +1342,7 @@ void GC_invalidate_map(/* hdr */);
/* with the block. This identifies */
/* the block as invalid to the mark */
/* routines. */
-bool GC_add_map_entry(/*sz*/);
+GC_bool GC_add_map_entry(/*sz*/);
/* Add a heap block map for objects of */
/* size sz to obj_map. */
/* Return FALSE on failure. */
@@ -1289,7 +1366,7 @@ void GC_freehblk(); /* Deallocate a heap block and mark it */
/* Misc GC: */
void GC_init_inner();
-bool GC_expand_hp_inner();
+GC_bool GC_expand_hp_inner();
void GC_start_reclaim(/*abort_if_found*/);
/* Restore unmarked objects to free */
/* lists, or (if abort_if_found is */
@@ -1305,12 +1382,12 @@ void GC_reclaim_or_delete_all();
/* Arrange for all reclaim lists to be */
/* empty. Judiciously choose between */
/* sweeping and discarding each page. */
-bool GC_reclaim_all(/* GC_stop_func f*/);
+GC_bool GC_reclaim_all(/* GC_stop_func f*/);
/* Reclaim all blocks. Abort (in a */
/* consistent state) if f returns TRUE. */
-bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
-bool GC_never_stop_func(); /* Returns FALSE. */
-bool GC_try_to_collect_inner(/* GC_stop_func f */);
+GC_bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
+GC_bool GC_never_stop_func(); /* Returns FALSE. */
+GC_bool GC_try_to_collect_inner(/* GC_stop_func f */);
/* Collect; caller must have acquired */
/* lock and disabled signals. */
/* Collection is aborted if f returns */
@@ -1320,7 +1397,7 @@ bool GC_try_to_collect_inner(/* GC_stop_func f */);
(void) GC_try_to_collect_inner(GC_never_stop_func)
void GC_finish_collection(); /* Finish collection. Mark bits are */
/* consistent and lock is still held. */
-bool GC_collect_or_expand(/* needed_blocks */);
+GC_bool GC_collect_or_expand(/* needed_blocks */);
/* Collect or expand heap in an attempt */
/* to make the indicated number of free */
/* blocks available. Should be called */
@@ -1364,10 +1441,10 @@ ptr_t GC_allocobj(/* sz_inn_words, kind */);
/* head. */
void GC_init_headers();
-bool GC_install_header(/*h*/);
+GC_bool GC_install_header(/*h*/);
/* Install a header for block h. */
/* Return FALSE on failure. */
-bool GC_install_counts(/*h, sz*/);
+GC_bool GC_install_counts(/*h, sz*/);
/* Set up forwarding counts for block */
/* h of size sz. */
/* Return FALSE on failure. */
@@ -1394,13 +1471,17 @@ extern void (*GC_check_heap)();
/* Check that all objects in the heap with */
/* debugging info are intact. Print */
/* descriptions of any that are not. */
+extern void (*GC_print_heap_obj)(/* ptr_t p */);
+ /* If possible print s followed by a more */
+ /* detailed description of the object */
+ /* referred to by p. */
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
-bool GC_page_was_dirty(/* struct hblk * h */);
+GC_bool GC_page_was_dirty(/* struct hblk * h */);
/* Read retrieved dirty bits. */
-bool GC_page_was_ever_dirty(/* struct hblk * h */);
+GC_bool GC_page_was_ever_dirty(/* struct hblk * h */);
/* Could the page contain valid heap pointers? */
void GC_is_fresh(/* struct hblk * h, word number_of_blocks */);
/* Assert the region currently contains no */
@@ -1410,13 +1491,13 @@ void GC_write_hint(/* struct hblk * h */);
void GC_dirty_init();
/* Slow/general mark bit manipulation: */
-bool GC_is_marked();
+GC_bool GC_is_marked();
void GC_clear_mark_bit();
void GC_set_mark_bit();
/* Stubborn objects: */
void GC_read_changed(); /* Analogous to GC_read_dirty */
-bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
+GC_bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
void GC_clean_changing_list(); /* Collect obsolete changing list entries */
void GC_stubborn_init();
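The NEED_CALLINFO rework above lets a debug header carry either a saved call chain (SAVE_CALL_CHAIN: NFRAMES frames, each with a pc and NARGS arguments) or only the single return address supplied via GC_ADD_CALLER (NFRAMES 1, NARGS 0, so ci_arg drops out). A sketch of the two resulting layouts, with illustrative constants:

    typedef unsigned long word;

    /* SAVE_CALL_CHAIN shape: several frames, each a pc plus saved args.    */
    #define NFRAMES 6
    #define NARGS   2
    struct callinfo_chain {
        word ci_pc;
        word ci_arg[NARGS];   /* complemented so they don't look like live pointers */
    };

    /* GC_ADD_CALLER-only shape: one frame, no arguments; the pad word is   */
    /* what ci_dummy adds when ALIGN_DOUBLE would otherwise be violated.    */
    struct callinfo_caller {
        word ci_pc;
        word ci_dummy;
    };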
diff --git a/irix_threads.c b/irix_threads.c
index 661a2c0c..f45c4631 100644
--- a/irix_threads.c
+++ b/irix_threads.c
@@ -15,6 +15,10 @@
* Support code for Irix (>=6.2) Pthreads. This relies on properties
* not guaranteed by the Pthread standard. It may or may not be portable
* to other implementations.
+ *
+ * Note that there is a lot of code duplication between linux_threads.c
+ * and irix_threads.c; any changes made here may need to be reflected
+ * there too.
*/
# if defined(IRIX_THREADS)
@@ -107,7 +111,6 @@ void GC_suspend_handler(int sig)
int i;
if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
- pthread_mutex_lock(&GC_suspend_lock);
me = GC_lookup_thread(pthread_self());
/* The lookup here is safe, since I'm doing this on behalf */
/* of a thread which holds the allocation lock in order */
@@ -118,6 +121,7 @@ void GC_suspend_handler(int sig)
pthread_mutex_unlock(&GC_suspend_lock);
return;
}
+ pthread_mutex_lock(&GC_suspend_lock);
me -> stack_ptr = (ptr_t)(&dummy);
me -> stop = STOPPED;
pthread_cond_signal(&GC_suspend_ack_cv);
@@ -127,7 +131,7 @@ void GC_suspend_handler(int sig)
}
-bool GC_thr_initialized = FALSE;
+GC_bool GC_thr_initialized = FALSE;
size_t GC_min_stack_sz;
@@ -198,7 +202,7 @@ GC_thread GC_new_thread(pthread_t id)
int hv = ((word)id) % THREAD_TABLE_SZ;
GC_thread result;
static struct GC_Thread_Rep first_thread;
- static bool first_thread_used = FALSE;
+ static GC_bool first_thread_used = FALSE;
if (!first_thread_used) {
result = &first_thread;
@@ -301,7 +305,7 @@ void GC_stop_world()
case 0:
break;
default:
- ABORT("thr_kill failed");
+ ABORT("pthread_kill failed");
}
}
}
@@ -370,8 +374,6 @@ int GC_is_thread_stack(ptr_t addr)
}
# endif
-extern ptr_t GC_approx_sp();
-
/* We hold allocation lock. We assume the world is stopped. */
void GC_push_all_stacks()
{
@@ -407,12 +409,20 @@ void GC_push_all_stacks()
void GC_thr_init()
{
GC_thread t;
+ struct sigaction act;
GC_thr_initialized = TRUE;
GC_min_stack_sz = HBLKSIZE;
GC_page_sz = sysconf(_SC_PAGESIZE);
- if (sigset(SIG_SUSPEND, GC_suspend_handler) != SIG_DFL)
+ (void) sigaction(SIG_SUSPEND, 0, &act);
+ if (act.sa_handler != SIG_DFL)
ABORT("Previously installed SIG_SUSPEND handler");
+ /* Install handler. */
+ act.sa_handler = GC_suspend_handler;
+ act.sa_flags = SA_RESTART;
+ (void) sigemptyset(&act.sa_mask);
+ if (0 != sigaction(SIG_SUSPEND, &act, 0))
+ ABORT("Failed to install SIG_SUSPEND handler");
/* Add the initial thread, so we can stop it. */
t = GC_new_thread(pthread_self());
t -> stack_size = 0;
@@ -548,7 +558,7 @@ GC_pthread_create(pthread_t *new_thread,
return(result);
}
-bool GC_collecting = 0; /* A hint that we're in the collector and */
+GC_bool GC_collecting = 0; /* A hint that we're in the collector and */
/* holding the allocation lock for an */
/* extended period. */
@@ -558,6 +568,8 @@ bool GC_collecting = 0; /* A hint that we're in the collector and */
unsigned long GC_allocate_lock = 0;
+#define SLEEP_THRESHOLD 3
+
void GC_lock()
{
# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
@@ -566,13 +578,14 @@ void GC_lock()
unsigned my_spin_max;
static unsigned last_spins = 0;
unsigned my_last_spins;
- unsigned junk;
+ volatile unsigned junk;
# define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
int i;
- if (!__test_and_set(&GC_allocate_lock, 1)) {
+ if (!GC_test_and_set(&GC_allocate_lock, 1)) {
return;
}
+ junk = 0;
my_spin_max = spin_max;
my_last_spins = last_spins;
for (i = 0; i < my_spin_max; i++) {
@@ -581,7 +594,7 @@ void GC_lock()
PAUSE;
continue;
}
- if (!__test_and_set(&GC_allocate_lock, 1)) {
+ if (!GC_test_and_set(&GC_allocate_lock, 1)) {
/*
* got it!
* Spinning worked. Thus we're probably not being scheduled
@@ -596,11 +609,22 @@ void GC_lock()
/* We are probably being scheduled against the other process. Sleep. */
spin_max = low_spin_max;
yield:
- for (;;) {
- if (!__test_and_set(&GC_allocate_lock, 1)) {
+ for (i = 0;; ++i) {
+ if (!GC_test_and_set(&GC_allocate_lock, 1)) {
return;
}
- sched_yield();
+ if (i < SLEEP_THRESHOLD) {
+ sched_yield();
+ } else {
+ struct timespec ts;
+
+ if (i > 26) i = 26;
+ /* Don't wait for more than about 60msecs, even */
+ /* under extreme contention. */
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1 << i;
+ nanosleep(&ts, 0);
+ }
}
}
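The GC_lock() change above replaces an unbounded sched_yield() loop with a few yields followed by exponentially growing nanosleep()s, capped at 2^26 ns (roughly 67 msec), so a heavily contended lock stops hammering the scheduler. A sketch of just that backoff schedule, assuming a test_and_set primitive like the one shown earlier:

    #include <sched.h>
    #include <time.h>

    extern int test_and_set(volatile unsigned int *addr);  /* assumed primitive */

    #define SLEEP_THRESHOLD 3      /* yields before the first sleep (illustrative) */

    static void lock_with_backoff(volatile unsigned int *lock)
    {
        int i;
        for (i = 0; ; ++i) {
            if (!test_and_set(lock)) return;     /* acquired the lock             */
            if (i < SLEEP_THRESHOLD) {
                sched_yield();                   /* cheap while contention is light */
            } else {
                struct timespec ts;
                int shift = (i > 26) ? 26 : i;   /* cap the wait near 67 msec     */
                ts.tv_sec  = 0;
                ts.tv_nsec = 1L << shift;
                nanosleep(&ts, 0);
            }
        }
    }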
diff --git a/linux_threads.c b/linux_threads.c
new file mode 100644
index 00000000..12e71cea
--- /dev/null
+++ b/linux_threads.c
@@ -0,0 +1,642 @@
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/*
+ * Support code for LinuxThreads, the clone()-based kernel
+ * thread package for Linux which is included in libc6.
+ *
+ * This code relies on implementation details of LinuxThreads,
+ * (i.e. properties not guaranteed by the Pthread standard):
+ *
+ * - the function GC_linux_thread_top_of_stack(void)
+ * relies on the way LinuxThreads lays out thread stacks
+ * in the address space.
+ *
+ * Note that there is a lot of code duplication between linux_threads.c
+ * and irix_threads.c; any changes made here may need to be reflected
+ * there too.
+ */
+
+# if defined(LINUX_THREADS)
+
+# include "gc_priv.h"
+# include <pthread.h>
+# include <time.h>
+# include <errno.h>
+# include <unistd.h>
+# include <sys/mman.h>
+# include <sys/time.h>
+# include <semaphore.h>
+
+#undef pthread_create
+#undef pthread_sigmask
+#undef pthread_join
+
+void GC_thr_init();
+
+#if 0
+void GC_print_sig_mask()
+{
+ sigset_t blocked;
+ int i;
+
+ if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
+ ABORT("pthread_sigmask");
+ GC_printf0("Blocked: ");
+ for (i = 1; i <= MAXSIG; i++) {
+ if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
+ }
+ GC_printf0("\n");
+}
+#endif
+
+/* We use the allocation lock to protect thread-related data structures. */
+
+/* The set of all known threads. We intercept thread creation and */
+/* joins. We never actually create detached threads. We allocate all */
+/* new thread stacks ourselves. These allow us to maintain this */
+/* data structure. */
+/* Protected by the allocation lock. */
+/* Some of this should be declared volatile, but that's inconsistent */
+/* with some library routine declarations. */
+typedef struct GC_Thread_Rep {
+ struct GC_Thread_Rep * next; /* More recently allocated threads */
+ /* with a given pthread id come */
+ /* first. (All but the first are */
+ /* guaranteed to be dead, but we may */
+ /* not yet have registered the join.) */
+ pthread_t id;
+ word flags;
+# define FINISHED 1 /* Thread has exited. */
+# define DETACHED 2 /* Thread is intended to be detached. */
+# define MAIN_THREAD 4 /* True for the original thread only. */
+
+ ptr_t stack_end;
+ ptr_t stack_ptr; /* Valid only when stopped. */
+ int signal;
+ void * status; /* The value returned from the thread. */
+ /* Used only to avoid premature */
+ /* reclamation of any data it might */
+ /* reference. */
+} * GC_thread;
+
+GC_thread GC_lookup_thread(pthread_t id);
+
+/*
+ * The only way to suspend threads given the pthread interface is to send
+ * signals. We can't use SIGSTOP directly, because we need to get the
+ * thread to save its stack pointer in the GC thread table before
+ * suspending. So we have to reserve a signal of our own for this.
+ * This means we have to intercept client calls to change the signal mask.
+ * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
+ * so we need to reuse something else. I chose SIGPWR.
+ * (Perhaps SIGUNUSED would be a better choice.)
+ */
+#define SIG_SUSPEND SIGPWR
+
+#define SIG_RESTART SIGXCPU
+
+sem_t GC_suspend_ack_sem;
+
+/*
+GC_linux_thread_top_of_stack() relies on implementation details of
+LinuxThreads, namely that thread stacks are allocated on 2M boundaries
+and grow to no more than 2M.
+To make sure that we're using LinuxThreads and not some other thread
+package, we generate a dummy reference to `__pthread_initial_thread_bos',
+which is a symbol defined in LinuxThreads, but (hopefully) not in other
+thread packages.
+*/
+extern char * __pthread_initial_thread_bos;
+char **dummy_var_to_force_linux_threads = &__pthread_initial_thread_bos;
+
+#define LINUX_THREADS_STACK_SIZE (2 * 1024 * 1024)
+
+static inline ptr_t GC_linux_thread_top_of_stack(void)
+{
+ char *sp = GC_approx_sp();
+ ptr_t tos = (ptr_t) (((unsigned long)sp | (LINUX_THREADS_STACK_SIZE - 1)) + 1);
+#if DEBUG_THREADS
+ GC_printf1("SP = %lx\n", (unsigned long)sp);
+ GC_printf1("TOS = %lx\n", (unsigned long)tos);
+#endif
+ return tos;
+}
+
+void GC_suspend_handler(int sig)
+{
+ int dummy;
+ pthread_t my_thread = pthread_self();
+ GC_thread me;
+ sigset_t all_sigs;
+ sigset_t old_sigs;
+ int i;
+ sigset_t mask;
+
+ if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
+
+#if DEBUG_THREADS
+ GC_printf1("Suspending 0x%x\n", my_thread);
+#endif
+
+ me = GC_lookup_thread(my_thread);
+ /* The lookup here is safe, since I'm doing this on behalf */
+ /* of a thread which holds the allocation lock in order */
+ /* to stop the world. Thus concurrent modification of the */
+ /* data structure is impossible. */
+ me -> stack_ptr = (ptr_t)(&dummy);
+ me -> stack_end = GC_linux_thread_top_of_stack();
+
+ /* Tell the thread that wants to stop the world that this */
+ /* thread has been stopped. Note that sem_post() is */
+ /* the only async-signal-safe primitive in LinuxThreads. */
+ sem_post(&GC_suspend_ack_sem);
+
+ /* Wait until that thread tells us to restart by sending */
+ /* this thread a SIG_RESTART signal. */
+ /* SIG_RESTART should be masked at this point. Thus there */
+ /* is no race. */
+ if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
+ if (sigdelset(&mask, SIG_RESTART) != 0) ABORT("sigdelset() failed");
+ do {
+ me->signal = 0;
+ sigsuspend(&mask); /* Wait for signal */
+ } while (me->signal != SIG_RESTART);
+
+#if DEBUG_THREADS
+ GC_printf1("Continuing 0x%x\n", my_thread);
+#endif
+}
+
+void GC_restart_handler(int sig)
+{
+ GC_thread me;
+
+ if (sig != SIG_RESTART) ABORT("Bad signal in restart_handler");
+
+ /* Let the GC_suspend_handler() know that we got a SIG_RESTART. */
+ /* The lookup here is safe, since I'm doing this on behalf */
+ /* of a thread which holds the allocation lock in order */
+ /* to stop the world. Thus concurrent modification of the */
+ /* data structure is impossible. */
+ me = GC_lookup_thread(pthread_self());
+ me->signal = SIG_RESTART;
+
+ /*
+ ** Note: even if we didn't do anything useful here,
+ ** it would still be necessary to have a signal handler,
+ ** rather than ignoring the signals, otherwise
+ ** the signals will not be delivered at all, and
+ ** will thus not interrupt the sigsuspend() above.
+ */
+
+#if DEBUG_THREADS
+ GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
+#endif
+}
+
+GC_bool GC_thr_initialized = FALSE;
+
+# define THREAD_TABLE_SZ 128 /* Must be power of 2 */
+volatile GC_thread GC_threads[THREAD_TABLE_SZ];
+
+/* Add a thread to GC_threads. We assume it wasn't already there. */
+/* Caller holds allocation lock. */
+GC_thread GC_new_thread(pthread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ GC_thread result;
+ static struct GC_Thread_Rep first_thread;
+ static GC_bool first_thread_used = FALSE;
+
+ if (!first_thread_used) {
+ result = &first_thread;
+ first_thread_used = TRUE;
+ /* Don't acquire allocation lock, since we may already hold it. */
+ } else {
+ result = (struct GC_Thread_Rep *)
+ GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
+ }
+ if (result == 0) return(0);
+ result -> id = id;
+ result -> next = GC_threads[hv];
+ GC_threads[hv] = result;
+ /* result -> flags = 0; */
+ return(result);
+}
+
+/* Delete a thread from GC_threads. We assume it is there. */
+/* (The code intentionally traps if it wasn't.) */
+/* Caller holds allocation lock. */
+void GC_delete_thread(pthread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ register GC_thread p = GC_threads[hv];
+ register GC_thread prev = 0;
+
+ while (!pthread_equal(p -> id, id)) {
+ prev = p;
+ p = p -> next;
+ }
+ if (prev == 0) {
+ GC_threads[hv] = p -> next;
+ } else {
+ prev -> next = p -> next;
+ }
+}
+
+/* If a thread has been joined, but we have not yet */
+/* been notified, then there may be more than one thread */
+/* in the table with the same pthread id. */
+/* This is OK, but we need a way to delete a specific one. */
+void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ register GC_thread p = GC_threads[hv];
+ register GC_thread prev = 0;
+
+ while (p != gc_id) {
+ prev = p;
+ p = p -> next;
+ }
+ if (prev == 0) {
+ GC_threads[hv] = p -> next;
+ } else {
+ prev -> next = p -> next;
+ }
+}
+
+/* Return a GC_thread corresponding to a given thread_t. */
+/* Returns 0 if it's not there. */
+/* Caller holds allocation lock or otherwise inhibits */
+/* updates. */
+/* If there is more than one thread with the given id we */
+/* return the most recent one. */
+GC_thread GC_lookup_thread(pthread_t id)
+{
+ int hv = ((word)id) % THREAD_TABLE_SZ;
+ register GC_thread p = GC_threads[hv];
+
+ while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
+ return(p);
+}
+
+/* Caller holds allocation lock. */
+void GC_stop_world()
+{
+ pthread_t my_thread = pthread_self();
+ register int i;
+ register GC_thread p;
+ register int n_live_threads = 0;
+ register int result;
+
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> id != my_thread) {
+ if (p -> flags & FINISHED) continue;
+ n_live_threads++;
+ #if DEBUG_THREADS
+ GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
+ #endif
+ result = pthread_kill(p -> id, SIG_SUSPEND);
+ switch(result) {
+ case ESRCH:
+ /* Not really there anymore. Possible? */
+ n_live_threads--;
+ break;
+ case 0:
+ break;
+ default:
+ ABORT("pthread_kill failed");
+ }
+ }
+ }
+ }
+ for (i = 0; i < n_live_threads; i++) {
+ sem_wait(&GC_suspend_ack_sem);
+ }
+ #if DEBUG_THREADS
+ GC_printf1("World stopped 0x%x\n", pthread_self());
+ #endif
+}
+
+/* Caller holds allocation lock. */
+void GC_start_world()
+{
+ pthread_t my_thread = pthread_self();
+ register int i;
+ register GC_thread p;
+ register int n_live_threads = 0;
+ register int result;
+
+# if DEBUG_THREADS
+ GC_printf0("World starting\n");
+# endif
+
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> id != my_thread) {
+ if (p -> flags & FINISHED) continue;
+ n_live_threads++;
+ #if DEBUG_THREADS
+ GC_printf1("Sending restart signal to 0x%x\n", p -> id);
+ #endif
+ result = pthread_kill(p -> id, SIG_RESTART);
+ switch(result) {
+ case ESRCH:
+ /* Not really there anymore. Possible? */
+ n_live_threads--;
+ break;
+ case 0:
+ break;
+ default:
+ ABORT("pthread_kill failed");
+ }
+ }
+ }
+ }
+ #if DEBUG_THREADS
+ GC_printf0("World started\n");
+ #endif
+}
+
+/* We hold allocation lock. We assume the world is stopped. */
+void GC_push_all_stacks()
+{
+ register int i;
+ register GC_thread p;
+ register ptr_t sp = GC_approx_sp();
+ register ptr_t lo, hi;
+ pthread_t me = pthread_self();
+
+ if (!GC_thr_initialized) GC_thr_init();
+ #if DEBUG_THREADS
+ GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
+ #endif
+ for (i = 0; i < THREAD_TABLE_SZ; i++) {
+ for (p = GC_threads[i]; p != 0; p = p -> next) {
+ if (p -> flags & FINISHED) continue;
+ if (pthread_equal(p -> id, me)) {
+ lo = GC_approx_sp();
+ } else {
+ lo = p -> stack_ptr;
+ }
+ if ((p -> flags & MAIN_THREAD) == 0) {
+ if (pthread_equal(p -> id, me)) {
+ hi = GC_linux_thread_top_of_stack();
+ } else {
+ hi = p -> stack_end;
+ }
+ } else {
+ /* The original stack. */
+ hi = GC_stackbottom;
+ }
+ #if DEBUG_THREADS
+ GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
+ (unsigned long) p -> id,
+ (unsigned long) lo, (unsigned long) hi);
+ #endif
+ GC_push_all_stack(lo, hi);
+ }
+ }
+}
+
+
+/* We hold the allocation lock. */
+void GC_thr_init()
+{
+ GC_thread t;
+ struct sigaction act;
+
+ GC_thr_initialized = TRUE;
+
+ if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
+ ABORT("sem_init failed");
+
+ act.sa_flags = SA_RESTART;
+ if (sigfillset(&act.sa_mask) != 0) {
+ ABORT("sigfillset() failed");
+ }
+ /* SIG_RESTART is unmasked by the handler when necessary. */
+ act.sa_handler = GC_suspend_handler;
+ if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
+ ABORT("Cannot set SIG_SUSPEND handler");
+ }
+
+ act.sa_handler = GC_restart_handler;
+ if (sigaction(SIG_RESTART, &act, NULL) != 0) {
+ ABORT("Cannot set SIG_SUSPEND handler");
+ }
+
+ /* Add the initial thread, so we can stop it. */
+ t = GC_new_thread(pthread_self());
+ t -> stack_ptr = (ptr_t)(&t);
+ t -> flags = DETACHED | MAIN_THREAD;
+}
+
+int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+ sigset_t fudged_set;
+
+ if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
+ fudged_set = *set;
+ sigdelset(&fudged_set, SIG_SUSPEND);
+ set = &fudged_set;
+ }
+ return(pthread_sigmask(how, set, oset));
+}
+
+struct start_info {
+ void *(*start_routine)(void *);
+ void *arg;
+};
+
+void GC_thread_exit_proc(void *dummy)
+{
+ GC_thread me;
+
+ LOCK();
+ me = GC_lookup_thread(pthread_self());
+ if (me -> flags & DETACHED) {
+ GC_delete_thread(pthread_self());
+ } else {
+ me -> flags |= FINISHED;
+ }
+ UNLOCK();
+}
+
+int GC_pthread_join(pthread_t thread, void **retval)
+{
+ int result;
+ GC_thread thread_gc_id;
+
+ LOCK();
+ thread_gc_id = GC_lookup_thread(thread);
+ /* This is guaranteed to be the intended one, since the thread id */
+ /* can't have been recycled by pthreads. */
+ UNLOCK();
+ result = pthread_join(thread, retval);
+ LOCK();
+ /* Here the pthread thread id may have been recycled. */
+ GC_delete_gc_thread(thread, thread_gc_id);
+ UNLOCK();
+ return result;
+}
+
+void * GC_start_routine(void * arg)
+{
+ struct start_info * si = arg;
+ void * result;
+ GC_thread me;
+
+ LOCK();
+ me = GC_lookup_thread(pthread_self());
+ UNLOCK();
+ pthread_cleanup_push(GC_thread_exit_proc, 0);
+# ifdef DEBUG_THREADS
+ GC_printf1("Starting thread 0x%x\n", pthread_self());
+ GC_printf1("pid = %ld\n", (long) getpid());
+ GC_printf1("sp = 0x%lx\n", (long) &arg);
+# endif
+ result = (*(si -> start_routine))(si -> arg);
+#if DEBUG_THREADS
+ GC_printf1("Finishing thread 0x%x\n", pthread_self());
+#endif
+ me -> status = result;
+ me -> flags |= FINISHED;
+ pthread_cleanup_pop(1);
+ /* This involves acquiring the lock, ensuring that we can't exit */
+ /* while a collection that thinks we're alive is trying to stop */
+ /* us. */
+ return(result);
+}
+
+int
+GC_pthread_create(pthread_t *new_thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *), void *arg)
+{
+ int result;
+ GC_thread t;
+ pthread_t my_new_thread;
+ void * stack;
+ size_t stacksize;
+ pthread_attr_t new_attr;
+ int detachstate;
+ word my_flags = 0;
+ struct start_info * si = GC_malloc(sizeof(struct start_info));
+
+ if (0 == si) return(ENOMEM);
+ si -> start_routine = start_routine;
+ si -> arg = arg;
+ LOCK();
+ if (!GC_thr_initialized) GC_thr_init();
+ if (NULL == attr) {
+ stack = 0;
+ (void) pthread_attr_init(&new_attr);
+ } else {
+ new_attr = *attr;
+ }
+ pthread_attr_getdetachstate(&new_attr, &detachstate);
+ if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
+ result = pthread_create(&my_new_thread, &new_attr, GC_start_routine, si);
+ /* No GC can start until the thread is registered, since we hold */
+ /* the allocation lock. */
+ if (0 == result) {
+ t = GC_new_thread(my_new_thread);
+ t -> flags = my_flags;
+ t -> stack_ptr = 0;
+ t -> stack_end = 0;
+ if (0 != new_thread) *new_thread = my_new_thread;
+ }
+ UNLOCK();
+ /* pthread_attr_destroy(&new_attr); */
+ return(result);
+}
+
+GC_bool GC_collecting = 0;
+ /* A hint that we're in the collector and */
+ /* holding the allocation lock for an */
+ /* extended period. */
+
+/* Reasonably fast spin locks. Basically the same implementation */
+/* as STL alloc.h.  This isn't really the right way to do this,	*/
+/* but until the POSIX scheduling mess gets straightened out ...	*/
+
+volatile unsigned int GC_allocate_lock = 0;
+
+
+void GC_lock()
+{
+# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
+# define high_spin_max 1000 /* spin cycles for multiprocessor */
+ static unsigned spin_max = low_spin_max;
+ unsigned my_spin_max;
+ static unsigned last_spins = 0;
+ unsigned my_last_spins;
+ volatile unsigned junk;
+# define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
+ int i;
+
+ if (!GC_test_and_set(&GC_allocate_lock)) {
+ return;
+ }
+ junk = 0;
+ my_spin_max = spin_max;
+ my_last_spins = last_spins;
+ for (i = 0; i < my_spin_max; i++) {
+ if (GC_collecting) goto yield;
+ if (i < my_last_spins/2 || GC_allocate_lock) {
+ PAUSE;
+ continue;
+ }
+ if (!GC_test_and_set(&GC_allocate_lock)) {
+ /*
+ * got it!
+ * Spinning worked. Thus we're probably not being scheduled
+ * against the other process with which we were contending.
+ * Thus it makes sense to spin longer the next time.
+ */
+ last_spins = i;
+ spin_max = high_spin_max;
+ return;
+ }
+ }
+ /* We are probably being scheduled against the other process. Sleep. */
+ spin_max = low_spin_max;
+yield:
+ for (i = 0;; ++i) {
+ if (!GC_test_and_set(&GC_allocate_lock)) {
+ return;
+ }
+# define SLEEP_THRESHOLD 12
+ /* nanosleep(<= 2ms) just spins under Linux. We */
+ /* want to be careful to avoid that behavior. */
+ if (i < SLEEP_THRESHOLD) {
+ sched_yield();
+ } else {
+ struct timespec ts;
+
+ if (i > 26) i = 26;
+ /* Don't wait for more than about 60msecs, even */
+ /* under extreme contention. */
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1 << i;
+ nanosleep(&ts, 0);
+ }
+ }
+}
+
+# endif /* LINUX_THREADS */
+
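For illustration, the locking strategy used by GC_lock above reduces to the following sketch: spin briefly on a test-and-set, fall back to sched_yield(), and finally sleep with exponentially growing nanosleep() delays capped near 60 ms. The demo_* names and the GCC __sync_* builtins are stand-ins for the collector's architecture-specific GC_test_and_set and are not part of this patch.

    #include <sched.h>
    #include <time.h>

    static volatile unsigned int demo_lock = 0;

    static void demo_acquire(void)
    {
        int i;

        /* Spin phase: read first to avoid bouncing the cache line, then */
        /* try an atomic test-and-set.                                   */
        for (i = 0; i < 1000; i++) {
            if (!demo_lock && !__sync_lock_test_and_set(&demo_lock, 1)) return;
        }
        /* Back-off phase: yield for short waits, then sleep with        */
        /* exponentially growing delays, capped near 60 ms.              */
        for (i = 0;; i++) {
            if (!__sync_lock_test_and_set(&demo_lock, 1)) return;
            if (i < 12) {
                sched_yield();
            } else {
                struct timespec ts;

                ts.tv_sec = 0;
                ts.tv_nsec = 1 << (i < 26 ? i : 26);
                nanosleep(&ts, 0);
            }
        }
    }

    static void demo_release(void)
    {
        __sync_lock_release(&demo_lock);
    }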
diff --git a/mach_dep.c b/mach_dep.c
index 06463d16..e6087d94 100644
--- a/mach_dep.c
+++ b/mach_dep.c
@@ -169,8 +169,8 @@ void GC_push_regs()
# endif /* MACOS */
# if defined(I386) &&!defined(OS2) &&!defined(SVR4) &&!defined(MSWIN32) \
- && !defined(SCO) && !(defined(LINUX) && defined(__ELF__)) \
- && !defined(DOS4GW)
+ && !defined(SCO) && !defined(SCO_ELF) && !(defined(LINUX) \
+ && defined(__ELF__)) && !defined(DOS4GW)
/* I386 code, generic code does not appear to work */
      /* It does appear to work under OS2, and asms don't	*/
      /* This is used for some 386 UNIX variants and for CYGWIN32 */
@@ -219,7 +219,7 @@ void GC_push_regs()
__asm add esp,4
# endif
-# if defined(I386) && (defined(SVR4) || defined(SCO))
+# if defined(I386) && (defined(SVR4) || defined(SCO) || defined(SCO_ELF))
/* I386 code, SVR4 variant, generic code does not appear to work */
asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
diff --git a/malloc.c b/malloc.c
index 49e03cee..41553b7f 100644
--- a/malloc.c
+++ b/malloc.c
@@ -21,7 +21,7 @@ void GC_extend_size_map(); /* in misc.c. */
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
-bool GC_alloc_reclaim_list(kind)
+GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
struct hblk ** result = (struct hblk **)
diff --git a/mallocx.c b/mallocx.c
index 598fdfa6..ae8bfffb 100644
--- a/mallocx.c
+++ b/mallocx.c
@@ -25,7 +25,7 @@
extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
void GC_extend_size_map(); /* in misc.c. */
-bool GC_alloc_reclaim_list(); /* in malloc.c */
+GC_bool GC_alloc_reclaim_list(); /* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h */
diff --git a/mark.c b/mark.c
index 1935b5b6..1723a446 100644
--- a/mark.c
+++ b/mark.c
@@ -103,12 +103,12 @@ static struct hblk * scan_ptr;
mark_state_t GC_mark_state = MS_NONE;
-bool GC_mark_stack_too_small = FALSE;
+GC_bool GC_mark_stack_too_small = FALSE;
-bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
+GC_bool GC_objects_are_marked = FALSE; /* Are there collectable marked */
/* objects in the heap? */
-bool GC_collection_in_progress()
+GC_bool GC_collection_in_progress()
{
return(GC_mark_state != MS_NONE);
}
@@ -169,7 +169,7 @@ ptr_t p;
clear_mark_bit_from_hdr(hhdr, word_no);
}
-bool GC_is_marked(p)
+GC_bool GC_is_marked(p)
ptr_t p;
{
register struct hblk *h = HBLKPTR(p);
@@ -233,7 +233,7 @@ static void alloc_mark_stack();
/* Perform a small amount of marking. */
/* We try to touch roughly a page of memory. */
/* Return TRUE if we just finished a mark phase. */
-bool GC_mark_some()
+GC_bool GC_mark_some()
{
switch(GC_mark_state) {
case MS_NONE:
@@ -320,7 +320,7 @@ bool GC_mark_some()
}
-bool GC_mark_stack_empty()
+GC_bool GC_mark_stack_empty()
{
return(GC_mark_stack_top < GC_mark_stack);
}
@@ -339,7 +339,13 @@ bool GC_mark_stack_empty()
/* Returns NIL without black listing if current points to a block */
/* with IGNORE_OFF_PAGE set. */
/*ARGSUSED*/
-word GC_find_start(current, hhdr)
+# ifdef PRINT_BLACK_LIST
+ word GC_find_start(current, hhdr, source)
+ word source;
+# else
+ word GC_find_start(current, hhdr)
+# define source 0
+# endif
register word current;
register hdr * hhdr;
{
@@ -357,18 +363,19 @@ register hdr * hhdr;
if ((word *)orig - (word *)current
>= (ptrdiff_t)(hhdr->hb_sz)) {
/* Pointer past the end of the block */
- GC_ADD_TO_BLACK_LIST_NORMAL(orig);
+ GC_ADD_TO_BLACK_LIST_NORMAL(orig, source);
return(0);
}
return(current);
} else {
- GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
return(0);
}
# else
- GC_ADD_TO_BLACK_LIST_NORMAL(current);
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
return(0);
# endif
+# undef source
}
void GC_invalidate_mark_state()
@@ -444,15 +451,14 @@ void GC_mark_from_mark_stack()
credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
while (descr != 0) {
if ((signed_word)descr < 0) {
- current = *current_p++;
- descr <<= 1;
- if ((ptr_t)current < least_ha) continue;
- if ((ptr_t)current >= greatest_ha) continue;
- PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit);
- } else {
- descr <<= 1;
- current_p++;
+ current = *current_p;
+ if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
+ PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit,
+ current_p, exit1);
+ }
}
+ descr <<= 1;
+ ++ current_p;
}
continue;
case DS_PROC:
@@ -477,10 +483,11 @@ void GC_mark_from_mark_stack()
limit -= 1;
while (current_p <= limit) {
current = *current_p;
+ if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
+ PUSH_CONTENTS(current, GC_mark_stack_top_reg,
+ mark_stack_limit, current_p, exit2);
+ }
current_p = (word *)((char *)current_p + ALIGNMENT);
- if ((ptr_t)current < least_ha) continue;
- if ((ptr_t)current >= greatest_ha) continue;
- PUSH_CONTENTS(current, GC_mark_stack_top_reg, mark_stack_limit);
}
}
GC_mark_stack_top = GC_mark_stack_top_reg;
@@ -655,9 +662,15 @@ word p;
# endif
/* As above, but argument passed preliminary test. */
-void GC_push_one_checked(p, interior_ptrs)
+# ifdef PRINT_BLACK_LIST
+ void GC_push_one_checked(p, interior_ptrs, source)
+ ptr_t source;
+# else
+ void GC_push_one_checked(p, interior_ptrs)
+# define source 0
+# endif
register word p;
-register bool interior_ptrs;
+register GC_bool interior_ptrs;
{
register word r;
register hdr * hhdr;
@@ -695,9 +708,14 @@ register bool interior_ptrs;
/* displ is the word index within the block. */
if (hhdr == 0) {
if (interior_ptrs) {
- GC_add_to_black_list_stack(p);
+# ifdef PRINT_BLACK_LIST
+ GC_add_to_black_list_stack(p, source);
+# else
+ GC_add_to_black_list_stack(p);
+# endif
} else {
- GC_ADD_TO_BLACK_LIST_NORMAL(p);
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+# undef source /* In case we had to define it. */
}
} else {
if (!mark_bit_from_hdr(hhdr, displ)) {
@@ -733,7 +751,7 @@ void GC_add_trace_entry(char *kind, word arg1, word arg2)
if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}
-void GC_print_trace(word gc_no, bool lock)
+void GC_print_trace(word gc_no, GC_bool lock)
{
int i;
struct trace_entry *p;
@@ -989,7 +1007,7 @@ register hdr * hhdr;
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
-bool GC_block_was_dirty(h, hhdr)
+GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
diff --git a/mark_rts.c b/mark_rts.c
index 35d200d3..efe6b640 100644
--- a/mark_rts.c
+++ b/mark_rts.c
@@ -46,7 +46,7 @@ struct roots {
# ifndef MSWIN32
struct roots * r_next;
# endif
- bool r_tmp;
+ GC_bool r_tmp;
/* Delete before registering new dynamic libraries */
};
@@ -85,7 +85,7 @@ void GC_print_static_roots()
/* Primarily for debugging support: */
/* Is the address p in one of the registered static */
/* root sections? */
-bool GC_is_static_root(p)
+GC_bool GC_is_static_root(p)
ptr_t p;
{
static int last_root_set = 0;
@@ -185,7 +185,7 @@ char * b; char * e;
/* reregistering dynamic libraries. */
void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
-bool tmp;
+GC_bool tmp;
{
struct roots * old;
@@ -417,7 +417,7 @@ int all;
*/
void GC_push_roots(all)
-bool all;
+GC_bool all;
{
register int i;
diff --git a/misc.c b/misc.c
index a2af3a6f..4c76af81 100644
--- a/misc.c
+++ b/misc.c
@@ -42,7 +42,7 @@
# ifdef WIN32_THREADS
GC_API CRITICAL_SECTION GC_allocate_ml;
# else
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
# ifdef UNDEFINED
pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
# endif
@@ -59,7 +59,7 @@
GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;
-bool GC_debugging_started = FALSE;
+GC_bool GC_debugging_started = FALSE;
/* defined here so we don't have to load debug_malloc.o */
void (*GC_check_heap)() = (void (*)())0;
@@ -68,9 +68,9 @@ void (*GC_start_call_back)() = (void (*)())0;
ptr_t GC_stackbottom = 0;
-bool GC_dont_gc = 0;
+GC_bool GC_dont_gc = 0;
-bool GC_quiet = 0;
+GC_bool GC_quiet = 0;
/*ARGSUSED*/
GC_PTR GC_default_oom_fn GC_PROTO((size_t bytes_requested))
@@ -232,8 +232,6 @@ word limit;
}
#endif
-extern ptr_t GC_approx_sp(); /* in mark_rts.c */
-
/* Clear some of the inaccessible part of the stack. Returns its */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame. */
@@ -330,8 +328,7 @@ ptr_t arg;
/* Make sure r points to the beginning of the object */
r &= ~(WORDS_TO_BYTES(1) - 1);
{
- register int offset =
- (char *)r - (char *)(HBLKPTR(r)) - HDR_BYTES;
+ register int offset = (char *)r - (char *)(HBLKPTR(r));
register signed_word sz = candidate_hdr -> hb_sz;
# ifdef ALL_INTERIOR_POINTERS
@@ -392,11 +389,7 @@ size_t GC_get_bytes_since_gc GC_PROTO(())
return ((size_t) WORDS_TO_BYTES(GC_words_allocd));
}
-bool GC_is_initialized = FALSE;
-
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
- extern void GC_thr_init();
-#endif
+GC_bool GC_is_initialized = FALSE;
void GC_init()
{
@@ -436,10 +429,11 @@ void GC_init_inner()
/* We need dirty bits in order to find live stack sections. */
GC_dirty_init();
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
GC_thr_init();
# endif
-# if !defined(THREADS) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if !defined(THREADS) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
+ || defined(IRIX_THREADS) || defined(LINUX_THREADS)
if (GC_stackbottom == 0) {
GC_stackbottom = GC_get_stack_base();
}
@@ -563,7 +557,7 @@ void GC_enable_incremental GC_PROTO(())
GC_setpagesize();
# ifdef MSWIN32
{
- extern bool GC_is_win32s();
+ extern GC_bool GC_is_win32s();
/* VirtualProtect is not functional under win32s. */
if (GC_is_win32s()) goto out;
@@ -757,6 +751,35 @@ char * msg;
}
#endif
+#ifdef NEED_CALLINFO
+
+void GC_print_callers (info)
+struct callinfo info[NFRAMES];
+{
+ register int i,j;
+
+# if NFRAMES == 1
+ GC_err_printf0("\tCaller at allocation:\n");
+# else
+ GC_err_printf0("\tCall chain at allocation:\n");
+# endif
+ for (i = 0; i < NFRAMES; i++) {
+ if (info[i].ci_pc == 0) break;
+# if NARGS > 0
+ GC_err_printf0("\t\targs: ");
+ for (j = 0; j < NARGS; j++) {
+ if (j != 0) GC_err_printf0(", ");
+ GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
+ ~(info[i].ci_arg[j]));
+ }
+ GC_err_printf0("\n");
+# endif
+ GC_err_printf1("\t\t##PC##= 0x%X\n", info[i].ci_pc);
+ }
+}
+
+#endif /* NEED_CALLINFO */
+
# ifdef SRC_M3
void GC_enable()
{
diff --git a/new_hblk.c b/new_hblk.c
index af657f41..9f32ae0d 100644
--- a/new_hblk.c
+++ b/new_hblk.c
@@ -164,7 +164,7 @@ int kind;
*prev;
word *last_object; /* points to last object in new hblk */
register struct hblk *h; /* the new heap block */
- register bool clear = GC_obj_kinds[kind].ok_init;
+ register GC_bool clear = GC_obj_kinds[kind].ok_init;
# ifdef PRINTSTATS
if ((sizeof (struct hblk)) > HBLKSIZE) {
diff --git a/obj_map.c b/obj_map.c
index ee00db02..82ebf311 100644
--- a/obj_map.c
+++ b/obj_map.c
@@ -100,7 +100,7 @@ word offset;
/* Add a heap block map for objects of size sz to obj_map. */
/* Return FALSE on failure. */
-bool GC_add_map_entry(sz)
+GC_bool GC_add_map_entry(sz)
word sz;
{
register unsigned obj_start;
diff --git a/os_dep.c b/os_dep.c
index 2ec79053..d64dd5c0 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -14,9 +14,6 @@
# include "gc_priv.h"
-# include <stdio.h>
-# include <signal.h>
-
# if defined(LINUX) && !defined(POWERPC)
# include <linux/version.h>
# if (LINUX_VERSION_CODE <= 0x10400)
@@ -49,6 +46,9 @@
# endif
# endif
+# include <stdio.h>
+# include <signal.h>
+
 /* Blatantly OS dependent routines, except for those that are related to	*/
/* dynamic loading. */
@@ -142,6 +142,8 @@
{
extern ptr_t GC_find_limit();
extern char **_environ;
+ /* This may need to be environ, without the underscore, for */
+ /* some versions. */
GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
}
#endif
@@ -269,7 +271,7 @@ void GC_enable_signals(void)
# define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
# endif
-static bool mask_initialized = FALSE;
+static GC_bool mask_initialized = FALSE;
static SIGSET_T new_mask;
@@ -343,7 +345,7 @@ word GC_page_size;
}
# else
-# if defined(MPROTECT_VDB) || defined(PROC_VDB)
+# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
void GC_setpagesize()
{
GC_page_size = GETPAGESIZE();
@@ -468,7 +470,8 @@ ptr_t GC_get_stack_base()
# endif
# if defined(SUNOS5SIGS) || defined(IRIX5)
- static struct sigaction oldact;
+ static struct sigaction old_segv_act;
+ static struct sigaction old_bus_act;
# else
static handler old_segv_handler, old_bus_handler;
# endif
@@ -479,7 +482,7 @@ ptr_t GC_get_stack_base()
struct sigaction act;
act.sa_handler = GC_fault_handler;
- act.sa_flags = SA_RESTART | SA_SIGINFO | SA_NODEFER;
+ act.sa_flags = SA_RESTART | SA_NODEFER;
/* The presence of SA_NODEFER represents yet another gross */
/* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
/* interact correctly with -lthread. We hide the confusion */
@@ -490,10 +493,16 @@ ptr_t GC_get_stack_base()
# ifdef IRIX_THREADS
/* Older versions have a bug related to retrieving and */
	  /* setting a handler at the same time.		*/
- (void) sigaction(SIGSEGV, 0, &oldact);
+ (void) sigaction(SIGSEGV, 0, &old_segv_act);
(void) sigaction(SIGSEGV, &act, 0);
# else
- (void) sigaction(SIGSEGV, &act, &oldact);
+ (void) sigaction(SIGSEGV, &act, &old_segv_act);
+# ifdef _sigargs /* Irix 5.x, not 6.x */
+ /* Under 5.x, we may get SIGBUS. */
+ /* Pthreads doesn't exist under 5.x, so we don't */
+ /* have to worry in the threads case. */
+ (void) sigaction(SIGBUS, &act, &old_bus_act);
+# endif
# endif /* IRIX_THREADS */
# else
old_segv_handler = signal(SIGSEGV, GC_fault_handler);
@@ -506,7 +515,10 @@ ptr_t GC_get_stack_base()
void GC_reset_fault_handler()
{
# if defined(SUNOS5SIGS) || defined(IRIX5)
- (void) sigaction(SIGSEGV, &oldact, 0);
+ (void) sigaction(SIGSEGV, &old_segv_act, 0);
+# ifdef _sigargs /* Irix 5.x, not 6.x */
+ (void) sigaction(SIGBUS, &old_bus_act, 0);
+# endif
# else
(void) signal(SIGSEGV, old_segv_handler);
# ifdef SIGBUS
@@ -519,7 +531,7 @@ ptr_t GC_get_stack_base()
    /* the smallest location q s.t. [q,p] is addressable (!up).	*/
ptr_t GC_find_limit(p, up)
ptr_t p;
- bool up;
+ GC_bool up;
{
static VOLATILE ptr_t result;
/* Needs to be static, since otherwise it may not be */
@@ -706,9 +718,9 @@ void GC_register_data_segments()
/* all real work is done by GC_register_dynamic_libraries. Under */
/* win32s, we cannot find the data segments associated with dll's. */
  /* We register the main data segment here.				*/
- bool GC_win32s = FALSE; /* We're running under win32s. */
+ GC_bool GC_win32s = FALSE; /* We're running under win32s. */
- bool GC_is_win32s()
+ GC_bool GC_is_win32s()
{
DWORD v = GetVersion();
@@ -748,7 +760,7 @@ void GC_register_data_segments()
/* Is p the start of either the malloc heap, or of one of our */
/* heap sections? */
- bool GC_is_heap_base (ptr_t p)
+ GC_bool GC_is_heap_base (ptr_t p)
{
register unsigned i;
@@ -1000,7 +1012,7 @@ word bytes;
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
- static bool initialized = FALSE;
+ static GC_bool initialized = FALSE;
static int fd;
void *result;
static ptr_t last_addr = HEAP_START;
@@ -1009,6 +1021,7 @@ word bytes;
fd = open("/dev/zero", O_RDONLY);
initialized = TRUE;
}
+ if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
MAP_PRIVATE | MAP_FIXED, fd, 0/* offset */);
if (result == MAP_FAILED) return(0);
@@ -1211,7 +1224,8 @@ void GC_default_push_other_roots()
# endif /* SRC_M3 */
-# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
+# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
+ || defined(IRIX_THREADS) || defined LINUX_THREADS
extern void GC_push_all_stacks();
@@ -1247,7 +1261,7 @@ void (*GC_push_other_roots)() = GC_default_push_other_roots;
* or write only to the stack.
*/
-bool GC_dirty_maintained = FALSE;
+GC_bool GC_dirty_maintained = FALSE;
# ifdef DEFAULT_VDB
@@ -1273,7 +1287,7 @@ void GC_read_dirty()
/* of the pages overlapping h are dirty. This routine may err on the */
/* side of labelling pages as dirty (and this implementation does). */
/*ARGSUSED*/
-bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
return(TRUE);
@@ -1288,7 +1302,7 @@ struct hblk *h;
/* Could any valid GC heap pointer ever have been written to this page? */
/*ARGSUSED*/
-bool GC_page_was_ever_dirty(h)
+GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
return(TRUE);
@@ -1480,7 +1494,7 @@ SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
if (SIG_OK && CODE_OK) {
register struct hblk * h =
(struct hblk *)((word)addr & ~(GC_page_size-1));
- bool in_allocd_block;
+ GC_bool in_allocd_block;
# ifdef SUNOS5SIGS
/* Address is only within the correct physical page. */
@@ -1564,7 +1578,7 @@ struct hblk *h;
{
register struct hblk * h_trunc;
register unsigned i;
- register bool found_clean;
+ register GC_bool found_clean;
if (!GC_dirty_maintained) return;
h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
@@ -1634,11 +1648,17 @@ void GC_dirty_init()
# else
sigaction(SIGSEGV, &act, &oldact);
# endif
- if (oldact.sa_flags & SA_SIGINFO) {
+# if defined(_sigargs)
+ /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
+ /* sa_sigaction. */
+ GC_old_segv_handler = oldact.sa_handler;
+# else /* Irix 6.x or SUNOS5SIGS */
+ if (oldact.sa_flags & SA_SIGINFO) {
GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
- } else {
+ } else {
GC_old_segv_handler = oldact.sa_handler;
- }
+ }
+# endif
if (GC_old_segv_handler == SIG_IGN) {
GC_err_printf0("Previously ignored segmentation violation!?");
GC_old_segv_handler = SIG_DFL;
@@ -1686,7 +1706,7 @@ void GC_read_dirty()
GC_protect_heap();
}
-bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(h)
struct hblk * h;
{
register word index = PHT_HASH(h);
@@ -1762,13 +1782,9 @@ word len;
GC_begin_syscall();
GC_unprotect_range(buf, (word)nbyte);
# ifdef IRIX5
- /* Indirect system call exists, but is undocumented, and */
- /* always seems to return EINVAL. There seems to be no */
- /* general way to wrap system calls, since the system call */
- /* convention appears to require an immediate argument for */
- /* the system call number, and building the required code */
- /* in the data segment also seems dangerous. We can fake it */
- /* for read; anything else is up to the client. */
+ /* Indirect system call may not always be easily available. */
+ /* We could call _read, but that would interfere with the */
+ /* libpthread interception of read. */
{
struct iovec iov;
@@ -1785,7 +1801,7 @@ word len;
#endif /* !MSWIN32 */
/*ARGSUSED*/
-bool GC_page_was_ever_dirty(h)
+GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
return(TRUE);
@@ -1998,11 +2014,11 @@ int dummy;
#undef READ
-bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
register word index = PHT_HASH(h);
- register bool result;
+ register GC_bool result;
result = get_pht_entry_from_index(GC_grungy_pages, index);
# ifdef SOLARIS_THREADS
@@ -2016,11 +2032,11 @@ struct hblk *h;
return(result);
}
-bool GC_page_was_ever_dirty(h)
+GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
register word index = PHT_HASH(h);
- register bool result;
+ register GC_bool result;
result = get_pht_entry_from_index(GC_written_pages, index);
# ifdef SOLARIS_THREADS
@@ -2096,7 +2112,7 @@ void GC_read_dirty()
}
}
-bool GC_page_was_dirty(h)
+GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
@@ -2161,26 +2177,5 @@ struct callinfo info[NFRAMES];
#endif /* SAVE_CALL_CHAIN */
#endif /* SPARC */
-#ifdef SAVE_CALL_CHAIN
-
-void GC_print_callers (info)
-struct callinfo info[NFRAMES];
-{
- register int i,j;
-
- GC_err_printf0("\tCall chain at allocation:\n");
- for (i = 0; i < NFRAMES; i++) {
- if (info[i].ci_pc == 0) break;
- GC_err_printf0("\t\targs: ");
- for (j = 0; j < NARGS; j++) {
- if (j != 0) GC_err_printf0(", ");
- GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
- ~(info[i].ci_arg[j]));
- }
- GC_err_printf1("\n\t\t##PC##= 0x%X\n", info[i].ci_pc);
- }
-}
-
-#endif /* SAVE_CALL_CHAIN */
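The GC_unix_get_mem hunk above now rejects requests that are not a multiple of the page size before mapping /dev/zero. A simplified sketch of that allocation path, omitting the MAP_FIXED/last_addr bookkeeping of the real routine (demo_get_mem is a hypothetical name):

    #include <stddef.h>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *demo_get_mem(size_t bytes)
    {
        static int fd = -1;
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        void *result;

        if (bytes & (page - 1)) return 0;        /* only page multiples allowed */
        if (fd == -1) fd = open("/dev/zero", O_RDONLY);
        if (fd == -1) return 0;
        result = mmap(0, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        return result == MAP_FAILED ? 0 : result;
    }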
diff --git a/pcr_interface.c b/pcr_interface.c
index 12e3181e..4c950933 100644
--- a/pcr_interface.c
+++ b/pcr_interface.c
@@ -61,7 +61,7 @@ void * GC_DebugReallocProc(void * old_object, size_t new_size_in_bytes)
typedef struct {
PCR_ERes (*ed_proc)(void *p, size_t size, PCR_Any data);
- bool ed_pointerfree;
+ GC_bool ed_pointerfree;
PCR_ERes ed_fail_code;
PCR_Any ed_client_data;
} enumerate_data;
@@ -137,7 +137,7 @@ struct PCR_MM_ProcsRep GC_DebugRep = {
GC_DummyShutdownProc /* mmp_shutdown */
};
-bool GC_use_debug = 0;
+GC_bool GC_use_debug = 0;
void GC_pcr_install()
{
@@ -156,7 +156,7 @@ PCR_GC_Run(void)
if( !PCR_Base_TestPCRArg("-nogc") ) {
GC_quiet = ( PCR_Base_TestPCRArg("-gctrace") ? 0 : 1 );
- GC_use_debug = (bool)PCR_Base_TestPCRArg("-debug_alloc");
+ GC_use_debug = (GC_bool)PCR_Base_TestPCRArg("-debug_alloc");
GC_init();
if( !PCR_Base_TestPCRArg("-nogc_incremental") ) {
/*
diff --git a/ptr_chck.c b/ptr_chck.c
index d7fe4e1d..f3451ee6 100644
--- a/ptr_chck.c
+++ b/ptr_chck.c
@@ -196,7 +196,7 @@ void (*GC_is_visible_print_proc) GC_PROTO((GC_PTR p)) =
GC_default_is_visible_print_proc;
/* Could p be a stack address? */
-bool GC_on_stack(p)
+GC_bool GC_on_stack(p)
ptr_t p;
{
# ifdef THREADS
@@ -248,7 +248,7 @@ ptr_t p;
if (GC_on_stack(p)) return(p);
hhdr = HDR((word)p);
if (hhdr == 0) {
- bool result;
+ GC_bool result;
if (GC_is_static_root(p)) return(p);
/* Else do it again correctly: */
diff --git a/reclaim.c b/reclaim.c
index 516ffafc..407b4c68 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -59,7 +59,7 @@ word sz;
* memory.
*/
-bool GC_block_empty(hhdr)
+GC_bool GC_block_empty(hhdr)
register hdr * hhdr;
{
register word *p = (word *)(&(hhdr -> hb_marks[0]));
@@ -85,7 +85,7 @@ register hdr * hhdr;
ptr_t GC_reclaim_clear(hbp, hhdr, sz, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
register hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
register word sz;
{
@@ -134,7 +134,7 @@ register word sz;
ptr_t GC_reclaim_clear2(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -182,7 +182,7 @@ register ptr_t list;
ptr_t GC_reclaim_clear4(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -242,7 +242,7 @@ register ptr_t list;
ptr_t GC_reclaim_uninit(hbp, hhdr, sz, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
register hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
register word sz;
{
@@ -283,7 +283,7 @@ register word sz;
ptr_t GC_reclaim_uninit2(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -330,7 +330,7 @@ register ptr_t list;
ptr_t GC_reclaim_uninit4(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -385,7 +385,7 @@ register ptr_t list;
ptr_t GC_reclaim1(hbp, hhdr, list, abort_if_found)
register struct hblk *hbp; /* ptr to current heap block */
hdr * hhdr;
-bool abort_if_found; /* Abort if a reclaimable object is found */
+GC_bool abort_if_found; /* Abort if a reclaimable object is found */
register ptr_t list;
{
register word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
@@ -518,7 +518,7 @@ word abort_if_found; /* Abort if a reclaimable object is found */
GC_freehblk(hbp);
}
} else {
- bool empty = GC_block_empty(hhdr);
+ GC_bool empty = GC_block_empty(hhdr);
if (abort_if_found) {
GC_reclaim_small_nonempty_block(hbp, (int)abort_if_found);
} else if (empty) {
@@ -675,9 +675,9 @@ int kind;
* recently reclaimed, and discard the rest.
* Stop_func may be 0.
*/
-bool GC_reclaim_all(stop_func, ignore_old)
+GC_bool GC_reclaim_all(stop_func, ignore_old)
GC_stop_func stop_func;
-bool ignore_old;
+GC_bool ignore_old;
{
register word sz;
register int kind;
diff --git a/solaris_pthreads.c b/solaris_pthreads.c
index e5e31d78..c896779d 100644
--- a/solaris_pthreads.c
+++ b/solaris_pthreads.c
@@ -37,6 +37,7 @@
# include <unistd.h>
# include <errno.h>
# include "solaris_threads.h"
+# include <stdio.h>
#undef pthread_join
#undef pthread_create
diff --git a/solaris_threads.c b/solaris_threads.c
index d83ffbb5..1f5ebcdc 100644
--- a/solaris_threads.c
+++ b/solaris_threads.c
@@ -227,7 +227,7 @@ static void stop_all_lwps()
char buf[30];
prstatus_t status;
register int i;
- bool changed;
+ GC_bool changed;
lwpid_t me = _lwp_self();
if (GC_main_proc_fd == -1) {
@@ -342,7 +342,7 @@ static void restart_all_lwps()
{
int lwp_fd;
register int i;
- bool changed;
+ GC_bool changed;
lwpid_t me = _lwp_self();
# define PARANOID
@@ -392,7 +392,7 @@ static void restart_all_lwps()
if (i >= max_lwps) ABORT("Too many lwps");
}
-bool GC_multithreaded = 0;
+GC_bool GC_multithreaded = 0;
void GC_stop_world()
{
@@ -410,7 +410,7 @@ void GC_start_world()
void GC_thr_init(void);
-bool GC_thr_initialized = FALSE;
+GC_bool GC_thr_initialized = FALSE;
size_t GC_min_stack_sz;
@@ -565,7 +565,7 @@ GC_thread GC_new_thread(thread_t id)
int hv = ((word)id) % THREAD_TABLE_SZ;
GC_thread result;
static struct GC_Thread_Rep first_thread;
- static bool first_thread_used = FALSE;
+ static GC_bool first_thread_used = FALSE;
if (!first_thread_used) {
result = &first_thread;
@@ -645,8 +645,6 @@ void GC_my_stack_limits()
}
-extern ptr_t GC_approx_sp();
-
/* We hold allocation lock. We assume the world is stopped. */
void GC_push_all_stacks()
{
@@ -823,7 +821,7 @@ int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
LOCK();
if (wait_for == 0) {
register int i;
- register bool thread_exists;
+ register GC_bool thread_exists;
for (;;) {
thread_exists = FALSE;
diff --git a/solaris_threads.h b/solaris_threads.h
index d13dd554..b2cdb36e 100644
--- a/solaris_threads.h
+++ b/solaris_threads.h
@@ -24,7 +24,7 @@
} * GC_thread;
extern GC_thread GC_new_thread(thread_t id);
- extern bool GC_thr_initialized;
+ extern GC_bool GC_thr_initialized;
extern volatile GC_thread GC_threads[];
extern size_t GC_min_stack_sz;
extern size_t GC_page_sz;
diff --git a/stubborn.c b/stubborn.c
index ab228fba..bef7b98a 100644
--- a/stubborn.c
+++ b/stubborn.c
@@ -73,7 +73,7 @@ void GC_stubborn_init()
/* Invariant while this is running: GC_changing_list_current */
/* points at a word containing 0. */
/* Returns FALSE on failure. */
-bool GC_compact_changing_list()
+GC_bool GC_compact_changing_list()
{
register GC_PTR *p, *q;
register word count = 0;
@@ -139,7 +139,7 @@ GC_PTR p;
# else
register GC_PTR * my_current = GC_changing_list_current;
# endif
- register bool tried_quick;
+ register GC_bool tried_quick;
DCL_LOCK_STATE;
if (*my_current == p) {
@@ -252,7 +252,7 @@ void GC_read_changed()
}
}
-bool GC_page_was_changed(h)
+GC_bool GC_page_was_changed(h)
struct hblk * h;
{
register word index = PHT_HASH(h);
diff --git a/test.c b/test.c
index 9d2457cd..2fc51e12 100644
--- a/test.c
+++ b/test.c
@@ -24,7 +24,7 @@
# include <assert.h> /* Not normally used, but handy for debugging. */
# include "gc.h"
# include "gc_typed.h"
-# include "gc_priv.h" /* For output and some statistics */
+# include "gc_priv.h" /* For output, locking, and some statistics */
# include "config.h"
# ifdef MSWIN32
@@ -45,7 +45,7 @@
# include <synch.h>
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
# include <pthread.h>
# endif
@@ -386,7 +386,7 @@ VOLATILE int dropped_something = 0;
static mutex_t incr_lock;
mutex_lock(&incr_lock);
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock(&incr_lock);
# endif
@@ -404,7 +404,7 @@ VOLATILE int dropped_something = 0;
# ifdef SOLARIS_THREADS
mutex_unlock(&incr_lock);
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
pthread_mutex_unlock(&incr_lock);
# endif
# ifdef WIN32_THREADS
@@ -465,7 +465,7 @@ int n;
static mutex_t incr_lock;
mutex_lock(&incr_lock);
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock(&incr_lock);
# endif
@@ -481,7 +481,7 @@ int n;
# ifdef SOLARIS_THREADS
mutex_unlock(&incr_lock);
# endif
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
pthread_mutex_unlock(&incr_lock);
# endif
# ifdef WIN32_THREADS
@@ -543,6 +543,9 @@ thread_key_t fl_key;
void * alloc8bytes()
{
+# ifdef SMALL_CONFIG
+ return(GC_malloc(8));
+# else
void ** my_free_list_ptr;
void * my_free_list;
@@ -568,6 +571,7 @@ void * alloc8bytes()
*my_free_list_ptr = GC_NEXT(my_free_list);
GC_NEXT(my_free_list) = 0;
return(my_free_list);
+# endif
}
#else
@@ -921,7 +925,8 @@ void SetMinimumStack(long minSize)
}
-#if !defined(PCR) && !defined(SOLARIS_THREADS) && !defined(WIN32_THREADS) && !defined(IRIX_THREADS) || defined(LINT)
+#if !defined(PCR) && !defined(SOLARIS_THREADS) && !defined(WIN32_THREADS) \
+ && !defined(IRIX_THREADS) && !defined(LINUX_THREADS) || defined(LINT)
#ifdef MSWIN32
int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
#else
@@ -1049,7 +1054,7 @@ test()
}
#endif
-#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS)
+#if defined(SOLARIS_THREADS) || defined(IRIX_THREADS) || defined(LINUX_THREADS)
void * thr_run_one_test(void * arg)
{
run_one_test();
@@ -1107,13 +1112,17 @@ main()
# ifdef IRIX_THREADS
/* Force a larger stack to be preallocated */
    /* Since the initial stack can't always grow later.	*/
- *((char *)&code - 1024*1024) = 0; /* Require 1 Mb */
+ *((volatile char *)&code - 1024*1024) = 0; /* Require 1 Mb */
# endif /* IRIX_THREADS */
pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, 1000000);
+# ifdef IRIX_THREADS
+ pthread_attr_setstacksize(&attr, 1000000);
+# endif
n_tests = 0;
# ifdef MPROTECT_VDB
GC_enable_incremental();
+ (void) GC_printf0("Switched to incremental mode\n");
+ (void) GC_printf0("Emulating dirty bits with mprotect/signals\n");
# endif
(void) GC_set_warn_proc(warn_proc);
if ((code = pthread_create(&th1, &attr, thr_run_one_test, 0)) != 0) {
@@ -1140,4 +1149,4 @@ main()
return(0);
}
#endif /* pthreads */
-#endif /* SOLARIS_THREADS || IRIX_THREADS */
+#endif /* SOLARIS_THREADS || IRIX_THREADS || LINUX_THREADS */
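The IRIX_THREADS change above touches a location one megabyte below the current frame so that the main stack is grown before any threads are created, since it cannot always grow later. A minimal sketch of that trick (demo_reserve_stack is a hypothetical name; it assumes the stack is still permitted to grow by that amount):

    static void demo_reserve_stack(void)
    {
        volatile char probe = 0;

        /* Touching an address well below the current frame forces the  */
        /* kernel to extend the main stack now, before thread creation. */
        *((volatile char *)&probe - 1024*1024) = 0;
    }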
diff --git a/test_cpp.cc b/test_cpp.cc
index 036795c6..3160b098 100644
--- a/test_cpp.cc
+++ b/test_cpp.cc
@@ -34,9 +34,9 @@ few minutes to complete.
extern "C" {
#include "gc_priv.h"
}
-# ifdef MSWIN32
-# include <windows.h>
-# endif
+#ifdef MSWIN32
+# include <windows.h>
+#endif
#define my_assert( e ) \
@@ -174,11 +174,20 @@ int APIENTRY WinMain(
if (0 == argv[ argc ]) break;}
#else
-int main( int argc, char* argv[] ) {
+# ifdef MACOS
+ int main() {
+# else
+ int main( int argc, char* argv[] ) {
+# endif
#endif
+# if defined(MACOS) // MacOS
+ char* argv_[] = {"test_cpp", "10"}; // doesn't
+ argv = argv_; // have a
+ argc = sizeof(argv_)/sizeof(argv_[0]); // commandline
+# endif
int i, iters, n;
-# ifndef __GNUC__
+# if !defined(__GNUC__) && !defined(MACOS)
int *x = (int *)alloc::allocate(sizeof(int));
*x = 29;
@@ -247,7 +256,7 @@ int main( int argc, char* argv[] ) {
D::Test();
F::Test();}
-# ifndef __GNUC__
+# if !defined(__GNUC__) && !defined(MACOS)
my_assert (29 == x[3]);
# endif
GC_printf0( "The test appears to have succeeded.\n" );
diff --git a/threadlibs.c b/threadlibs.c
index bd549172..c8530e6f 100644
--- a/threadlibs.c
+++ b/threadlibs.c
@@ -3,7 +3,7 @@
int main()
{
-# ifdef IRIX_THREADS
+# if defined(IRIX_THREADS) || defined(LINUX_THREADS)
printf("-lpthread\n");
# endif
# ifdef SOLARIS_THREADS
diff --git a/typd_mlc.c b/typd_mlc.c
index 9ddda983..387d2305 100644
--- a/typd_mlc.c
+++ b/typd_mlc.c
@@ -46,7 +46,7 @@
# define EXTRA_BYTES (sizeof(word))
# endif
-bool GC_explicit_typing_initialized = FALSE;
+GC_bool GC_explicit_typing_initialized = FALSE;
int GC_explicit_kind; /* Object kind for objects with indirect */
/* (possibly extended) descriptors. */
@@ -59,7 +59,7 @@ int GC_array_kind; /* Object kind for objects with complex */
/* can be described by a BITMAP_BITS sized bitmap. */
typedef struct {
word ed_bitmap; /* lsb corresponds to first word. */
- bool ed_continued; /* next entry is continuation. */
+ GC_bool ed_continued; /* next entry is continuation. */
} ext_descr;
/* Array descriptors. GC_array_mark_proc understands these. */
@@ -430,7 +430,8 @@ word env;
if (bm & 1) {
current = *current_p;
if ((ptr_t)current >= least_ha && (ptr_t)current <= greatest_ha) {
- PUSH_CONTENTS(current, mark_stack_ptr, mark_stack_limit);
+ PUSH_CONTENTS(current, mark_stack_ptr,
+ mark_stack_limit, current_p, exit1);
}
}
}
@@ -590,7 +591,7 @@ word env;
if (last_set_bit < 0) return(0 /* no pointers */);
# if ALIGNMENT == CPP_WORDSZ/8
{
- register bool all_bits_set = TRUE;
+ register GC_bool all_bits_set = TRUE;
for (i = 0; i < last_set_bit; i++) {
if (!GC_get_bit(bm, i)) {
all_bits_set = FALSE;
diff --git a/version.h b/version.h
index 7b3f2c92..56114873 100644
--- a/version.h
+++ b/version.h
@@ -1,6 +1,6 @@
#define GC_VERSION_MAJOR 4
#define GC_VERSION_MINOR 13
-#define GC_ALPHA_VERSION 1
+#define GC_ALPHA_VERSION 2
# define GC_NOT_ALPHA 0xff