Diffstat (limited to 'tools/build/src/engine/boehm_gc/include')
-rw-r--r--  tools/build/src/engine/boehm_gc/include/cord.h                          |  327
-rw-r--r--  tools/build/src/engine/boehm_gc/include/ec.h                            |   70
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc.h                            | 1139
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_allocator.h                  |  245
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_amiga_redirects.h            |   30
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_backptr.h                    |   65
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_config_macros.h              |  179
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_cpp.h                        |  374
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_gcj.h                        |   94
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_inline.h                     |  128
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_mark.h                       |  201
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_pthread_redirects.h          |   54
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_tiny_fl.h                    |   89
-rw-r--r--  tools/build/src/engine/boehm_gc/include/gc_typed.h                      |  111
-rw-r--r--  tools/build/src/engine/boehm_gc/include/include.am                      |   54
-rw-r--r--  tools/build/src/engine/boehm_gc/include/javaxfc.h                       |   21
-rw-r--r--  tools/build/src/engine/boehm_gc/include/leak_detector.h                 |    9
-rw-r--r--  tools/build/src/engine/boehm_gc/include/new_gc_alloc.h                  |  484
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/cord_pos.h              |  118
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/darwin_semaphore.h      |   68
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/darwin_stop_world.h     |   22
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/dbg_mlc.h               |  178
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/gc_hdrs.h               |  206
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/gc_locks.h              |  210
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/gc_pmark.h              |  494
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/gc_priv.h               | 2040
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/gcconfig.h              | 2339
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/msvc_dbg.h              |   69
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/pthread_stop_world.h    |   11
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/pthread_support.h       |   84
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/specific.h              |   96
-rw-r--r--  tools/build/src/engine/boehm_gc/include/private/thread_local_alloc.h    |  152
-rw-r--r--  tools/build/src/engine/boehm_gc/include/weakpointer.h                   |  221
33 files changed, 9982 insertions, 0 deletions
diff --git a/tools/build/src/engine/boehm_gc/include/cord.h b/tools/build/src/engine/boehm_gc/include/cord.h
new file mode 100644
index 000000000..926089e86
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/cord.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Author: Hans-J. Boehm (boehm@parc.xerox.com)
+ */
+/* Boehm, October 5, 1995 4:20 pm PDT */
+
+/*
+ * Cords are immutable character strings. A number of operations
+ * on long cords are much more efficient than their strings.h counterparts.
+ * In particular, concatenation takes constant time independent of the length
+ * of the arguments. (Cords are represented as trees, with internal
+ * nodes representing concatenation and leaves consisting of either C
+ * strings or a functional description of the string.)
+ *
+ * The following are reasonable applications of cords. They would perform
+ * unacceptably if C strings were used:
+ * - A compiler that produces assembly language output by repeatedly
+ * concatenating instructions onto a cord representing the output file.
+ * - A text editor that converts the input file to a cord, and then
+ * performs editing operations by producing a new cord representing
+ * the file after each character change (and keeping the old ones in an
+ * edit history)
+ *
+ * For optimal performance, cords should be built by
+ * concatenating short sections.
+ * This interface is designed for maximum compatibility with C strings.
+ * ASCII NUL characters may be embedded in cords using CORD_from_fn.
+ * This is handled correctly, but CORD_to_char_star will produce a string
+ * with embedded NULs when given such a cord.
+ *
+ * This interface is fairly big, largely for performance reasons.
+ * The most basic constants and functions:
+ *
+ * CORD - the type of a cord;
+ * CORD_EMPTY - empty cord;
+ * CORD_len(cord) - length of a cord;
+ * CORD_cat(cord1,cord2) - concatenation of two cords;
+ * CORD_substr(cord, start, len) - substring (or subcord);
+ * CORD_pos i; CORD_FOR(i, cord) { ... CORD_pos_fetch(i) ... } -
+ * examine each character in a cord. CORD_pos_fetch(i) is the char.
+ * CORD_fetch(int i) - Retrieve i'th character (slowly).
+ * CORD_cmp(cord1, cord2) - compare two cords.
+ * CORD_from_file(FILE * f) - turn a read-only file into a cord.
+ * CORD_to_char_star(cord) - convert to C string.
+ * (Non-NULL C constant strings are cords.)
+ * CORD_printf (etc.) - cord version of printf. Use %r for cords.
+ */
+# ifndef CORD_H
+
+# define CORD_H
+# include <stddef.h>
+# include <stdio.h>
+/* Cords have type const char *. This is cheating quite a bit, and not */
+/* 100% portable. But it means that nonempty character string */
+/* constants may be used as cords directly, provided the string is */
+/* never modified in place. The empty cord is represented by, and */
+/* can be written as, 0. */
+
+typedef const char * CORD;
+
+/* An empty cord is always represented as nil */
+# define CORD_EMPTY 0
+
+/* Is a nonempty cord represented as a C string? */
+#define CORD_IS_STRING(s) (*(s) != '\0')
+
+/* Concatenate two cords. If the arguments are C strings, they may */
+/* not be subsequently altered. */
+CORD CORD_cat(CORD x, CORD y);
+
+/* Concatenate a cord and a C string with known length. Except for the */
+/* empty string case, this is a special case of CORD_cat. Since the */
+/* length is known, it can be faster. */
+/* The string y is shared with the resulting CORD. Hence it should */
+/* not be altered by the caller. */
+CORD CORD_cat_char_star(CORD x, const char * y, size_t leny);
+
+/* Compute the length of a cord */
+size_t CORD_len(CORD x);
+
+/* Cords may be represented by functions defining the ith character */
+typedef char (* CORD_fn)(size_t i, void * client_data);
+
+/* Turn a functional description into a cord. */
+CORD CORD_from_fn(CORD_fn fn, void * client_data, size_t len);
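+/*
+ * Illustrative sketch (not part of the original header): a functional
+ * cord whose ith character is computed on demand rather than stored.
+ * The names alpha_fn and alpha are hypothetical.
+ *
+ *	char alpha_fn(size_t i, void * client_data)
+ *	{
+ *	    return (char)('a' + i % 26);
+ *	}
+ *	...
+ *	CORD alpha = CORD_from_fn(alpha_fn, 0, 1000000);
+ *	char c = CORD_fetch(alpha, 999999);
+ *	(the character is computed by alpha_fn when fetched)
+ */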
+
+/* Return the substring (subcord really) of x with length at most n, */
+/* starting at position i. (The initial character has position 0.) */
+CORD CORD_substr(CORD x, size_t i, size_t n);
+
+/* Return the argument, but rebalanced to allow more efficient */
+/* character retrieval, substring operations, and comparisons. */
+/* This is useful only for cords that were built using repeated */
+/* concatenation. Guarantees log time access to the result, unless */
+/* x was obtained through a large number of repeated substring ops */
+/* or the embedded functional descriptions take longer to evaluate. */
+/* May reallocate significant parts of the cord. The argument is not */
+/* modified; only the result is balanced. */
+CORD CORD_balance(CORD x);
+
+/* The following traverse a cord by applying a function to each */
+/* character. This is occasionally appropriate, especially where */
+/* speed is crucial. But, since C doesn't have nested functions, */
+/* clients of this sort of traversal are clumsy to write. Consider */
+/* the functions that operate on cord positions instead. */
+
+/* Function to iteratively apply to individual characters in cord. */
+typedef int (* CORD_iter_fn)(char c, void * client_data);
+
+/* Function to apply to substrings of a cord. Each substring is a */
+/* C character string, not a general cord.                             */
+typedef int (* CORD_batched_iter_fn)(const char * s, void * client_data);
+# define CORD_NO_FN ((CORD_batched_iter_fn)0)
+
+/* Apply f1 to each character in the cord, in ascending order, */
+/* starting at position i. If */
+/* f2 is not CORD_NO_FN, then multiple calls to f1 may be replaced by */
+/* a single call to f2. The parameter f2 is provided only to allow */
+/* some optimization by the client. This terminates when the right */
+/* end of this string is reached, or when f1 or f2 return != 0. In the */
+/* latter case CORD_iter returns != 0. Otherwise it returns 0. */
+/* The specified value of i must be < CORD_len(x). */
+int CORD_iter5(CORD x, size_t i, CORD_iter_fn f1,
+ CORD_batched_iter_fn f2, void * client_data);
+
+/* A simpler version that starts at 0, and without f2: */
+int CORD_iter(CORD x, CORD_iter_fn f1, void * client_data);
+# define CORD_iter(x, f1, cd) CORD_iter5(x, 0, f1, CORD_NO_FN, cd)
+
+/* Similar to CORD_iter5, but end-to-beginning. No provisions for */
+/* CORD_batched_iter_fn. */
+int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data);
+
+/* A simpler version that starts at the end: */
+int CORD_riter(CORD x, CORD_iter_fn f1, void * client_data);
+
+/* Functions that operate on cord positions. The easy way to traverse */
+/* cords. A cord position is logically a pair consisting of a cord */
+/* and an index into that cord. But it is much faster to retrieve a */
+/* character based on a position than on an index. Unfortunately,      */
+/* positions are big (order of a few 100 bytes), so allocate them with */
+/* caution. */
+/* Things in cord_pos.h should be treated as opaque, except as */
+/* described below. Also note that */
+/* CORD_pos_fetch, CORD_next and CORD_prev have both macro and function */
+/* definitions. The former may evaluate their argument more than once. */
+# include "private/cord_pos.h"
+
+/*
+ Visible definitions from above:
+
+ typedef <OPAQUE but fairly big> CORD_pos[1];
+
+ * Extract the cord from a position:
+ CORD CORD_pos_to_cord(CORD_pos p);
+
+ * Extract the current index from a position:
+ size_t CORD_pos_to_index(CORD_pos p);
+
+ * Fetch the character located at the given position:
+ char CORD_pos_fetch(CORD_pos p);
+
+ * Initialize the position to refer to the given cord and index.
+ * Note that this is the most expensive function on positions:
+ void CORD_set_pos(CORD_pos p, CORD x, size_t i);
+
+ * Advance the position to the next character.
+ * P must be initialized and valid.
+ * Invalidates p if past end:
+ void CORD_next(CORD_pos p);
+
+ * Move the position to the preceding character.
+ * P must be initialized and valid.
+ * Invalidates p if past beginning:
+ void CORD_prev(CORD_pos p);
+
+ * Is the position valid, i.e. inside the cord?
+ int CORD_pos_valid(CORD_pos p);
+*/
+# define CORD_FOR(pos, cord) \
+ for (CORD_set_pos(pos, cord, 0); CORD_pos_valid(pos); CORD_next(pos))
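+
+/*
+ * Illustrative sketch (not part of the original header): counting
+ * blanks in a cord with CORD_FOR.  The names x and blanks are
+ * placeholders for client code.
+ *
+ *	CORD_pos p;
+ *	size_t blanks = 0;
+ *	CORD_FOR(p, x) {
+ *	    if (CORD_pos_fetch(p) == ' ') blanks++;
+ *	}
+ */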
+
+
+/* An out of memory handler to call. May be supplied by client. */
+/* Must not return. */
+extern void (* CORD_oom_fn)(void);
+
+/* Dump the representation of x to stdout in an implementation defined */
+/* manner. Intended for debugging only. */
+void CORD_dump(CORD x);
+
+/* The following could easily be implemented by the client. They are */
+/* provided in cordxtra.c for convenience. */
+
+/* Concatenate a character to the end of a cord. */
+CORD CORD_cat_char(CORD x, char c);
+
+/* Concatenate n cords. */
+CORD CORD_catn(int n, /* CORD */ ...);
+
+/* Return the character in CORD_substr(x, i, 1) */
+char CORD_fetch(CORD x, size_t i);
+
+/* Return < 0, 0, or > 0, depending on whether x < y, x = y, x > y */
+int CORD_cmp(CORD x, CORD y);
+
+/* A generalization that takes both starting positions for the */
+/* comparison, and a limit on the number of characters to be compared. */
+int CORD_ncmp(CORD x, size_t x_start, CORD y, size_t y_start, size_t len);
+
+/* Find the first occurrence of s in x at position start or later. */
+/* Return the position of the first character of s in x, or */
+/* CORD_NOT_FOUND if there is none. */
+size_t CORD_str(CORD x, size_t start, CORD s);
+
+/* Return a cord consisting of i copies of (possibly NUL) c. Dangerous */
+/* in conjunction with CORD_to_char_star. */
+/* The resulting representation takes constant space, independent of i. */
+CORD CORD_chars(char c, size_t i);
+# define CORD_nul(i) CORD_chars('\0', (i))
+
+/* Turn a file into cord. The file must be seekable. Its contents */
+/* must remain constant. The file may be accessed as an immediate */
+/* result of this call and/or as a result of subsequent accesses to */
+/* the cord. Short files are likely to be immediately read, but */
+/* long files are likely to be read on demand, possibly relying on */
+/* stdio for buffering. */
+/* We must have exclusive access to the descriptor f, i.e. we may */
+/* read it at any time, and expect the file pointer to be */
+/* where we left it. Normally this should be invoked as */
+/* CORD_from_file(fopen(...)) */
+/* CORD_from_file arranges to close the file descriptor when it is no */
+/* longer needed (e.g. when the result becomes inaccessible). */
+/* The file f must be such that ftell reflects the actual character */
+/* position in the file, i.e. the number of characters that can be */
+/* or were read with fread. On UNIX systems this is always true. On */
+/* MS Windows systems, f must be opened in binary mode. */
+CORD CORD_from_file(FILE * f);
+
+/* Equivalent to the above, except that the entire file will be read */
+/* and the file pointer will be closed immediately. */
+/* The binary mode restriction from above does not apply. */
+CORD CORD_from_file_eager(FILE * f);
+
+/* Equivalent to the above, except that the file will be read on demand.*/
+/* The binary mode restriction applies. */
+CORD CORD_from_file_lazy(FILE * f);
+
+/* Turn a cord into a C string. The result shares no structure with */
+/* x, and is thus modifiable. */
+char * CORD_to_char_star(CORD x);
+
+/* Turn a C string into a CORD. The C string is copied, and so may */
+/* subsequently be modified. */
+CORD CORD_from_char_star(const char *s);
+
+/* Identical to the above, but the result may share structure with */
+/* the argument and is thus not modifiable. */
+const char * CORD_to_const_char_star(CORD x);
+
+/* Write a cord to a file, starting at the current position. No */
+/* trailing NULs or newlines are added.                                */
+/* Returns EOF if a write error occurs, 1 otherwise. */
+int CORD_put(CORD x, FILE * f);
+
+/* "Not found" result for the following two functions. */
+# define CORD_NOT_FOUND ((size_t)(-1))
+
+/* A vague analog of strchr. Returns the position (an integer, not */
+/* a pointer) of the first occurrence of (char) c inside x at position */
+/* i or later. The value i must be < CORD_len(x). */
+size_t CORD_chr(CORD x, size_t i, int c);
+
+/* A vague analog of strrchr. Returns index of the last occurrence */
+/* of (char) c inside x at position i or earlier. The value i */
+/* must be < CORD_len(x). */
+size_t CORD_rchr(CORD x, size_t i, int c);
+
+
+/* The following are also not primitive, but are implemented in */
+/* cordprnt.c. They provide functionality similar to the ANSI C */
+/* functions with corresponding names, but with the following */
+/* additions and changes: */
+/* 1. A %r conversion specification specifies a CORD argument. Field */
+/* width, precision, etc. have the same semantics as for %s. */
+/* (Note that %c,%C, and %S were already taken.) */
+/* 2. The format string is represented as a CORD. */
+/* 3. CORD_sprintf and CORD_vsprintf assign the result through the 1st */
+/*    argument. Unlike their ANSI C versions, there is no need to guess */
+/* the correct buffer size. */
+/* 4. Most of the conversions are implemented through the native       */
+/* vsprintf. Hence they are usually no faster, and */
+/* idiosyncrasies of the native printf are preserved. However,         */
+/* CORD arguments to CORD_sprintf and CORD_vsprintf are NOT copied; */
+/* the result shares the original structure. This may make them */
+/* very efficient in some unusual applications. */
+/* The format string is copied. */
+/* All functions return the number of characters generated or -1 on */
+/* error. This complies with the ANSI standard, but is inconsistent */
+/* with some older implementations of sprintf. */
+
+/* The implementation of these is probably less portable than the rest */
+/* of this package. */
+
+#ifndef CORD_NO_IO
+
+#include <stdarg.h>
+
+int CORD_sprintf(CORD * out, CORD format, ...);
+int CORD_vsprintf(CORD * out, CORD format, va_list args);
+int CORD_fprintf(FILE * f, CORD format, ...);
+int CORD_vfprintf(FILE * f, CORD format, va_list args);
+int CORD_printf(CORD format, ...);
+int CORD_vprintf(CORD format, va_list args);
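+
+/*
+ * Illustrative sketch (not part of the original header), using the
+ * declarations above: %r embeds a cord in the output.  The names
+ * greeting and msg are placeholders.
+ *
+ *	CORD greeting = CORD_cat("hello, ", "cord world");
+ *	CORD msg;
+ *	CORD_sprintf(&msg, "%r (%lu chars)\n", greeting,
+ *	             (unsigned long)CORD_len(greeting));
+ *	CORD_printf("%r", msg);
+ */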
+
+#endif /* CORD_NO_IO */
+
+# endif /* CORD_H */
diff --git a/tools/build/src/engine/boehm_gc/include/ec.h b/tools/build/src/engine/boehm_gc/include/ec.h
new file mode 100644
index 000000000..c829b83ad
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/ec.h
@@ -0,0 +1,70 @@
+# ifndef EC_H
+# define EC_H
+
+# ifndef CORD_H
+# include "cord.h"
+# endif
+
+/* Extensible cords are strings that may be destructively appended to. */
+/* They allow fast construction of cords from characters that are */
+/* being read from a stream. */
+/*
+ * A client might look like:
+ *
+ * {
+ * CORD_ec x;
+ * CORD result;
+ * char c;
+ * FILE *f;
+ *
+ * ...
+ * CORD_ec_init(x);
+ * while(...) {
+ * c = getc(f);
+ * ...
+ * CORD_ec_append(x, c);
+ * }
+ *	    result = CORD_balance(CORD_ec_to_cord(x));
+ *	}
+ *
+ * If a C string is desired as the final result, the call to CORD_balance
+ * may be replaced by a call to CORD_to_char_star.
+ */
+
+# ifndef CORD_BUFSZ
+# define CORD_BUFSZ 128
+# endif
+
+typedef struct CORD_ec_struct {
+ CORD ec_cord;
+ char * ec_bufptr;
+ char ec_buf[CORD_BUFSZ+1];
+} CORD_ec[1];
+
+/* This structure represents the concatenation of ec_cord with */
+/* ec_buf[0 ... (ec_bufptr-ec_buf-1)] */
+
+/* Flush the buffer part of the extensible cord into ec_cord.          */
+/* Note that this is almost the only real function, and it is */
+/* implemented in 6 lines in cordxtra.c */
+void CORD_ec_flush_buf(CORD_ec x);
+
+/* Convert an extensible cord to a cord. */
+# define CORD_ec_to_cord(x) (CORD_ec_flush_buf(x), (x)[0].ec_cord)
+
+/* Initialize an extensible cord. */
+# define CORD_ec_init(x) ((x)[0].ec_cord = 0, (x)[0].ec_bufptr = (x)[0].ec_buf)
+
+/* Append a character to an extensible cord. */
+# define CORD_ec_append(x, c) \
+ { \
+ if ((x)[0].ec_bufptr == (x)[0].ec_buf + CORD_BUFSZ) { \
+ CORD_ec_flush_buf(x); \
+ } \
+ *((x)[0].ec_bufptr)++ = (c); \
+ }
+
+/* Append a cord to an extensible cord. Structure remains shared with */
+/* original. */
+void CORD_ec_append_cord(CORD_ec x, CORD s);
+
+# endif /* EC_H */
diff --git a/tools/build/src/engine/boehm_gc/include/gc.h b/tools/build/src/engine/boehm_gc/include/gc.h
new file mode 100644
index 000000000..cc950888f
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc.h
@@ -0,0 +1,1139 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
+ * Copyright 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright 1999 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (C) 2007 Free Software Foundation, Inc
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/*
+ * Note that this defines a large number of tuning hooks, which can
+ * safely be ignored in nearly all cases. For normal use it suffices
+ * to call only GC_MALLOC and perhaps GC_REALLOC.
+ * For better performance, also look at GC_MALLOC_ATOMIC, and
+ * GC_enable_incremental. If you need an action to be performed
+ * immediately before an object is collected, look at GC_register_finalizer.
+ * If you are using Solaris threads, look at the end of this file.
+ * Everything else is best ignored unless you encounter performance
+ * problems.
+ */
+
+#ifndef _GC_H
+
+# define _GC_H
+
+# include "gc_config_macros.h"
+
+# ifdef __cplusplus
+ extern "C" {
+# endif
+
+
+/* Define word and signed_word to be unsigned and signed types of the */
+/* size as char * or void *. There seems to be no way to do this */
+/* even semi-portably. The following is probably no better/worse */
+/* than almost anything else. */
+/* The ANSI standard suggests that size_t and ptrdiff_t might be       */
+/* better choices. But those had incorrect definitions on some older */
+/* systems. Notably "typedef int size_t" is WRONG. */
+#ifndef _WIN64
+ typedef unsigned long GC_word;
+ typedef long GC_signed_word;
+#else
+ /* Win64 isn't really supported yet, but this is the first step. And */
+ /* it might cause error messages to show up in more plausible places. */
+ /* This needs basetsd.h, which is included by windows.h. */
+ typedef unsigned long long GC_word;
+ typedef long long GC_signed_word;
+#endif
+
+/* Public read-only variables */
+
+GC_API GC_word GC_gc_no;/* Counter incremented per collection. */
+ /* Includes empty GCs at startup. */
+
+GC_API int GC_parallel; /* GC is parallelized for performance on */
+ /* multiprocessors. Currently set only */
+ /* implicitly if collector is built with */
+ /* -DPARALLEL_MARK and if either: */
+ /* Env variable GC_NPROC is set to > 1, or */
+ /* GC_NPROC is not set and this is an MP. */
+ /* If GC_parallel is set, incremental */
+ /* collection is only partially functional, */
+ /* and may not be desirable. */
+
+
+/* Public R/W variables */
+
+GC_API void * (*GC_oom_fn) (size_t bytes_requested);
+ /* When there is insufficient memory to satisfy */
+ /* an allocation request, we return */
+ /* (*GC_oom_fn)(). By default this just */
+ /* returns 0. */
+ /* If it returns, it must return 0 or a valid */
+ /* pointer to a previously allocated heap */
+ /* object. */
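+			/* Illustrative sketch (not part of the original	*/
+			/* header): installing a handler that aborts	*/
+			/* instead of returning 0.  my_oom is a		*/
+			/* placeholder; fprintf/abort need <stdio.h>	*/
+			/* and <stdlib.h>.				*/
+			/*						*/
+			/*   void * my_oom(size_t bytes_requested)	*/
+			/*   {						*/
+			/*       fprintf(stderr, "GC: OOM\n");		*/
+			/*       abort();				*/
+			/*       return 0;   (never reached)		*/
+			/*   }						*/
+			/*   ...					*/
+			/*   GC_oom_fn = my_oom;			*/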
+
+GC_API int GC_find_leak;
+ /* Do not actually garbage collect, but simply */
+ /* report inaccessible memory that was not */
+ /* deallocated with GC_free. Initial value */
+ /* is determined by FIND_LEAK macro. */
+
+GC_API int GC_all_interior_pointers;
+ /* Arrange for pointers to object interiors to */
+ /* be recognized as valid. May not be changed */
+ /* after GC initialization. */
+ /* Initial value is determined by */
+ /* -DALL_INTERIOR_POINTERS. */
+ /* Unless DONT_ADD_BYTE_AT_END is defined, this */
+ /* also affects whether sizes are increased by */
+ /* at least a byte to allow "off the end" */
+ /* pointer recognition. */
+ /* MUST BE 0 or 1. */
+
+GC_API int GC_finalize_on_demand;
+ /* If nonzero, finalizers will only be run in */
+ /* response to an explicit GC_invoke_finalizers */
+ /* call. The default is determined by whether */
+ /* the FINALIZE_ON_DEMAND macro is defined */
+ /* when the collector is built. */
+
+GC_API int GC_java_finalization;
+ /* Mark objects reachable from finalizable */
+ /* objects in a separate postpass. This makes */
+ /* it a bit safer to use non-topologically- */
+ /* ordered finalization. Default value is */
+ /* determined by JAVA_FINALIZATION macro. */
+ /* Enables register_finalizer_unreachable to */
+ /* work correctly. */
+
+GC_API void (* GC_finalizer_notifier)(void);
+ /* Invoked by the collector when there are */
+ /* objects to be finalized. Invoked at most */
+ /* once per GC cycle. Never invoked unless */
+ /* GC_finalize_on_demand is set. */
+ /* Typically this will notify a finalization */
+ /* thread, which will call GC_invoke_finalizers */
+ /* in response. */
+
+GC_API int GC_dont_gc; /* != 0 ==> Don't collect. In versions 6.2a1+, */
+ /* this overrides explicit GC_gcollect() calls. */
+ /* Used as a counter, so that nested enabling */
+ /* and disabling work correctly. Should */
+ /* normally be updated with GC_enable() and */
+ /* GC_disable() calls. */
+ /* Direct assignment to GC_dont_gc is */
+ /* deprecated. */
+
+GC_API int GC_dont_expand;
+ /* Don't expand heap unless explicitly requested */
+ /* or forced to. */
+
+GC_API int GC_use_entire_heap;
+ /* Causes the nonincremental collector to use the */
+ /* entire heap before collecting. This was the only */
+ /* option for GC versions < 5.0. This sometimes */
+ /* results in more large block fragmentation, since */
+ /* very large blocks will tend to get broken up */
+ /* during each GC cycle. It is likely to result in a */
+ /* larger working set, but lower collection */
+ /* frequencies, and hence fewer instructions executed */
+ /* in the collector. */
+
+GC_API int GC_full_freq; /* Number of partial collections between */
+ /* full collections. Matters only if */
+ /* GC_incremental is set. */
+ /* Full collections are also triggered if */
+ /* the collector detects a substantial */
+ /* increase in the number of in-use heap */
+ /* blocks. Values in the tens are now */
+ /* perfectly reasonable, unlike for */
+ /* earlier GC versions. */
+
+GC_API GC_word GC_non_gc_bytes;
+ /* Bytes not considered candidates for collection. */
+ /* Used only to control scheduling of collections. */
+ /* Updated by GC_malloc_uncollectable and GC_free. */
+ /* Wizards only. */
+
+GC_API int GC_no_dls;
+ /* Don't register dynamic library data segments. */
+ /* Wizards only. Should be used only if the */
+ /* application explicitly registers all roots. */
+ /* In Microsoft Windows environments, this will */
+ /* usually also prevent registration of the */
+ /* main data segment as part of the root set. */
+
+GC_API GC_word GC_free_space_divisor;
+ /* We try to make sure that we allocate at */
+ /* least N/GC_free_space_divisor bytes between */
+ /* collections, where N is twice the number */
+ /* of traced bytes, plus the number of untraced */
+ /* bytes (bytes in "atomic" objects), plus */
+ /* a rough estimate of the root set size. */
+ /* N approximates GC tracing work per GC. */
+ /* Initially, GC_free_space_divisor = 3. */
+ /* Increasing its value will use less space */
+ /* but more collection time. Decreasing it */
+ /* will appreciably decrease collection time */
+ /* at the expense of space. */
+
+GC_API GC_word GC_max_retries;
+ /* The maximum number of GCs attempted before */
+ /* reporting out of memory after heap */
+ /* expansion fails. Initially 0. */
+
+
+GC_API char *GC_stackbottom; /* Cool end of user stack. */
+ /* May be set in the client prior to */
+ /* calling any GC_ routines. This */
+ /* avoids some overhead, and */
+ /* potentially some signals that can */
+ /* confuse debuggers. Otherwise the */
+ /* collector attempts to set it */
+ /* automatically. */
+ /* For multithreaded code, this is the */
+ /* cold end of the stack for the */
+ /* primordial thread. */
+
+GC_API int GC_dont_precollect; /* Don't collect as part of */
+ /* initialization. Should be set only */
+ /* if the client wants a chance to */
+ /* manually initialize the root set */
+ /* before the first collection. */
+ /* Interferes with blacklisting. */
+ /* Wizards only. */
+
+GC_API unsigned long GC_time_limit;
+ /* If incremental collection is enabled, */
+ /* We try to terminate collections */
+ /* after this many milliseconds. Not a */
+ /* hard time bound. Setting this to */
+ /* GC_TIME_UNLIMITED will essentially */
+ /* disable incremental collection while */
+ /* leaving generational collection */
+ /* enabled. */
+# define GC_TIME_UNLIMITED 999999
+ /* Setting GC_time_limit to this value */
+ /* will disable the "pause time exceeded" */
+ /* tests. */
+
+/* Public procedures */
+
+/* Initialize the collector. This is only required when using thread-local
+ * allocation, since unlike the regular allocation routines, GC_local_malloc
+ * is not self-initializing. If you use GC_local_malloc you should arrange
+ * to call this somehow (e.g. from a constructor) before doing any allocation.
+ * For win32 threads, it needs to be called explicitly.
+ */
+GC_API void GC_init(void);
+
+/*
+ * general purpose allocation routines, with roughly malloc calling conv.
+ * The atomic versions promise that no relevant pointers are contained
+ * in the object. The nonatomic versions guarantee that the new object
+ * is cleared. GC_malloc_stubborn promises that no changes to the object
+ * will occur after GC_end_stubborn_change has been called on the
+ * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
+ * that is scanned for pointers to collectable objects, but is not itself
+ * collectable. The object is scanned even if it does not appear to
+ * be reachable. GC_malloc_uncollectable and GC_free called on the resulting
+ * object implicitly update GC_non_gc_bytes appropriately.
+ *
+ * Note that the GC_malloc_stubborn support is stubbed out by default
+ * starting in 6.0. GC_malloc_stubborn is an alias for GC_malloc unless
+ * the collector is built with STUBBORN_ALLOC defined.
+ */
+GC_API void * GC_malloc(size_t size_in_bytes);
+GC_API void * GC_malloc_atomic(size_t size_in_bytes);
+GC_API char * GC_strdup (const char *str);
+GC_API void * GC_malloc_uncollectable(size_t size_in_bytes);
+GC_API void * GC_malloc_stubborn(size_t size_in_bytes);
+
+/* The following is only defined if the library has been suitably */
+/* compiled: */
+GC_API void * GC_malloc_atomic_uncollectable(size_t size_in_bytes);
+
+/* Explicitly deallocate an object. Dangerous if used incorrectly. */
+/* Requires a pointer to the base of an object. */
+/* If the argument is stubborn, it should not be changeable when freed. */
+/* An object should not be enabled for finalization when it is         */
+/* explicitly deallocated. */
+/* GC_free(0) is a no-op, as required by ANSI C for free. */
+GC_API void GC_free(void * object_addr);
+
+/*
+ * Stubborn objects may be changed only if the collector is explicitly informed.
+ * The collector is implicitly informed of coming change when such
+ * an object is first allocated. The following routines inform the
+ * collector that an object will no longer be changed, or that it will
+ * once again be changed. Only nonNIL pointer stores into the object
+ * are considered to be changes. The argument to GC_end_stubborn_change
+ * must be exactly the value returned by GC_malloc_stubborn or passed to
+ * GC_change_stubborn. (In the second case it may be an interior pointer
+ * within 512 bytes of the beginning of the objects.)
+ * There is a performance penalty for allowing more than
+ * one stubborn object to be changed at once, but it is acceptable to
+ * do so. The same applies to dropping stubborn objects that are still
+ * changeable.
+ */
+GC_API void GC_change_stubborn(void *);
+GC_API void GC_end_stubborn_change(void *);
+
+/* Return a pointer to the base (lowest address) of an object given */
+/* a pointer to a location within the object. */
+/* I.e. map an interior pointer to the corresponding base pointer.     */
+/* Note that with debugging allocation, this returns a pointer to the */
+/* actual base of the object, i.e. the debug information, not to */
+/* the base of the user object. */
+/* Return 0 if displaced_pointer doesn't point to within a valid */
+/* object. */
+/* Note that a deallocated object in the garbage collected heap */
+/* may be considered valid, even if it has been deallocated with */
+/* GC_free. */
+GC_API void * GC_base(void * displaced_pointer);
+
+/* Given a pointer to the base of an object, return its size in bytes. */
+/* The returned size may be slightly larger than what was originally */
+/* requested. */
+GC_API size_t GC_size(void * object_addr);
+
+/* For compatibility with C library. This is occasionally faster than */
+/* a malloc followed by a bcopy. But if you rely on that, either here */
+/* or with the standard C library, your code is broken. In my */
+/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
+/* The resulting object has the same kind as the original. */
+/* If the argument is stubborn, the result will have changes enabled. */
+/* It is an error to have changes enabled for the original object. */
+/* Follows ANSI conventions for NULL old_object.                       */
+GC_API void * GC_realloc(void * old_object, size_t new_size_in_bytes);
+
+/* Explicitly increase the heap size. */
+/* Returns 0 on failure, 1 on success. */
+GC_API int GC_expand_hp(size_t number_of_bytes);
+
+/* Limit the heap size to n bytes. Useful when you're debugging, */
+/* especially on systems that don't handle running out of memory well. */
+/* n == 0 ==> unbounded. This is the default. */
+GC_API void GC_set_max_heap_size(GC_word n);
+
+/* Inform the collector that a certain section of statically allocated */
+/* memory contains no pointers to garbage collected memory. Thus it */
+/* need not be scanned. This is sometimes important if the application */
+/* maps large read/write files into the address space, which could be */
+/* mistaken for dynamic library data segments on some systems. */
+GC_API void GC_exclude_static_roots(void * low_address,
+ void * high_address_plus_1);
+
+/* Clear the set of root segments. Wizards only. */
+GC_API void GC_clear_roots(void);
+
+/* Add a root segment. Wizards only. */
+GC_API void GC_add_roots(void * low_address, void * high_address_plus_1);
+
+/* Remove a root segment. Wizards only. */
+GC_API void GC_remove_roots(void * low_address, void * high_address_plus_1);
+
+/* Add a displacement to the set of those considered valid by the */
+/* collector. GC_register_displacement(n) means that if p was returned */
+/* by GC_malloc, then (char *)p + n will be considered to be a valid */
+/* pointer to p. N must be small and less than the size of p. */
+/* (All pointers to the interior of objects from the stack are */
+/* considered valid in any case. This applies to heap objects and */
+/* static data.) */
+/* Preferably, this should be called before any other GC procedures. */
+/* Calling it later adds to the probability of excess memory */
+/* retention. */
+/* This is a no-op if the collector has recognition of */
+/* arbitrary interior pointers enabled, which is now the default. */
+GC_API void GC_register_displacement(size_t n);
+
+/* The following version should be used if any debugging allocation is */
+/* being done. */
+GC_API void GC_debug_register_displacement(size_t n);
+
+/* Explicitly trigger a full, world-stop collection. */
+GC_API void GC_gcollect(void);
+
+/* Trigger a full world-stopped collection. Abort the collection if */
+/* and when stop_func returns a nonzero value. Stop_func will be */
+/* called frequently, and should be reasonably fast. This works even */
+/* if virtual dirty bits, and hence incremental collection, are not    */
+/* available for this architecture. Collections can be aborted faster */
+/* than normal pause times for incremental collection. However, */
+/* aborted collections do no useful work; the next collection needs */
+/* to start from the beginning. */
+/* Return 0 if the collection was aborted, 1 if it succeeded. */
+typedef int (* GC_stop_func)(void);
+GC_API int GC_try_to_collect(GC_stop_func stop_func);
+
+/* Return the number of bytes in the heap. Excludes collector private */
+/* data structures. Includes empty blocks and fragmentation loss. */
+/* Includes some pages that were allocated but never written. */
+GC_API size_t GC_get_heap_size(void);
+
+/* Return a lower bound on the number of free bytes in the heap. */
+GC_API size_t GC_get_free_bytes(void);
+
+/* Return the number of bytes allocated since the last collection. */
+GC_API size_t GC_get_bytes_since_gc(void);
+
+/* Return the total number of bytes allocated in this process. */
+/* Never decreases, except due to wrapping. */
+GC_API size_t GC_get_total_bytes(void);
+
+/* Disable garbage collection. Even GC_gcollect calls will be */
+/* ineffective. */
+GC_API void GC_disable(void);
+
+/* Reenable garbage collection. GC_disable() and GC_enable() calls */
+/* nest. Garbage collection is enabled if the number of calls to both */
+/* functions is equal.                                                 */
+GC_API void GC_enable(void);
+
+/* Enable incremental/generational collection. */
+/* Not advisable unless dirty bits are */
+/* available or most heap objects are */
+/* pointerfree(atomic) or immutable. */
+/* Don't use in leak finding mode. */
+/* Ignored if GC_dont_gc is true. */
+/* Only the generational piece of this is */
+/* functional if GC_parallel is TRUE */
+/* or if GC_time_limit is GC_TIME_UNLIMITED. */
+/* Causes GC_local_gcj_malloc() to revert to */
+/* locked allocation. Must be called */
+/* before any GC_local_gcj_malloc() calls. */
+/* For best performance, should be called as early as possible. */
+/* On some platforms, calling it later may have adverse effects.*/
+/* Safe to call before GC_INIT(). Includes a GC_init() call. */
+GC_API void GC_enable_incremental(void);
+
+/* Does incremental mode write-protect pages? Returns zero or */
+/* more of the following, or'ed together: */
+#define GC_PROTECTS_POINTER_HEAP 1 /* May protect non-atomic objs. */
+#define GC_PROTECTS_PTRFREE_HEAP 2
+#define GC_PROTECTS_STATIC_DATA 4 /* Currently never. */
+#define GC_PROTECTS_STACK 8 /* Probably impractical. */
+
+#define GC_PROTECTS_NONE 0
+GC_API int GC_incremental_protection_needs(void);
+
+/* Perform some garbage collection work, if appropriate. */
+/* Return 0 if there is no more work to be done. */
+/* Typically performs an amount of work corresponding roughly */
+/* to marking from one page. May do more work if further */
+/* progress requires it, e.g. if incremental collection is */
+/* disabled. It is reasonable to call this in a wait loop */
+/* until it returns 0. */
+GC_API int GC_collect_a_little(void);
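+
+/*
+ * Illustrative sketch (not part of the original header): doing
+ * incremental collection work from an application idle loop.
+ * application_is_idle() is a placeholder for client code.
+ *
+ *	GC_enable_incremental();
+ *	...
+ *	while (application_is_idle()) {
+ *	    if (GC_collect_a_little() == 0) break;   (no more GC work)
+ *	}
+ */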
+
+/* Allocate an object of size lb bytes. The client guarantees that */
+/* as long as the object is live, it will be referenced by a pointer */
+/* that points to somewhere within the first 256 bytes of the object. */
+/* (This should normally be declared volatile to prevent the compiler */
+/* from invalidating this assertion.) This routine is only useful */
+/* if a large array is being allocated. It reduces the chance of */
+/* accidentally retaining such an array as a result of scanning an */
+/* integer that happens to be an address inside the array. (Actually, */
+/* it reduces the chance of the allocator not finding space for such */
+/* an array, since it will try hard to avoid introducing such a false */
+/* reference.) On a SunOS 4.X or MS Windows system this is recommended */
+/* for arrays likely to be larger than 100K or so. For other systems, */
+/* or if the collector is not configured to recognize all interior */
+/* pointers, the threshold is normally much higher. */
+GC_API void * GC_malloc_ignore_off_page(size_t lb);
+GC_API void * GC_malloc_atomic_ignore_off_page(size_t lb);
+
+#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
+# define GC_ADD_CALLER
+# define GC_RETURN_ADDR (GC_word)__return_address
+#endif
+
+#if defined(__linux__) || defined(__GLIBC__)
+# include <features.h>
+# if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2) \
+ && !defined(__ia64__)
+# ifndef GC_HAVE_BUILTIN_BACKTRACE
+# define GC_HAVE_BUILTIN_BACKTRACE
+# endif
+# endif
+# if defined(__i386__) || defined(__x86_64__)
+# define GC_CAN_SAVE_CALL_STACKS
+# endif
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1200 /* version 12.0+ (MSVC 6.0+) */ \
+ && !defined(_AMD64_)
+# ifndef GC_HAVE_NO_BUILTIN_BACKTRACE
+# define GC_HAVE_BUILTIN_BACKTRACE
+# endif
+#endif
+
+#if defined(GC_HAVE_BUILTIN_BACKTRACE) && !defined(GC_CAN_SAVE_CALL_STACKS)
+# define GC_CAN_SAVE_CALL_STACKS
+#endif
+
+#if defined(__sparc__)
+# define GC_CAN_SAVE_CALL_STACKS
+#endif
+
+/* If we're on a platform on which we can't save call stacks, but      */
+/* gcc is normally used, we go ahead and define GC_ADD_CALLER. */
+/* We make this decision independent of whether gcc is actually being */
+/* used, in order to keep the interface consistent, and allow mixing */
+/* of compilers. */
+/* This may also be desirable if it is possible but expensive to */
+/* retrieve the call chain. */
+#if (defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) \
+ || defined(__FreeBSD__) || defined(__DragonFly__)) && !defined(GC_CAN_SAVE_CALL_STACKS)
+# define GC_ADD_CALLER
+# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95)
+ /* gcc knows how to retrieve return address, but we don't know */
+ /* how to generate call stacks. */
+# define GC_RETURN_ADDR (GC_word)__builtin_return_address(0)
+# else
+ /* Just pass 0 for gcc compatibility. */
+# define GC_RETURN_ADDR 0
+# endif
+#endif
+
+#ifdef GC_ADD_CALLER
+# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
+# define GC_EXTRA_PARAMS GC_word ra, const char * s, int i
+#else
+# define GC_EXTRAS __FILE__, __LINE__
+# define GC_EXTRA_PARAMS const char * s, int i
+#endif
+
+/* Debugging (annotated) allocation. GC_gcollect will check */
+/* objects allocated in this way for overwrites, etc. */
+GC_API void * GC_debug_malloc(size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_atomic(size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API char * GC_debug_strdup(const char *str, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_uncollectable
+ (size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_stubborn
+ (size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_ignore_off_page
+ (size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void * GC_debug_malloc_atomic_ignore_off_page
+ (size_t size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void GC_debug_free (void * object_addr);
+GC_API void * GC_debug_realloc
+ (void * old_object, size_t new_size_in_bytes, GC_EXTRA_PARAMS);
+GC_API void GC_debug_change_stubborn(void *);
+GC_API void GC_debug_end_stubborn_change(void *);
+
+/* Routines that allocate objects with debug information (like the */
+/* above), but just fill in dummy file and line number information. */
+/* Thus they can serve as drop-in malloc/realloc replacements. This */
+/* can be useful for two reasons: */
+/* 1) It allows the collector to be built with DBG_HDRS_ALL defined */
+/* even if some allocation calls come from 3rd party libraries */
+/* that can't be recompiled. */
+/* 2) On some platforms, the file and line information is redundant, */
+/* since it can be reconstructed from a stack trace. On such */
+/* platforms it may be more convenient not to recompile, e.g. for */
+/* leak detection. This can be accomplished by instructing the */
+/* linker to replace malloc/realloc with these. */
+GC_API void * GC_debug_malloc_replacement (size_t size_in_bytes);
+GC_API void * GC_debug_realloc_replacement
+ (void * object_addr, size_t size_in_bytes);
+
+# ifdef GC_DEBUG
+# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
+# define GC_STRDUP(s) GC_debug_strdup((s), GC_EXTRAS)
+# define GC_MALLOC_UNCOLLECTABLE(sz) \
+ GC_debug_malloc_uncollectable(sz, GC_EXTRAS)
+# define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
+ GC_debug_malloc_ignore_off_page(sz, GC_EXTRAS)
+# define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
+ GC_debug_malloc_atomic_ignore_off_page(sz, GC_EXTRAS)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
+# define GC_FREE(p) GC_debug_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_debug_register_finalizer(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
+ GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
+ GC_debug_register_finalizer_no_order(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
+ GC_debug_register_finalizer_unreachable(p, f, d, of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
+# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
+# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
+ GC_general_register_disappearing_link(link, GC_base(obj))
+# define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
+# else
+# define GC_MALLOC(sz) GC_malloc(sz)
+# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
+# define GC_STRDUP(s) GC_strdup(s)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
+# define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
+ GC_malloc_ignore_off_page(sz)
+# define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
+ GC_malloc_atomic_ignore_off_page(sz)
+# define GC_REALLOC(old, sz) GC_realloc(old, sz)
+# define GC_FREE(p) GC_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
+ GC_register_finalizer_ignore_self(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
+ GC_register_finalizer_no_order(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
+ GC_register_finalizer_unreachable(p, f, d, of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
+# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
+# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
+ GC_general_register_disappearing_link(link, obj)
+# define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
+# endif
+/* The following are included because they are often convenient, and */
+/* reduce the chance for a misspecified size argument. But calls may   */
+/* expand to something syntactically incorrect if t is a complicated */
+/* type expression. */
+# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
+# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
+# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
+# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
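+
+/*
+ * Illustrative sketch (not part of the original header): typical client
+ * allocation through the macros above.  struct node is a placeholder;
+ * GC_init() is declared earlier in this header.
+ *
+ *	struct node { struct node * next; int val; };
+ *	...
+ *	GC_init();
+ *	struct node * head = GC_NEW(struct node);
+ *	head->next = GC_NEW(struct node);
+ *	char * buf = GC_MALLOC_ATOMIC(1024);   (no pointers stored here)
+ */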
+
+/* Finalization. Some of these primitives are grossly unsafe. */
+/* The idea is to make them both cheap, and sufficient to build */
+/* a safer layer, closer to Modula-3, Java, or PCedar finalization. */
+/* The interface represents my conclusions from a long discussion */
+/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
+/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
+/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
+typedef void (*GC_finalization_proc) (void * obj, void * client_data);
+
+GC_API void GC_register_finalizer(void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd);
+GC_API void GC_debug_register_finalizer
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
+ /* When obj is no longer accessible, invoke */
+ /* (*fn)(obj, cd). If a and b are inaccessible, and */
+ /* a points to b (after disappearing links have been */
+ /* made to disappear), then only a will be */
+ /* finalized. (If this does not create any new */
+ /* pointers to b, then b will be finalized after the */
+ /* next collection.) Any finalizable object that */
+ /* is reachable from itself by following one or more */
+ /* pointers will not be finalized (or collected). */
+ /* Thus cycles involving finalizable objects should */
+ /* be avoided, or broken by disappearing links. */
+ /* All but the last finalizer registered for an object */
+ /* is ignored. */
+ /* Finalization may be removed by passing 0 as fn. */
+ /* Finalizers are implicitly unregistered just before */
+ /* they are invoked. */
+ /* The old finalizer and client data are stored in */
+ /* *ofn and *ocd. */
+ /* Fn is never invoked on an accessible object, */
+ /* provided hidden pointers are converted to real */
+ /* pointers only if the allocation lock is held, and */
+ /* such conversions are not performed by finalization */
+ /* routines. */
+ /* If GC_register_finalizer is aborted as a result of */
+ /* a signal, the object may be left with no */
+ /* finalization, even if neither the old nor new */
+ /* finalizer were NULL. */
+ /* Obj should be the nonNULL starting address of an */
+ /* object allocated by GC_malloc or friends. */
+ /* Note that any garbage collectable object referenced */
+ /* by cd will be considered accessible until the */
+ /* finalizer is invoked. */
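+
+/*
+ * Illustrative sketch (not part of the original header): running a
+ * cleanup action when a wrapper object becomes unreachable.  struct
+ * fwrap and fwrap_finalize are placeholder names; FILE/fopen/fclose
+ * come from <stdio.h>.
+ *
+ *	struct fwrap { FILE * f; };
+ *
+ *	void fwrap_finalize(void * obj, void * client_data)
+ *	{
+ *	    fclose(((struct fwrap *)obj)->f);
+ *	}
+ *	...
+ *	struct fwrap * w = GC_NEW(struct fwrap);
+ *	w->f = fopen("out.txt", "w");
+ *	GC_REGISTER_FINALIZER(w, fwrap_finalize, 0, 0, 0);
+ */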
+
+/* Another version of the above follows. It ignores                    */
+/* self-cycles, i.e. pointers from a finalizable object to */
+/* itself. There is a stylistic argument that this is wrong, */
+/* but it's unavoidable for C++, since the compiler may */
+/* silently introduce these. It's also benign in that specific */
+/* case. And it helps if finalizable objects are split to */
+/* avoid cycles. */
+/* Note that cd will still be viewed as accessible, even if it */
+/* refers to the object itself. */
+GC_API void GC_register_finalizer_ignore_self
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
+GC_API void GC_debug_register_finalizer_ignore_self
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
+
+/* Another version of the above. It ignores all cycles. */
+/* It should probably only be used by Java implementations. */
+/* Note that cd will still be viewed as accessible, even if it */
+/* refers to the object itself. */
+GC_API void GC_register_finalizer_no_order
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
+GC_API void GC_debug_register_finalizer_no_order
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
+
+/* This is a special finalizer that is useful when an object's */
+/* finalizer must be run when the object is known to be no */
+/* longer reachable, not even from other finalizable objects. */
+/* It behaves like "normal" finalization, except that the */
+/* finalizer is not run while the object is reachable from */
+/* other objects specifying unordered finalization. */
+/* Effectively it allows an object referenced, possibly */
+/* indirectly, from an unordered finalizable object to override */
+/* the unordered finalization request. */
+/* This can be used in combination with finalizer_no_order so */
+/* as to release resources that must not be released while an */
+/* object can still be brought back to life by other */
+/* finalizers. */
+/* Only works if GC_java_finalization is set. Probably only */
+/* of interest when implementing a language that requires */
+/* unordered finalization (e.g. Java, C#). */
+GC_API void GC_register_finalizer_unreachable
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
+GC_API void GC_debug_register_finalizer_unreachable
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
+
+/* The following routine may be used to break cycles between */
+/* finalizable objects, thus causing cyclic finalizable */
+/* objects to be finalized in the correct order. Standard */
+/* use involves calling GC_register_disappearing_link(&p), */
+/* where p is a pointer that is not followed by finalization */
+/* code, and should not be considered in determining */
+/* finalization order. */
+GC_API int GC_register_disappearing_link(void * * link );
+ /* Link should point to a field of a heap allocated */
+ /* object obj. *link will be cleared when obj is */
+ /* found to be inaccessible. This happens BEFORE any */
+ /* finalization code is invoked, and BEFORE any */
+ /* decisions about finalization order are made. */
+ /* This is useful in telling the finalizer that */
+ /* some pointers are not essential for proper */
+ /* finalization. This may avoid finalization cycles. */
+ /* Note that obj may be resurrected by another */
+ /* finalizer, and thus the clearing of *link may */
+ /* be visible to non-finalization code. */
+ /* There's an argument that an arbitrary action should */
+ /* be allowed here, instead of just clearing a pointer. */
+ /* But this causes problems if that action alters, or */
+ /* examines connectivity. */
+ /* Returns 1 if link was already registered, 0 */
+ /* otherwise. */
+ /* Only exists for backward compatibility. See below: */
+
+GC_API int GC_general_register_disappearing_link (void * * link, void * obj);
+ /* A slight generalization of the above. *link is */
+ /* cleared when obj first becomes inaccessible. This */
+ /* can be used to implement weak pointers easily and */
+ /* safely. Typically link will point to a location */
+ /* holding a disguised pointer to obj. (A pointer */
+ /* inside an "atomic" object is effectively */
+ /* disguised.) In this way soft */
+ /* pointers are broken before any object */
+ /* reachable from them is finalized. Each link */
+ /* may be registered only once, i.e. with one obj */
+ /* value. This was added after a long email discussion */
+ /* with John Ellis. */
+ /* Obj must be a pointer to the first word of an object */
+ /* we allocated. It is unsafe to explicitly deallocate */
+ /* the object containing link. Explicitly deallocating */
+ /* obj may or may not cause link to eventually be */
+ /* cleared. */
+ /* This can be used to implement certain types of */
+ /* weak pointers. Note however that this generally */
+ /* requires that the allocation lock is held (see */
+ /* GC_call_with_alloc_lock() below) when the disguised */
+ /* pointer is accessed. Otherwise a strong pointer */
+ /* could be recreated between the time the collector */
+ /* decides to reclaim the object and the link is */
+ /* cleared. */
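+
+/*
+ * Illustrative sketch (not part of the original header): a weak
+ * reference built on the routine above, assuming the collector is used
+ * with I_HIDE_POINTERS so that GC_hidden_pointer and HIDE_POINTER
+ * (defined further below) are available.  All names are placeholders;
+ * revealing the stored pointer must be done under the allocation lock,
+ * as noted above.
+ *
+ *	struct wref { GC_hidden_pointer hidden; };
+ *
+ *	struct wref * make_weak(void * target)
+ *	{
+ *	    struct wref * w = GC_NEW_ATOMIC(struct wref);
+ *	    w->hidden = HIDE_POINTER(target);
+ *	    GC_general_register_disappearing_link((void **)&(w->hidden),
+ *	                                          target);
+ *	    return w;
+ *	}
+ *	(w->hidden is cleared to 0 by the collector once target becomes
+ *	 otherwise unreachable.)
+ */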
+
+GC_API int GC_unregister_disappearing_link (void * * link);
+ /* Returns 0 if link was not actually registered. */
+ /* Undoes a registration by either of the above two */
+ /* routines. */
+
+/* Returns !=0 if GC_invoke_finalizers has something to do. */
+GC_API int GC_should_invoke_finalizers(void);
+
+GC_API int GC_invoke_finalizers(void);
+ /* Run finalizers for all objects that are ready to */
+ /* be finalized. Return the number of finalizers */
+ /* that were run. Normally this is also called */
+ /* implicitly during some allocations. If */
+ /* GC_finalize_on_demand is nonzero, it must be called */
+ /* explicitly. */
+
+/* Explicitly tell the collector that an object is reachable */
+/* at a particular program point. This prevents the argument */
+/* pointer from being optimized away, even if it is otherwise no       */
+/* longer needed. It should have no visible effect in the */
+/* absence of finalizers or disappearing links. But it may be */
+/* needed to prevent finalizers from running while the */
+/* associated external resource is still in use. */
+/* The function is sometimes called keep_alive in other */
+/* settings. */
+# if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+# define GC_reachable_here(ptr) \
+ __asm__ volatile(" " : : "X"(ptr) : "memory");
+# else
+ GC_API void GC_noop1(GC_word x);
+# define GC_reachable_here(ptr) GC_noop1((GC_word)(ptr));
+#endif
+
+/* GC_set_warn_proc can be used to redirect or filter warning messages. */
+/* p may not be a NULL pointer. */
+typedef void (*GC_warn_proc) (char *msg, GC_word arg);
+GC_API GC_warn_proc GC_set_warn_proc(GC_warn_proc p);
+ /* Returns old warning procedure. */
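+
+/*
+ * Illustrative sketch (not part of the original header): filtering
+ * collector warnings.  my_warn_proc is a placeholder name.
+ *
+ *	void my_warn_proc(char * msg, GC_word arg)
+ *	{
+ *	    (log, count, or ignore the warning here)
+ *	}
+ *	...
+ *	GC_warn_proc old_proc = GC_set_warn_proc(my_warn_proc);
+ */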
+
+GC_API GC_word GC_set_free_space_divisor(GC_word value);
+ /* Set free_space_divisor. See above for definition. */
+ /* Returns old value. */
+
+/* The following is intended to be used by a higher level */
+/* (e.g. Java-like) finalization facility. It is expected */
+/* that finalization code will arrange for hidden pointers to */
+/* disappear. Otherwise objects can be accessed after they */
+/* have been collected. */
+/* Note that putting pointers in atomic objects or in */
+/* nonpointer slots of "typed" objects is equivalent to */
+/* disguising them in this way, and may have other advantages. */
+# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
+ typedef GC_word GC_hidden_pointer;
+# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
+# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
+ /* Converting a hidden pointer to a real pointer requires verifying */
+ /* that the object still exists. This involves acquiring the */
+ /* allocator lock to avoid a race with the collector. */
+# endif /* I_HIDE_POINTERS */
+
+typedef void * (*GC_fn_type) (void * client_data);
+GC_API void * GC_call_with_alloc_lock (GC_fn_type fn, void * client_data);
+
+/* These routines are intended to explicitly notify the collector */
+/* of new threads. Often this is unnecessary because thread creation */
+/* is implicitly intercepted by the collector, using header-file */
+/* defines, or linker-based interception. In the long run the intent */
+/* is to always make redundant registration safe. In the short run, */
+/* this is being implemented a platform at a time. */
+/* The interface is complicated by the fact that we probably will not */
+/* ever be able to automatically determine the stack base for thread */
+/* stacks on all platforms. */
+
+/* Structure representing the base of a thread stack. On most */
+/* platforms this contains just a single address. */
+struct GC_stack_base {
+ void * mem_base; /* Base of memory stack. */
+# if defined(__ia64) || defined(__ia64__)
+ void * reg_base; /* Base of separate register stack. */
+# endif
+};
+
+typedef void * (*GC_stack_base_func)(struct GC_stack_base *sb, void *arg);
+
+/* Call a function with a stack base structure corresponding to */
+/* somewhere in the GC_call_with_stack_base frame. This often can */
+/* be used to provide a sufficiently accurate stack base. And we */
+/* implement it everywhere. */
+void * GC_call_with_stack_base(GC_stack_base_func fn, void *arg);
+
+/* Register the current thread, with the indicated stack base, as */
+/* a new thread whose stack(s) should be traced by the GC. If a */
+/* platform does not implicitly do so, this must be called before a */
+/* thread can allocate garbage collected memory, or assign pointers */
+/* to the garbage collected heap. Once registered, a thread will be */
+/* stopped during garbage collections. */
+/* Return codes: */
+#define GC_SUCCESS 0
+#define GC_DUPLICATE 1 /* Was already registered. */
+#define GC_NO_THREADS 2 /* No thread support in GC. */
+#define GC_UNIMPLEMENTED 3 /* Not yet implemented on this platform. */
+int GC_register_my_thread(struct GC_stack_base *);
+
+/* Unregister the current thread. The thread may no longer allocate */
+/* garbage collected memory or manipulate pointers to the */
+/* garbage collected heap after making this call. */
+/* Specifically, if it wants to return or otherwise communicate a */
+/* pointer to the garbage-collected heap to another thread, it must */
+/* do this before calling GC_unregister_my_thread, most probably */
+/* by saving it in a global data structure. */
+int GC_unregister_my_thread(void);
+
+/* Attempt to fill in the GC_stack_base structure with the stack base */
+/* for this thread. This appears to be required to implement anything */
+/* like the JNI AttachCurrentThread in an environment in which new */
+/* threads are not automatically registered with the collector. */
+/* It is also unfortunately hard to implement well on many platforms. */
+/* Returns GC_SUCCESS or GC_UNIMPLEMENTED. */
+int GC_get_stack_base(struct GC_stack_base *);
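+
+/*
+ * An illustrative sketch (hypothetical client code): attach a thread that
+ * was not created through a GC-wrapped interface, then detach it.  Error
+ * handling is reduced to a bare minimum.
+ *
+ *   void attach_work_detach(void)
+ *   {
+ *       struct GC_stack_base sb;
+ *       if (GC_get_stack_base(&sb) != GC_SUCCESS) return;
+ *       GC_register_my_thread(&sb);
+ *       // ... allocate and touch garbage collected memory here ...
+ *       GC_unregister_my_thread();
+ *   }
+ */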
+
+/* The following routines are primarily intended for use with a */
+/* preprocessor which inserts calls to check C pointer arithmetic. */
+/* They indicate failure by invoking the corresponding _print_proc. */
+
+/* Check that p and q point to the same object. */
+/* Fail conspicuously if they don't. */
+/* Returns the first argument. */
+/* Succeeds if neither p nor q points to the heap. */
+/* May succeed if both p and q point to locations between heap objects. */
+GC_API void * GC_same_obj (void * p, void * q);
+
+/* Checked pointer pre- and post- increment operations. Note that */
+/* the second argument is in units of bytes, not multiples of the */
+/* object size. This should either be invoked from a macro, or the */
+/* call should be automatically generated. */
+GC_API void * GC_pre_incr (void * *p, size_t how_much);
+GC_API void * GC_post_incr (void * *p, size_t how_much);
+
+/* Check that p is visible to the collector as a location that */
+/* may possibly contain a pointer. */
+/* If it isn't, fail conspicuously. */
+/* Returns the argument in all cases. May erroneously succeed */
+/* in hard cases. (This is intended for debugging use with */
+/* untyped allocations. The idea is that it should be possible, though */
+/* slow, to add such a call to all indirect pointer stores.) */
+/* Currently useless for multithreaded worlds. */
+GC_API void * GC_is_visible (void * p);
+
+/* Check that if p is a pointer to a heap page, then it points to */
+/* a valid displacement within a heap object. */
+/* Fail conspicuously if this property does not hold. */
+/* Uninteresting with GC_all_interior_pointers. */
+/* Always returns its argument. */
+GC_API void * GC_is_valid_displacement (void * p);
+
+/* Explicitly dump the GC state. This is most often called from the */
+/* debugger, or by setting the GC_DUMP_REGULARLY environment variable, */
+/* but it may be useful to call it from client code during debugging. */
+void GC_dump(void);
+
+/* Safer, but slow, pointer addition. Probably useful mainly with */
+/* a preprocessor. Useful only for heap pointers. */
+#ifdef GC_DEBUG
+# define GC_PTR_ADD3(x, n, type_of_result) \
+ ((type_of_result)GC_same_obj((x)+(n), (x)))
+# define GC_PRE_INCR3(x, n, type_of_result) \
+ ((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
+# define GC_POST_INCR2(x, type_of_result) \
+ ((type_of_result)GC_post_incr(&(x), sizeof(*x)))
+# ifdef __GNUC__
+# define GC_PTR_ADD(x, n) \
+ GC_PTR_ADD3(x, n, typeof(x))
+# define GC_PRE_INCR(x, n) \
+ GC_PRE_INCR3(x, n, typeof(x))
+# define GC_POST_INCR(x, n) \
+ GC_POST_INCR2(x, typeof(x))
+# else
+ /* We can't do this right without typeof, which ANSI */
+ /* decided was not sufficiently useful. Repeatedly */
+ /* mentioning the arguments seems too dangerous to be */
+ /* useful. So does omitting the cast on the result. */
+# define GC_PTR_ADD(x, n) ((x)+(n))
+# endif
+#else /* !GC_DEBUG */
+# define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
+# define GC_PTR_ADD(x, n) ((x)+(n))
+# define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
+# define GC_PRE_INCR(x, n) ((x) += (n))
+# define GC_POST_INCR2(x, n, type_of_result) ((x)++)
+# define GC_POST_INCR(x, n) ((x)++)
+#endif
+
+/* Safer assignment of a pointer to a nonstack location. */
+#ifdef GC_DEBUG
+# define GC_PTR_STORE(p, q) \
+ (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
+#else /* !GC_DEBUG */
+# define GC_PTR_STORE(p, q) (*(p) = (q))
+#endif
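+
+/*
+ * An illustrative sketch (hypothetical client code): with GC_DEBUG defined,
+ * the checked forms verify heap pointer arithmetic and stores at run time;
+ * without it they compile to the plain operations.
+ *
+ *   static int *cell;                      // a nonstack pointer location
+ *   void demo(void)
+ *   {
+ *       int *a = (int *)GC_MALLOC(10 * sizeof(int));
+ *       int *p = GC_PTR_ADD(a, 3);         // checked: must stay within *a
+ *       GC_PTR_STORE(&cell, p);            // checked pointer store
+ *   }
+ */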
+
+/* Functions called to report pointer checking errors */
+GC_API void (*GC_same_obj_print_proc) (void * p, void * q);
+
+GC_API void (*GC_is_valid_displacement_print_proc) (void * p);
+
+GC_API void (*GC_is_visible_print_proc) (void * p);
+
+
+/* For pthread support, we generally need to intercept a number of */
+/* thread library calls. We do that here by macro defining them. */
+
+#if !defined(GC_USE_LD_WRAP) && \
+ (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS))
+# include "gc_pthread_redirects.h"
+#endif
+
+# if defined(PCR) || defined(GC_SOLARIS_THREADS) || \
+ defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
+ /* Any flavor of threads. */
+/* This returns a list of objects, linked through their first */
+/* word. Its use can greatly reduce lock contention problems, since */
+/* the allocation lock can be acquired and released many fewer times. */
+/* It is used internally by gc_local_alloc.h, which provides a simpler */
+/* programming interface on Linux. */
+void * GC_malloc_many(size_t lb);
+#define GC_NEXT(p) (*(void * *)(p)) /* Retrieve the next element */
+ /* in returned list. */
+extern void GC_thr_init(void); /* Needed for Solaris/X86 ?? */
+
+#endif /* THREADS */
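+
+/*
+ * An illustrative sketch (hypothetical client code): carve a batch of
+ * equally sized objects out of a single GC_malloc_many() call.
+ *
+ *   void *batch = GC_malloc_many(sizeof(struct node));
+ *   while (batch != 0) {
+ *       void *obj = batch;
+ *       batch = GC_NEXT(batch);
+ *       GC_NEXT(obj) = 0;               // unlink before handing it out
+ *       init_node(obj);                 // hypothetical consumer
+ *   }
+ */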
+
+/* Register a callback to control the scanning of dynamic libraries.
+ When the GC scans the static data of a dynamic library, it will
+ first call a user-supplied routine with filename of the library and
+ the address and length of the memory region. This routine should
+ return nonzero if that region should be scanned. */
+GC_API void
+GC_register_has_static_roots_callback
+ (int (*callback)(const char *, void *, size_t));
+
+
+#if defined(GC_WIN32_THREADS) && !defined(__CYGWIN32__) \
+ && !defined(__CYGWIN__) \
+ && !defined(GC_PTHREADS)
+
+#ifdef __cplusplus
+ } /* Including windows.h in an extern "C" context no longer works. */
+#endif
+
+# include <windows.h>
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+ /*
+ * All threads must be created using GC_CreateThread or GC_beginthreadex,
+ * or must explicitly call GC_register_my_thread,
+ * so that they will be recorded in the thread table.
+ * For backwards compatibility, it is possible to build the GC
+ * with GC_DLL defined, and to call GC_use_DllMain().
+ * This implicitly registers all created threads, but appears to be
+ * less robust.
+ *
+ * Currently the collector expects all threads to fall through and
+ * terminate normally, or call GC_endthreadex() or GC_ExitThread,
+ * so that the thread is properly unregistered. (An explicit call
+ * to GC_unregister_my_thread() should also work, but risks unregistering
+ * the thread twice.)
+ */
+ GC_API HANDLE WINAPI GC_CreateThread(
+ LPSECURITY_ATTRIBUTES lpThreadAttributes,
+ DWORD dwStackSize, LPTHREAD_START_ROUTINE lpStartAddress,
+ LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD lpThreadId );
+
+
+ GC_API uintptr_t GC_beginthreadex(
+ void *security, unsigned stack_size,
+ unsigned ( __stdcall *start_address )( void * ),
+ void *arglist, unsigned initflag, unsigned *thrdaddr);
+
+ GC_API void GC_endthreadex(unsigned retval);
+
+ GC_API void WINAPI GC_ExitThread(DWORD dwExitCode);
+
+# if defined(_WIN32_WCE)
+ /*
+ * win32_threads.c implements the real WinMain, which will start a new thread
+ * to call GC_WinMain after initializing the garbage collector.
+ */
+ GC_API int WINAPI GC_WinMain(
+ HINSTANCE hInstance,
+ HINSTANCE hPrevInstance,
+ LPWSTR lpCmdLine,
+ int nCmdShow );
+# ifndef GC_BUILD
+# define WinMain GC_WinMain
+# endif
+# endif /* defined(_WIN32_WCE) */
+
+ /*
+ * Use implicit thread registration via DllMain.
+ */
+GC_API void GC_use_DllMain(void);
+
+# define CreateThread GC_CreateThread
+# define ExitThread GC_ExitThread
+# define _beginthreadex GC_beginthreadex
+# define _endthreadex GC_endthreadex
+# define _beginthread { > "Please use _beginthreadex instead of _beginthread" < }
+
+#endif /* defined(GC_WIN32_THREADS) && !cygwin */
+
+ /*
+ * Fully portable code should call GC_INIT() from the main program
+ * before making any other GC_ calls. On most platforms this is a
+ * no-op and the collector self-initializes. But a number of platforms
+ * make that too hard.
+ * A GC_INIT call is required if the collector is built with THREAD_LOCAL_ALLOC
+ * defined and the initial allocation call is not to GC_malloc().
+ */
+#if defined(__CYGWIN32__) || defined (_AIX)
+ /*
+ * Similarly gnu-win32 DLLs need explicit initialization from
+ * the main program, as does AIX.
+ */
+# ifdef __CYGWIN32__
+ extern int _data_start__[];
+ extern int _data_end__[];
+ extern int _bss_start__[];
+ extern int _bss_end__[];
+# define GC_MAX(x,y) ((x) > (y) ? (x) : (y))
+# define GC_MIN(x,y) ((x) < (y) ? (x) : (y))
+# define GC_DATASTART ((void *) GC_MIN(_data_start__, _bss_start__))
+# define GC_DATAEND ((void *) GC_MAX(_data_end__, _bss_end__))
+# if defined(GC_DLL)
+# define GC_INIT() { GC_add_roots(GC_DATASTART, GC_DATAEND); \
+ GC_gcollect(); /* For blacklisting. */}
+# else
+ /* Main program init not required */
+# define GC_INIT() { GC_init(); }
+# endif
+# endif
+# if defined(_AIX)
+ extern int _data[], _end[];
+# define GC_DATASTART ((void *)((ulong)_data))
+# define GC_DATAEND ((void *)((ulong)_end))
+# define GC_INIT() { GC_add_roots(GC_DATASTART, GC_DATAEND); }
+# endif
+#else
+# define GC_INIT() { GC_init(); }
+#endif
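+
+/*
+ * An illustrative sketch: a fully portable client simply calls GC_INIT()
+ * once at the top of main before any other GC_ call.
+ *
+ *   int main(void)
+ *   {
+ *       GC_INIT();                      // a no-op on most platforms
+ *       char *buf = (char *)GC_MALLOC(256);
+ *       return buf == 0;
+ *   }
+ */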
+
+#if !defined(_WIN32_WCE) \
+ && ((defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
+ || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__))
+ /* Win32s may not free all resources on process exit. */
+ /* This explicitly deallocates the heap. */
+ GC_API void GC_win32_free_heap ();
+#endif
+
+#if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) )
+ /* Allocation really goes through GC_amiga_allocwrapper_do */
+# include "gc_amiga_redirects.h"
+#endif
+
+#if defined(GC_REDIRECT_TO_LOCAL) && !defined(GC_LOCAL_ALLOC_H)
+# include "gc_local_alloc.h"
+#endif
+
+#ifdef __cplusplus
+ } /* end of extern "C" */
+#endif
+
+#endif /* _GC_H */
diff --git a/tools/build/src/engine/boehm_gc/include/gc_allocator.h b/tools/build/src/engine/boehm_gc/include/gc_allocator.h
new file mode 100644
index 000000000..4f3117b3b
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_allocator.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 1996-1997
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ * Copyright (c) 2002
+ * Hewlett-Packard Company
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Hewlett-Packard Company makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ */
+
+/*
+ * This implements standard-conforming allocators that interact with
+ * the garbage collector.  gc_allocator<T> allocates garbage-collectable
+ * objects of type T.  traceable_allocator<T> allocates objects that
+ * are not themselves garbage collected, but are scanned by the
+ * collector for pointers to collectable objects.  traceable_allocator
+ * should be used for explicitly managed STL containers that may
+ * point to collectable objects.
+ *
+ * This code was derived from an earlier version of the GNU C++ standard
+ * library, which itself was derived from the SGI STL implementation.
+ */
+
+#ifndef GC_ALLOCATOR_H
+
+#define GC_ALLOCATOR_H
+
+#include "gc.h"
+#include <new> // for placement new
+
+#if defined(__GNUC__)
+# define GC_ATTR_UNUSED __attribute__((unused))
+#else
+# define GC_ATTR_UNUSED
+#endif
+
+/* First some helpers to allow us to dispatch on whether or not a type
+ * is known to be pointerfree.
+ * These are private, except that the client may invoke the
+ * GC_DECLARE_PTRFREE macro.
+ */
+
+struct GC_true_type {};
+struct GC_false_type {};
+
+template <class GC_tp>
+struct GC_type_traits {
+ GC_false_type GC_is_ptr_free;
+};
+
+# define GC_DECLARE_PTRFREE(T) \
+template<> struct GC_type_traits<T> { GC_true_type GC_is_ptr_free; }
+
+GC_DECLARE_PTRFREE(char);
+GC_DECLARE_PTRFREE(signed char);
+GC_DECLARE_PTRFREE(unsigned char);
+GC_DECLARE_PTRFREE(signed short);
+GC_DECLARE_PTRFREE(unsigned short);
+GC_DECLARE_PTRFREE(signed int);
+GC_DECLARE_PTRFREE(unsigned int);
+GC_DECLARE_PTRFREE(signed long);
+GC_DECLARE_PTRFREE(unsigned long);
+GC_DECLARE_PTRFREE(float);
+GC_DECLARE_PTRFREE(double);
+GC_DECLARE_PTRFREE(long double);
+/* The client may want to add others. */
+
+// In the following GC_Tp is GC_true_type iff we are allocating a
+// pointerfree object.
+template <class GC_Tp>
+inline void * GC_selective_alloc(size_t n, GC_Tp) {
+ return GC_MALLOC(n);
+}
+
+template <>
+inline void * GC_selective_alloc<GC_true_type>(size_t n, GC_true_type) {
+ return GC_MALLOC_ATOMIC(n);
+}
+
+/* Now the public gc_allocator<T> class:
+ */
+template <class GC_Tp>
+class gc_allocator {
+public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef GC_Tp* pointer;
+ typedef const GC_Tp* const_pointer;
+ typedef GC_Tp& reference;
+ typedef const GC_Tp& const_reference;
+ typedef GC_Tp value_type;
+
+ template <class GC_Tp1> struct rebind {
+ typedef gc_allocator<GC_Tp1> other;
+ };
+
+ gc_allocator() {}
+ gc_allocator(const gc_allocator&) throw() {}
+# if !(GC_NO_MEMBER_TEMPLATES || 0 < _MSC_VER && _MSC_VER <= 1200)
+ // MSVC++ 6.0 does not support member templates
+ template <class GC_Tp1> gc_allocator(const gc_allocator<GC_Tp1>&) throw() {}
+# endif
+ ~gc_allocator() throw() {}
+
+ pointer address(reference GC_x) const { return &GC_x; }
+ const_pointer address(const_reference GC_x) const { return &GC_x; }
+
+ // GC_n is permitted to be 0. The C++ standard says nothing about what
+ // the return value is when GC_n == 0.
+ GC_Tp* allocate(size_type GC_n, const void* = 0) {
+ GC_type_traits<GC_Tp> traits;
+ return static_cast<GC_Tp *>
+ (GC_selective_alloc(GC_n * sizeof(GC_Tp),
+ traits.GC_is_ptr_free));
+ }
+
+ // __p is not permitted to be a null pointer.
+ void deallocate(pointer __p, size_type GC_ATTR_UNUSED GC_n)
+ { GC_FREE(__p); }
+
+ size_type max_size() const throw()
+ { return size_t(-1) / sizeof(GC_Tp); }
+
+ void construct(pointer __p, const GC_Tp& __val) { new(__p) GC_Tp(__val); }
+ void destroy(pointer __p) { __p->~GC_Tp(); }
+};
+
+template<>
+class gc_allocator<void> {
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ typedef void value_type;
+
+ template <class GC_Tp1> struct rebind {
+ typedef gc_allocator<GC_Tp1> other;
+ };
+};
+
+
+template <class GC_T1, class GC_T2>
+inline bool operator==(const gc_allocator<GC_T1>&, const gc_allocator<GC_T2>&)
+{
+ return true;
+}
+
+template <class GC_T1, class GC_T2>
+inline bool operator!=(const gc_allocator<GC_T1>&, const gc_allocator<GC_T2>&)
+{
+ return false;
+}
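+
+// An illustrative sketch (not part of the original header): a standard
+// container whose backing storage lives in the collected heap.  The element
+// type here is pointer-free, so the atomic allocation path is taken.
+//
+//   #include <vector>
+//   std::vector<int, gc_allocator<int> > squares;
+//   for (int i = 0; i < 10; ++i) squares.push_back(i * i);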
+
+/*
+ * And the public traceable_allocator class.
+ */
+
+// Note that we currently don't specialize the pointer-free case, since a
+// pointer-free traceable container doesn't make that much sense,
+// though it could become an issue due to abstraction boundaries.
+template <class GC_Tp>
+class traceable_allocator {
+public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef GC_Tp* pointer;
+ typedef const GC_Tp* const_pointer;
+ typedef GC_Tp& reference;
+ typedef const GC_Tp& const_reference;
+ typedef GC_Tp value_type;
+
+ template <class GC_Tp1> struct rebind {
+ typedef traceable_allocator<GC_Tp1> other;
+ };
+
+ traceable_allocator() throw() {}
+ traceable_allocator(const traceable_allocator&) throw() {}
+# if !(GC_NO_MEMBER_TEMPLATES || 0 < _MSC_VER && _MSC_VER <= 1200)
+ // MSVC++ 6.0 does not support member templates
+ template <class GC_Tp1> traceable_allocator
+ (const traceable_allocator<GC_Tp1>&) throw() {}
+# endif
+ ~traceable_allocator() throw() {}
+
+ pointer address(reference GC_x) const { return &GC_x; }
+ const_pointer address(const_reference GC_x) const { return &GC_x; }
+
+ // GC_n is permitted to be 0. The C++ standard says nothing about what
+ // the return value is when GC_n == 0.
+ GC_Tp* allocate(size_type GC_n, const void* = 0) {
+ return static_cast<GC_Tp*>(GC_MALLOC_UNCOLLECTABLE(GC_n * sizeof(GC_Tp)));
+ }
+
+ // __p is not permitted to be a null pointer.
+ void deallocate(pointer __p, size_type GC_ATTR_UNUSED GC_n)
+ { GC_FREE(__p); }
+
+ size_type max_size() const throw()
+ { return size_t(-1) / sizeof(GC_Tp); }
+
+ void construct(pointer __p, const GC_Tp& __val) { new(__p) GC_Tp(__val); }
+ void destroy(pointer __p) { __p->~GC_Tp(); }
+};
+
+template<>
+class traceable_allocator<void> {
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ typedef void value_type;
+
+ template <class GC_Tp1> struct rebind {
+ typedef traceable_allocator<GC_Tp1> other;
+ };
+};
+
+
+template <class GC_T1, class GC_T2>
+inline bool operator==(const traceable_allocator<GC_T1>&, const traceable_allocator<GC_T2>&)
+{
+ return true;
+}
+
+template <class GC_T1, class GC_T2>
+inline bool operator!=(const traceable_allocator<GC_T1>&, const traceable_allocator<GC_T2>&)
+{
+ return false;
+}
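+
+// An illustrative sketch (Node is a hypothetical collectable type): an
+// explicitly managed container holding pointers into the collected heap.
+// Its storage is uncollectable but still scanned, so the Nodes stay live.
+//
+//   #include <vector>
+//   std::vector<Node *, traceable_allocator<Node *> > roots;
+//   roots.push_back(GC_NEW(Node));       // GC_NEW comes from gc.h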
+
+#endif /* GC_ALLOCATOR_H */
diff --git a/tools/build/src/engine/boehm_gc/include/gc_amiga_redirects.h b/tools/build/src/engine/boehm_gc/include/gc_amiga_redirects.h
new file mode 100644
index 000000000..9e975c8c8
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_amiga_redirects.h
@@ -0,0 +1,30 @@
+#ifndef GC_AMIGA_REDIRECTS_H
+
+# define GC_AMIGA_REDIRECTS_H
+
+# if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) )
+ extern void *GC_amiga_realloc(void *old_object,size_t new_size_in_bytes);
+# define GC_realloc(a,b) GC_amiga_realloc(a,b)
+ extern void GC_amiga_set_toany(void (*func)(void));
+ extern int GC_amiga_free_space_divisor_inc;
+ extern void *(*GC_amiga_allocwrapper_do) \
+ (size_t size,void *(*AllocFunction)(size_t size2));
+# define GC_malloc(a) \
+ (*GC_amiga_allocwrapper_do)(a,GC_malloc)
+# define GC_malloc_atomic(a) \
+ (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic)
+# define GC_malloc_uncollectable(a) \
+ (*GC_amiga_allocwrapper_do)(a,GC_malloc_uncollectable)
+# define GC_malloc_stubborn(a) \
+ (*GC_amiga_allocwrapper_do)(a,GC_malloc_stubborn)
+# define GC_malloc_atomic_uncollectable(a) \
+ (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_uncollectable)
+# define GC_malloc_ignore_off_page(a) \
+ (*GC_amiga_allocwrapper_do)(a,GC_malloc_ignore_off_page)
+# define GC_malloc_atomic_ignore_off_page(a) \
+ (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_ignore_off_page)
+# endif /* _AMIGA && !GC_AMIGA_MAKINGLIB */
+
+#endif /* GC_AMIGA_REDIRECTS_H */
+
+
diff --git a/tools/build/src/engine/boehm_gc/include/gc_backptr.h b/tools/build/src/engine/boehm_gc/include/gc_backptr.h
new file mode 100644
index 000000000..5899496e0
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_backptr.h
@@ -0,0 +1,65 @@
+/*
+ * This is a simple API to implement pointer back tracing, i.e.
+ * to answer questions such as "who is pointing to this" or
+ * "why is this object being retained by the collector"
+ *
+ * This API assumes that we have an ANSI C compiler.
+ *
+ * Most of these calls yield useful information only after
+ * a garbage collection. Usually the client will first force
+ * a full collection and then gather information, preferably
+ * before much intervening allocation.
+ *
+ * The implementation of the interface is only about 99.9999%
+ * correct. It is intended to be good enough for profiling,
+ * but is not intended to be used with production code.
+ *
+ * Results are likely to be much more useful if all allocation is
+ * accomplished through the debugging allocators.
+ *
+ * The implementation idea is due to A. Demers.
+ */
+
+#ifndef GC_BACKPTR_H
+#define GC_BACKPTR_H
+/* Store information about the object referencing dest in *base_p */
+/* and *offset_p. */
+/* If multiple objects or roots point to dest, the one reported */
+/* will be the last one used by the garbage collector to trace the */
+/* object. */
+/* source is root ==> *base_p = address, *offset_p = 0 */
+/* source is heap object ==> *base_p != 0, *offset_p = offset */
+/* Returns 1 on success, 0 if source couldn't be determined. */
+/* Dest can be any address within a heap object. */
+typedef enum { GC_UNREFERENCED, /* No reference info available. */
+ GC_NO_SPACE, /* Dest not allocated with debug alloc */
+ GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
+ GC_REFD_FROM_REG, /* Referenced from a register, i.e. */
+ /* a root without an address. */
+ GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
+ GC_FINALIZER_REFD /* Finalizable and hence accessible. */
+} GC_ref_kind;
+
+GC_ref_kind GC_get_back_ptr_info(void *dest, void **base_p, size_t *offset_p);
+
+/* Generate a random heap address. */
+/* The resulting address is in the heap, but */
+/* not necessarily inside a valid object. */
+void * GC_generate_random_heap_address(void);
+
+/* Generate a random address inside a valid marked heap object. */
+void * GC_generate_random_valid_address(void);
+
+/* Force a garbage collection and generate a backtrace from a */
+/* random heap address. */
+/* This uses the GC logging mechanism (GC_printf) to produce */
+/* output. It can often be called from a debugger. The */
+/* source in dbg_mlc.c also serves as a sample client. */
+void GC_generate_random_backtrace(void);
+
+/* Print a backtrace from a specific address. Used by the */
+/* above. The client should call GC_gcollect() immediately */
+/* before invocation. */
+void GC_print_backtrace(void *);
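+
+/*
+ * An illustrative sketch of a debugging session; suspect is a hypothetical
+ * address obtained from a heap dump or from the debugger.
+ *
+ *   GC_gcollect();                     // refresh back-pointer information
+ *   GC_print_backtrace(suspect);       // why is this object retained?
+ *   GC_generate_random_backtrace();    // or sample a random live object
+ */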
+
+#endif /* GC_BACKPTR_H */
diff --git a/tools/build/src/engine/boehm_gc/include/gc_config_macros.h b/tools/build/src/engine/boehm_gc/include/gc_config_macros.h
new file mode 100644
index 000000000..66abf0b1e
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_config_macros.h
@@ -0,0 +1,179 @@
+/*
+ * This should never be included directly. It is included only from gc.h.
+ * We separate it only to make gc.h more suitable as documentation.
+ *
+ * Some tests for old macros. These violate our namespace rules and will
+ * disappear shortly. Use the GC_ names.
+ */
+#if defined(SOLARIS_THREADS) || defined(_SOLARIS_THREADS) \
+ || defined(_SOLARIS_PTHREADS) || defined(GC_SOLARIS_PTHREADS)
+ /* We no longer support old style Solaris threads. */
+ /* GC_SOLARIS_THREADS now means pthreads. */
+# ifndef GC_SOLARIS_THREADS
+# define GC_SOLARIS_THREADS
+# endif
+#endif
+#if defined(IRIX_THREADS)
+# define GC_IRIX_THREADS
+#endif
+#if defined(DGUX_THREADS)
+# if !defined(GC_DGUX386_THREADS)
+# define GC_DGUX386_THREADS
+# endif
+#endif
+#if defined(AIX_THREADS)
+# define GC_AIX_THREADS
+#endif
+#if defined(HPUX_THREADS)
+# define GC_HPUX_THREADS
+#endif
+#if defined(OSF1_THREADS)
+# define GC_OSF1_THREADS
+#endif
+#if defined(LINUX_THREADS)
+# define GC_LINUX_THREADS
+#endif
+#if defined(WIN32_THREADS)
+# define GC_WIN32_THREADS
+#endif
+#if defined(USE_LD_WRAP)
+# define GC_USE_LD_WRAP
+#endif
+
+#if !defined(_REENTRANT) && (defined(GC_SOLARIS_THREADS) \
+ || defined(GC_HPUX_THREADS) \
+ || defined(GC_AIX_THREADS) \
+ || defined(GC_LINUX_THREADS) \
+ || defined(GC_NETBSD_THREADS) \
+ || defined(GC_GNU_THREADS))
+# define _REENTRANT
+ /* Better late than never. This fails if system headers that */
+ /* depend on this were previously included. */
+#endif
+
+#if !defined(_PTHREADS) && defined(GC_NETBSD_THREADS)
+# define _PTHREADS
+#endif
+
+#if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
+# define _POSIX4A_DRAFT10_SOURCE 1
+#endif
+
+# if defined(GC_SOLARIS_THREADS) || defined(GC_FREEBSD_THREADS) || \
+ defined(GC_IRIX_THREADS) || defined(GC_LINUX_THREADS) || \
+ defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) || \
+ defined(GC_DGUX386_THREADS) || defined(GC_DARWIN_THREADS) || \
+ defined(GC_AIX_THREADS) || defined(GC_NETBSD_THREADS) || \
+ (defined(GC_WIN32_THREADS) && defined(__CYGWIN32__)) || \
+ defined(GC_GNU_THREADS)
+# define GC_PTHREADS
+# endif
+
+#if defined(GC_WIN32_PTHREADS)
+# define GC_WIN32_THREADS
+# define GC_PTHREADS
+#endif
+
+#if defined(GC_THREADS) && !defined(GC_PTHREADS)
+# if defined(__linux__)
+# define GC_LINUX_THREADS
+# define GC_PTHREADS
+# endif
+# if !defined(__linux__) && (defined(_PA_RISC1_1) || defined(_PA_RISC2_0) \
+ || defined(hppa) || defined(__HPPA)) \
+ || (defined(__ia64) && defined(_HPUX_SOURCE))
+# define GC_HPUX_THREADS
+# define GC_PTHREADS
+# endif
+# if !defined(__linux__) && (defined(__alpha) || defined(__alpha__))
+# define GC_OSF1_THREADS
+# define GC_PTHREADS
+# endif
+# if defined(__mips) && !defined(__linux__)
+# define GC_IRIX_THREADS
+# define GC_PTHREADS
+# endif
+# if defined(__sparc) && !defined(__linux__) \
+ || defined(sun) && (defined(i386) || defined(__i386__))
+# define GC_SOLARIS_THREADS
+# define GC_PTHREADS
+# endif
+# if defined(__APPLE__) && defined(__MACH__)
+# define GC_DARWIN_THREADS
+# define GC_PTHREADS
+# endif
+# if !defined(GC_PTHREADS) && (defined(__FreeBSD__) || defined(__DragonFly__))
+# define GC_FREEBSD_THREADS
+# define GC_PTHREADS
+# endif
+# if !defined(GC_PTHREADS) && defined(__NetBSD__)
+# define GC_NETBSD_THREADS
+# define GC_PTHREADS
+# endif
+# if defined(DGUX) && (defined(i386) || defined(__i386__))
+# define GC_DGUX386_THREADS
+# define GC_PTHREADS
+# endif
+# if defined(_AIX)
+# define GC_AIX_THREADS
+# define GC_PTHREADS
+# endif
+#endif /* GC_THREADS */
+
+#if defined(GC_THREADS) && !defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) \
+ && (defined(_WIN32) || defined(_MSC_VER) || defined(__CYGWIN__) \
+ || defined(__MINGW32__) || defined(__BORLANDC__) \
+ || defined(_WIN32_WCE))
+# define GC_WIN32_THREADS
+# if defined(__CYGWIN__)
+# define GC_PTHREADS
+# endif
+#endif
+
+# define __GC
+# ifndef _WIN32_WCE
+# include <stddef.h>
+# if defined(__MINGW32__)
+# include <stdint.h>
+ /* We mention uintptr_t. */
+ /* Perhaps this should be included in pure msft environments */
+ /* as well? */
+# endif
+# else /* ! _WIN32_WCE */
+/* Yet more kluges for WinCE */
+# include <stdlib.h> /* size_t is defined here */
+ typedef long ptrdiff_t; /* ptrdiff_t is not defined */
+# endif
+
+#if defined(_DLL) && !defined(GC_NOT_DLL) && !defined(GC_DLL)
+# define GC_DLL
+#endif
+
+#if defined(__MINGW32__) && defined(GC_DLL)
+# ifdef GC_BUILD
+# define GC_API __declspec(dllexport)
+# else
+# define GC_API __declspec(dllimport)
+# endif
+#endif
+
+#if (defined(__DMC__) || defined(_MSC_VER)) && defined(GC_DLL)
+# ifdef GC_BUILD
+# define GC_API extern __declspec(dllexport)
+# else
+# define GC_API __declspec(dllimport)
+# endif
+#endif
+
+#if defined(__WATCOMC__) && defined(GC_DLL)
+# ifdef GC_BUILD
+# define GC_API extern __declspec(dllexport)
+# else
+# define GC_API extern __declspec(dllimport)
+# endif
+#endif
+
+#ifndef GC_API
+#define GC_API extern
+#endif
+
diff --git a/tools/build/src/engine/boehm_gc/include/gc_cpp.h b/tools/build/src/engine/boehm_gc/include/gc_cpp.h
new file mode 100644
index 000000000..d3df21121
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_cpp.h
@@ -0,0 +1,374 @@
+#ifndef GC_CPP_H
+#define GC_CPP_H
+/****************************************************************************
+Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program for any
+purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is
+granted, provided the above notices are retained, and a notice that
+the code was modified is included with the above copyright notice.
+****************************************************************************
+
+C++ Interface to the Boehm Collector
+
+ John R. Ellis and Jesse Hull
+
+This interface provides access to the Boehm collector. It provides
+basic facilities similar to those described in "Safe, Efficient
+Garbage Collection for C++", by John R. Elis and David L. Detlefs
+(ftp://ftp.parc.xerox.com/pub/ellis/gc).
+
+All heap-allocated objects are either "collectable" or
+"uncollectable". Programs must explicitly delete uncollectable
+objects, whereas the garbage collector will automatically delete
+collectable objects when it discovers them to be inaccessible.
+Collectable objects may freely point at uncollectable objects and vice
+versa.
+
+Objects allocated with the built-in "::operator new" are uncollectable.
+
+Objects derived from class "gc" are collectable. For example:
+
+ class A: public gc {...};
+ A* a = new A; // a is collectable.
+
+Collectable instances of non-class types can be allocated using the GC
+(or UseGC) placement:
+
+ typedef int A[ 10 ];
+ A* a = new (GC) A;
+
+Uncollectable instances of classes derived from "gc" can be allocated
+using the NoGC placement:
+
+ class A: public gc {...};
+ A* a = new (NoGC) A; // a is uncollectable.
+
+The new(PointerFreeGC) syntax allows the allocation of collectable
+objects that are not scanned by the collector. This is useful if you
+are allocating compressed data, bitmaps, or network packets. (In
+the latter case, it may remove danger of unfriendly network packets
+intentionally containing values that cause spurious memory retention.)
+
+Both uncollectable and collectable objects can be explicitly deleted
+with "delete", which invokes an object's destructors and frees its
+storage immediately.
+
+A collectable object may have a clean-up function, which will be
+invoked when the collector discovers the object to be inaccessible.
+An object derived from "gc_cleanup" or containing a member derived
+from "gc_cleanup" has a default clean-up function that invokes the
+object's destructors. Explicit clean-up functions may be specified as
+an additional placement argument:
+
+ A* a = ::new (GC, MyCleanup) A;
+
+An object is considered "accessible" by the collector if it can be
+reached by a path of pointers from static variables, automatic
+variables of active functions, or from some object with clean-up
+enabled; pointers from an object to itself are ignored.
+
+Thus, if objects A and B both have clean-up functions, and A points at
+B, B is considered accessible. After A's clean-up is invoked and its
+storage released, B will then become inaccessible and will have its
+clean-up invoked. If A points at B and B points to A, forming a
+cycle, then that's considered a storage leak, and neither will be
+collectable. See the interface gc.h for low-level facilities for
+handling such cycles of objects with clean-up.
+
+The collector cannot guarantee that it will find all inaccessible
+objects. In practice, it finds almost all of them.
+
+
+Cautions:
+
+1. Be sure the collector has been augmented with "make c++" or
+"--enable-cplusplus".
+
+2. If your compiler supports the new "operator new[]" syntax, then
+add -DGC_OPERATOR_NEW_ARRAY to the Makefile.
+
+If your compiler doesn't support "operator new[]", beware that an
+array of type T, where T is derived from "gc", may or may not be
+allocated as a collectable object (it depends on the compiler). Use
+the explicit GC placement to make the array collectable. For example:
+
+ class A: public gc {...};
+ A* a1 = new A[ 10 ]; // collectable or uncollectable?
+ A* a2 = new (GC) A[ 10 ]; // collectable
+
+3. The destructors of collectable arrays of objects derived from
+"gc_cleanup" will not be invoked properly. For example:
+
+ class A: public gc_cleanup {...};
+ A* a = new (GC) A[ 10 ]; // destructors not invoked correctly
+
+Typically, only the destructor for the first element of the array will
+be invoked when the array is garbage-collected. To get all the
+destructors of any array executed, you must supply an explicit
+clean-up function:
+
+ A* a = new (GC, MyCleanUp) A[ 10 ];
+
+(Implementing clean-up of arrays correctly, portably, and in a way
+that preserves the correct exception semantics requires a language
+extension, e.g. the "gc" keyword.)
+
+4. Compiler bugs (now hopefully history):
+
+* Solaris 2's CC (SC3.0) doesn't implement t->~T() correctly, so the
+destructors of classes derived from gc_cleanup won't be invoked.
+You'll have to explicitly register a clean-up function with
+new-placement syntax.
+
+* Evidently cfront 3.0 does not allow destructors to be explicitly
+invoked using the ANSI-conforming syntax t->~T(). If you're using
+cfront 3.0, you'll have to comment out the class gc_cleanup, which
+uses explicit invocation.
+
+5. GC name conflicts:
+
+Many other systems seem to use the identifier "GC" as an abbreviation
+for "Graphics Context". Since version 5.0, GC placement has been replaced
+by UseGC. GC is an alias for UseGC, unless GC_NAME_CONFLICT is defined.
+
+****************************************************************************/
+
+#include "gc.h"
+
+#ifndef THINK_CPLUS
+# define GC_cdecl
+#else
+# define GC_cdecl _cdecl
+#endif
+
+#if ! defined( GC_NO_OPERATOR_NEW_ARRAY ) \
+ && !defined(_ENABLE_ARRAYNEW) /* Digimars */ \
+ && (defined(__BORLANDC__) && (__BORLANDC__ < 0x450) \
+ || (defined(__GNUC__) && \
+ (__GNUC__ < 2 || __GNUC__ == 2 && __GNUC_MINOR__ < 6)) \
+ || (defined(__WATCOMC__) && __WATCOMC__ < 1050))
+# define GC_NO_OPERATOR_NEW_ARRAY
+#endif
+
+#if !defined(GC_NO_OPERATOR_NEW_ARRAY) && !defined(GC_OPERATOR_NEW_ARRAY)
+# define GC_OPERATOR_NEW_ARRAY
+#endif
+
+#if ! defined ( __BORLANDC__ ) /* Confuses the Borland compiler. */ \
+ && ! defined ( __sgi )
+# define GC_PLACEMENT_DELETE
+#endif
+
+enum GCPlacement {UseGC,
+#ifndef GC_NAME_CONFLICT
+ GC=UseGC,
+#endif
+ NoGC, PointerFreeGC};
+
+class gc {public:
+ inline void* operator new( size_t size );
+ inline void* operator new( size_t size, GCPlacement gcp );
+ inline void* operator new( size_t size, void *p );
+ /* Must be redefined here, since the other overloadings */
+ /* hide the global definition. */
+ inline void operator delete( void* obj );
+# ifdef GC_PLACEMENT_DELETE
+ inline void operator delete( void*, void* );
+# endif
+
+#ifdef GC_OPERATOR_NEW_ARRAY
+ inline void* operator new[]( size_t size );
+ inline void* operator new[]( size_t size, GCPlacement gcp );
+ inline void* operator new[]( size_t size, void *p );
+ inline void operator delete[]( void* obj );
+# ifdef GC_PLACEMENT_DELETE
+ inline void operator delete[]( void*, void* );
+# endif
+#endif /* GC_OPERATOR_NEW_ARRAY */
+ };
+ /*
+ Instances of classes derived from "gc" will be allocated in the
+ collected heap by default, unless an explicit NoGC placement is
+ specified. */
+
+class gc_cleanup: virtual public gc {public:
+ inline gc_cleanup();
+ inline virtual ~gc_cleanup();
+private:
+ inline static void GC_cdecl cleanup( void* obj, void* clientData );};
+ /*
+ Instances of classes derived from "gc_cleanup" will be allocated
+ in the collected heap by default. When the collector discovers an
+ inaccessible object derived from "gc_cleanup" or containing a
+ member derived from "gc_cleanup", its destructors will be
+ invoked. */
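+
+  /*
+  An illustrative sketch (Socket and close_fd() are hypothetical): the
+  destructor runs when the collector later finds the object unreachable.
+
+      class Socket: public gc_cleanup {
+      public:
+          int fd;
+          ~Socket() { close_fd(fd); }
+      };
+      Socket* s = new Socket;    // collectable, destructor acts as clean-up
+  */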
+
+extern "C" {typedef void (*GCCleanUpFunc)( void* obj, void* clientData );}
+
+#ifdef _MSC_VER
+ // Disable warning that "no matching operator delete found; memory will
+ // not be freed if initialization throws an exception"
+# pragma warning(disable:4291)
+#endif
+
+inline void* operator new(
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup = 0,
+ void* clientData = 0 );
+ /*
+ Allocates a collectable or uncollected object, according to the
+ value of "gcp".
+
+ For collectable objects, if "cleanup" is non-null, then when the
+ allocated object "obj" becomes inaccessible, the collector will
+ invoke the function "cleanup( obj, clientData )" but will not
+ invoke the object's destructors. It is an error to explicitly
+ delete an object allocated with a non-null "cleanup".
+
+ It is an error to specify a non-null "cleanup" with NoGC or for
+ classes derived from "gc_cleanup" or containing members derived
+ from "gc_cleanup". */
+
+
+#ifdef _MSC_VER
+ /** This ensures that the system default operator new[] doesn't get
+ * undefined, which is what seems to happen on VC++ 6 for some reason
+ * if we define a multi-argument operator new[].
+ * There seems to be no way to really redirect new in this
+ * environment without including this everywhere.
+ */
+ void *operator new[]( size_t size );
+
+ void operator delete[](void* obj);
+
+ void* operator new( size_t size);
+
+ void operator delete(void* obj);
+
+ // This new operator is used by VC++ in case of Debug builds !
+ void* operator new( size_t size,
+ int ,//nBlockUse,
+ const char * szFileName,
+ int nLine );
+#endif /* _MSC_VER */
+
+
+#ifdef GC_OPERATOR_NEW_ARRAY
+
+inline void* operator new[](
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup = 0,
+ void* clientData = 0 );
+ /*
+ The operator new for arrays, identical to the above. */
+
+#endif /* GC_OPERATOR_NEW_ARRAY */
+
+/****************************************************************************
+
+Inline implementation
+
+****************************************************************************/
+
+inline void* gc::operator new( size_t size ) {
+ return GC_MALLOC( size );}
+
+inline void* gc::operator new( size_t size, GCPlacement gcp ) {
+ if (gcp == UseGC)
+ return GC_MALLOC( size );
+ else if (gcp == PointerFreeGC)
+ return GC_MALLOC_ATOMIC( size );
+ else
+ return GC_MALLOC_UNCOLLECTABLE( size );}
+
+inline void* gc::operator new( size_t size, void *p ) {
+ return p;}
+
+inline void gc::operator delete( void* obj ) {
+ GC_FREE( obj );}
+
+#ifdef GC_PLACEMENT_DELETE
+ inline void gc::operator delete( void*, void* ) {}
+#endif
+
+#ifdef GC_OPERATOR_NEW_ARRAY
+
+inline void* gc::operator new[]( size_t size ) {
+ return gc::operator new( size );}
+
+inline void* gc::operator new[]( size_t size, GCPlacement gcp ) {
+ return gc::operator new( size, gcp );}
+
+inline void* gc::operator new[]( size_t size, void *p ) {
+ return p;}
+
+inline void gc::operator delete[]( void* obj ) {
+ gc::operator delete( obj );}
+
+#ifdef GC_PLACEMENT_DELETE
+ inline void gc::operator delete[]( void*, void* ) {}
+#endif
+
+#endif /* GC_OPERATOR_NEW_ARRAY */
+
+
+inline gc_cleanup::~gc_cleanup() {
+ GC_register_finalizer_ignore_self( GC_base(this), 0, 0, 0, 0 );}
+
+inline void gc_cleanup::cleanup( void* obj, void* displ ) {
+ ((gc_cleanup*) ((char*) obj + (ptrdiff_t) displ))->~gc_cleanup();}
+
+inline gc_cleanup::gc_cleanup() {
+ GC_finalization_proc oldProc;
+ void* oldData;
+ void* base = GC_base( (void *) this );
+ if (0 != base) {
+ // Don't call the debug version, since this is a real base address.
+ GC_register_finalizer_ignore_self(
+ base, (GC_finalization_proc)cleanup, (void*) ((char*) this - (char*) base),
+ &oldProc, &oldData );
+ if (0 != oldProc) {
+ GC_register_finalizer_ignore_self( base, oldProc, oldData, 0, 0 );}}}
+
+inline void* operator new(
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup,
+ void* clientData )
+{
+ void* obj;
+
+ if (gcp == UseGC) {
+ obj = GC_MALLOC( size );
+ if (cleanup != 0)
+ GC_REGISTER_FINALIZER_IGNORE_SELF(
+ obj, cleanup, clientData, 0, 0 );}
+ else if (gcp == PointerFreeGC) {
+ obj = GC_MALLOC_ATOMIC( size );}
+ else {
+ obj = GC_MALLOC_UNCOLLECTABLE( size );};
+ return obj;}
+
+
+#ifdef GC_OPERATOR_NEW_ARRAY
+
+inline void* operator new[](
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup,
+ void* clientData )
+{
+ return ::operator new( size, gcp, cleanup, clientData );}
+
+#endif /* GC_OPERATOR_NEW_ARRAY */
+
+
+#endif /* GC_CPP_H */
+
diff --git a/tools/build/src/engine/boehm_gc/include/gc_gcj.h b/tools/build/src/engine/boehm_gc/include/gc_gcj.h
new file mode 100644
index 000000000..699ddf5d4
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_gcj.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
+ * Copyright 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright 1999 by Hewlett-Packard Company. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* This file assumes the collector has been compiled with GC_GCJ_SUPPORT */
+/* and that an ANSI C compiler is available. */
+
+/*
+ * We allocate objects whose first word contains a pointer to a struct
+ * describing the object type. This struct contains a garbage collector mark
+ * descriptor at offset MARK_DESCR_OFFSET. Alternatively, the objects
+ * may be marked by the mark procedure passed to GC_init_gcj_malloc.
+ */
+
+#ifndef GC_GCJ_H
+
+#define GC_GCJ_H
+
+#ifndef MARK_DESCR_OFFSET
+# define MARK_DESCR_OFFSET sizeof(word)
+#endif
+ /* Gcj keeps GC descriptor as second word of vtable. This */
+ /* probably needs to be adjusted for other clients. */
+ /* We currently assume that this offset is such that: */
+ /* - all objects of this kind are large enough to have */
+ /* a value at that offset, and */
+ /* - it is not zero. */
+ /* These assumptions allow objects on the free list to be */
+ /* marked normally. */
+
+#ifndef _GC_H
+# include "gc.h"
+#endif
+
+/* The following allocators signal an out of memory condition with */
+/* return GC_oom_fn(bytes); */
+
+/* The following function must be called before the gcj allocators */
+/* can be invoked. */
+/* mp_index and mp are the index and mark_proc (see gc_mark.h) */
+/* respectively for the allocated objects. Mark_proc will be */
+/* used to build the descriptor for objects allocated through the */
+/* debugging interface. The mark_proc will be invoked on all such */
+/* objects with an "environment" value of 1. The client may choose */
+/* to use the same mark_proc for some of its generated mark descriptors.*/
+/* In that case, it should use a different "environment" value to */
+/* detect the presence or absence of the debug header. */
+/* Mp is really of type mark_proc, as defined in gc_mark.h. We don't */
+/* want to include that here for namespace pollution reasons. */
+extern void GC_init_gcj_malloc(int mp_index, void * /* really mark_proc */mp);
+
+/* Allocate an object, clear it, and store the pointer to the */
+/* type structure (vtable in gcj). */
+/* This adds a byte at the end of the object if GC_malloc would.*/
+extern void * GC_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr);
+/* The debug versions allocate such that the specified mark_proc */
+/* is always invoked. */
+extern void * GC_debug_gcj_malloc(size_t lb,
+ void * ptr_to_struct_containing_descr,
+ GC_EXTRA_PARAMS);
+
+/* Similar to GC_gcj_malloc, but assumes that a pointer to near the */
+/* beginning of the resulting object is always maintained. */
+extern void * GC_gcj_malloc_ignore_off_page(size_t lb,
+ void * ptr_to_struct_containing_descr);
+
+/* The kind numbers of normal and debug gcj objects. */
+/* Useful only for debug support, we hope. */
+extern int GC_gcj_kind;
+
+extern int GC_gcj_debug_kind;
+
+# ifdef GC_DEBUG
+# define GC_GCJ_MALLOC(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
+# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
+# else
+# define GC_GCJ_MALLOC(s,d) GC_gcj_malloc(s,d)
+# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) \
+ GC_gcj_malloc_ignore_off_page(s,d)
+# endif
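+
+/*
+ * An illustrative sketch (hypothetical runtime): the first word of each
+ * object points at a type structure whose second word is the mark
+ * descriptor.  my_mark_proc, obj_bytes and vt are client-supplied
+ * placeholders, not part of this interface.
+ *
+ *   struct my_vtable { void *clas; GC_word mark_descr; };
+ *   GC_init_gcj_malloc(0, (void *)my_mark_proc);   // 0 = mark proc index
+ *   void *obj = GC_GCJ_MALLOC(obj_bytes, &vt);
+ */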
+
+#endif /* GC_GCJ_H */
diff --git a/tools/build/src/engine/boehm_gc/include/gc_inline.h b/tools/build/src/engine/boehm_gc/include/gc_inline.h
new file mode 100644
index 000000000..da7e2e91f
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_inline.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* USE OF THIS FILE IS NOT RECOMMENDED unless GC_all_interior_pointers */
+/* is not set, or the collector has been built with */
+/* -DDONT_ADD_BYTE_AT_END, or the specified size includes a pointerfree */
+/* word at the end. In the standard collector configuration, */
+/* the final word of each object may not be scanned. */
+/* This interface is most useful for compilers that generate C. */
+/* It is also used internally for thread-local allocation, in which */
+/* case, the size is suitably adjusted by the caller. */
+/* Manual use is hereby discouraged. */
+
+#include "gc.h"
+#include "gc_tiny_fl.h"
+
+#if __GNUC__ >= 3
+# define GC_EXPECT(expr, outcome) __builtin_expect(expr,outcome)
+ /* Equivalent to (expr), but predict that usually (expr)==outcome. */
+#else
+# define GC_EXPECT(expr, outcome) (expr)
+#endif /* __GNUC__ */
+
+/* The ultimately general inline allocation macro. Allocate an object */
+/* of size bytes, putting the resulting pointer in result. Tiny_fl is */
+/* a "tiny" free list array, which will be used first, if the size */
+/* is appropriate. If bytes is too large, we allocate with */
+/* default_expr instead. If we need to refill the free list, we use */
+/* GC_generic_malloc_many with the indicated kind. */
+/* Tiny_fl should be an array of GC_TINY_FREELISTS void * pointers. */
+/* If num_direct is nonzero, and the individual free list pointers */
+/* are initialized to (void *)1, then we allocate num_direct granules */
+/* directly using gmalloc before putting multiple objects into the */
+/* tiny_fl entry. If num_direct is zero, then the free lists may also */
+/* be initialized to (void *)0. */
+/* We rely on much of this hopefully getting optimized away in the */
+/* num_direct = 0 case. */
+/* Particularly if bytes is constant, this should generate a small */
+/* amount of code. */
+# define GC_FAST_MALLOC_GRANS(result,granules,tiny_fl,num_direct,\
+ kind,default_expr,init) \
+{ \
+ if (GC_EXPECT(granules >= GC_TINY_FREELISTS,0)) { \
+ result = default_expr; \
+ } else { \
+ void **my_fl = tiny_fl + granules; \
+ void *my_entry=*my_fl; \
+ void *next; \
+ \
+ while (GC_EXPECT((GC_word)my_entry \
+ <= num_direct + GC_TINY_FREELISTS + 1, 0)) { \
+ /* Entry contains counter or NULL */ \
+ if ((GC_word)my_entry - 1 < num_direct) { \
+ /* Small counter value, not NULL */ \
+ *my_fl = (ptr_t)my_entry + granules + 1; \
+ result = default_expr; \
+ goto out; \
+ } else { \
+ /* Large counter or NULL */ \
+ GC_generic_malloc_many(((granules) == 0? GC_GRANULE_BYTES : \
+ RAW_BYTES_FROM_INDEX(granules)), \
+ kind, my_fl); \
+ my_entry = *my_fl; \
+ if (my_entry == 0) { \
+ result = GC_oom_fn(bytes); \
+ goto out; \
+ } \
+ } \
+ } \
+ next = *(void **)(my_entry); \
+ result = (void *)my_entry; \
+ *my_fl = next; \
+ init; \
+ PREFETCH_FOR_WRITE(next); \
+ GC_ASSERT(GC_size(result) >= bytes + EXTRA_BYTES); \
+ GC_ASSERT((kind) == PTRFREE || ((GC_word *)result)[1] == 0); \
+ out: ; \
+ } \
+}
+
+# define GC_WORDS_TO_WHOLE_GRANULES(n) \
+ GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)
+
+/* Allocate n words (NOT BYTES). X is made to point to the result. */
+/* This should really only be used if GC_all_interior_pointers is */
+/* not set, or DONT_ADD_BYTE_AT_END is set. See above. */
+/* The semantics changed in version 7.0; we no longer lock, and */
+/* the caller is responsible for supplying a cleared tiny_fl */
+/* free list array. For single-threaded applications, this may be */
+/* a global array. */
+# define GC_MALLOC_WORDS(result,n,tiny_fl) \
+{ \
+ size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
+ GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
+ NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
+ *(void **)result = 0); \
+}
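+
+/*
+ * An illustrative sketch (single-threaded, hypothetical client code): the
+ * caller supplies a cleared free-list array of GC_TINY_FREELISTS entries.
+ *
+ *   static void *my_tiny_fl[GC_TINY_FREELISTS];   // zero-initialized
+ *   void *p;
+ *   GC_MALLOC_WORDS(p, 4, my_tiny_fl);            // a 4-word object
+ */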
+
+# define GC_MALLOC_ATOMIC_WORDS(result,n,tiny_fl) \
+{ \
+ size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
+ GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
+ PTRFREE, GC_malloc_atomic(grans*GC_GRANULE_BYTES), \
+ /* no initialization */); \
+}
+
+
+/* And once more for two word initialized objects: */
+# define GC_CONS(result, first, second, tiny_fl) \
+{ \
+ size_t grans = GC_WORDS_TO_WHOLE_GRANULES(2); \
+ GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
+ NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
+ *(void **)result = (void *)(first)); \
+ ((void **)(result))[1] = (void *)(second); \
+}
diff --git a/tools/build/src/engine/boehm_gc/include/gc_mark.h b/tools/build/src/engine/boehm_gc/include/gc_mark.h
new file mode 100644
index 000000000..8ee50b5d4
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_mark.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 2001 by Hewlett-Packard Company. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/*
+ * This contains interfaces to the GC marker that are likely to be useful to
+ * clients that provide detailed heap layout information to the collector.
+ * This interface should not be used by normal C or C++ clients.
+ * It will be useful to runtimes for other languages.
+ *
+ * This is an experts-only interface! There are many ways to break the
+ * collector in subtle ways by using this functionality.
+ */
+#ifndef GC_MARK_H
+# define GC_MARK_H
+
+# ifndef GC_H
+# include "gc.h"
+# endif
+
+/* A client supplied mark procedure. Returns new mark stack pointer. */
+/* Primary effect should be to push new entries on the mark stack. */
+/* Mark stack pointer values are passed and returned explicitly. */
+/* Global variables describing mark stack are not necessarily valid. */
+/* (This usually saves a few cycles by keeping things in registers.) */
+/* Assumed to scan about GC_PROC_BYTES on average. If it needs to do */
+/* much more work than that, it should do it in smaller pieces by */
+/* pushing itself back on the mark stack. */
+/* Note that it should always do some work (defined as marking some */
+/* objects) before pushing more than one entry on the mark stack. */
+/* This is required to ensure termination in the event of mark stack */
+/* overflows. */
+/* This procedure is always called with at least one empty entry on the */
+/* mark stack. */
+/* Currently we require that mark procedures look for pointers in a */
+/* subset of the places the conservative marker would. It must be safe */
+/* to invoke the normal mark procedure instead. */
+/* WARNING: Such a mark procedure may be invoked on an unused object */
+/* residing on a free list. Such objects are cleared, except for a */
+/* free list link field in the first word. Thus mark procedures may */
+/* not count on the presence of a type descriptor, and must handle this */
+/* case correctly somehow. */
+# define GC_PROC_BYTES 100
+struct GC_ms_entry;
+typedef struct GC_ms_entry * (*GC_mark_proc) (
+ GC_word * addr, struct GC_ms_entry * mark_stack_ptr,
+ struct GC_ms_entry * mark_stack_limit, GC_word env);
+
+# define GC_LOG_MAX_MARK_PROCS 6
+# define GC_MAX_MARK_PROCS (1 << GC_LOG_MAX_MARK_PROCS)
+
+/* In a few cases it's necessary to assign statically known indices to */
+/* certain mark procs. Thus we reserve a few for well known clients. */
+/* (This is necessary if mark descriptors are compiler generated.) */
+#define GC_RESERVED_MARK_PROCS 8
+# define GC_GCJ_RESERVED_MARK_PROC_INDEX 0
+
+/* Object descriptors on mark stack or in objects. Low order two */
+/* bits are tags distinguishing among the following 4 possibilities */
+/* for the high order 30 bits. */
+#define GC_DS_TAG_BITS 2
+#define GC_DS_TAGS ((1 << GC_DS_TAG_BITS) - 1)
+#define GC_DS_LENGTH 0 /* The entire word is a length in bytes that */
+ /* must be a multiple of 4. */
+#define GC_DS_BITMAP 1 /* 30 (62) bits are a bitmap describing pointer */
+ /* fields. The msb is 1 iff the first word */
+ /* is a pointer. */
+ /* (This unconventional ordering sometimes */
+ /* makes the marker slightly faster.) */
+ /* Zeroes indicate definite nonpointers. Ones */
+ /* indicate possible pointers. */
+ /* Only usable if pointers are word aligned. */
+#define GC_DS_PROC 2
+ /* The objects referenced by this object can be */
+ /* pushed on the mark stack by invoking */
+ /* PROC(descr). ENV(descr) is passed as the */
+ /* last argument. */
+# define GC_MAKE_PROC(proc_index, env) \
+ (((((env) << GC_LOG_MAX_MARK_PROCS) \
+ | (proc_index)) << GC_DS_TAG_BITS) | GC_DS_PROC)
+#define GC_DS_PER_OBJECT 3 /* The real descriptor is at the */
+ /* byte displacement from the beginning of the */
+ /* object given by descr & ~DS_TAGS */
+ /* If the descriptor is negative, the real */
+ /* descriptor is at (*<object_start>) - */
+ /* (descr & ~DS_TAGS) - GC_INDIR_PER_OBJ_BIAS */
+ /* The latter alternative can be used if each */
+ /* object contains a type descriptor in the */
+ /* first word. */
+ /* Note that in multithreaded environments */
+ /* per object descriptors must be located in */
+ /* either the first two or last two words of */
+ /* the object, since only those are guaranteed */
+ /* to be cleared while the allocation lock is */
+ /* held. */
+#define GC_INDIR_PER_OBJ_BIAS 0x10
+
+extern void * GC_least_plausible_heap_addr;
+extern void * GC_greatest_plausible_heap_addr;
+ /* Bounds on the heap. Guaranteed valid */
+ /* Likely to include future heap expansion. */
+
+/* Handle nested references in a custom mark procedure. */
+/* Check if obj is a valid object. If so, ensure that it is marked. */
+/* If it was not previously marked, push its contents onto the mark */
+/* stack for future scanning. The object will then be scanned using */
+/* its mark descriptor. */
+/* Returns the new mark stack pointer. */
+/* Handles mark stack overflows correctly. */
+/* Since this marks first, it makes progress even if there are mark */
+/* stack overflows. */
+/* Src is the address of the pointer to obj, which is used only */
+/* for back pointer-based heap debugging. */
+/* It is strongly recommended that most objects be handled without mark */
+/* procedures, e.g. with bitmap descriptors, and that mark procedures */
+/* be reserved for exceptional cases.  That will ensure that the	*/
+/* performance of this call is not critical.				*/
+/* (Otherwise we would need to inline GC_mark_and_push completely, */
+/* which would tie the client code to a fixed collector version.) */
+/* Note that mark procedures should explicitly call FIXUP_POINTER() */
+/* if required. */
+struct GC_ms_entry *GC_mark_and_push(void * obj,
+ struct GC_ms_entry * mark_stack_ptr,
+ struct GC_ms_entry * mark_stack_limit,
+ void * *src);
+
+#define GC_MARK_AND_PUSH(obj, msp, lim, src) \
+ (((GC_word)obj >= (GC_word)GC_least_plausible_heap_addr && \
+ (GC_word)obj <= (GC_word)GC_greatest_plausible_heap_addr)? \
+ GC_mark_and_push(obj, msp, lim, src) : \
+ msp)
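+
+/* Illustrative sketch, not part of the original header: a minimal mark  */
+/* procedure for a hypothetical object laid out as a tag word followed   */
+/* by two pointer fields.  It pushes each pointer field with             */
+/* GC_MARK_AND_PUSH and returns the updated mark stack pointer, as the   */
+/* GC_mark_proc contract above requires.                                 */
+#if 0 /* example only, not compiled */
+struct my_node {
+    GC_word tag;        /* type descriptor or other non-pointer data */
+    void *left;
+    void *right;
+};
+
+static struct GC_ms_entry *my_mark_proc(GC_word *addr,
+                                        struct GC_ms_entry *mark_stack_ptr,
+                                        struct GC_ms_entry *mark_stack_limit,
+                                        GC_word env)
+{
+    struct my_node *n = (struct my_node *)addr;
+
+    (void)env;  /* unused in this sketch */
+    /* Push only plausible heap pointers; the macro filters the rest. */
+    mark_stack_ptr = GC_MARK_AND_PUSH(n->left, mark_stack_ptr,
+                                      mark_stack_limit, &(n->left));
+    mark_stack_ptr = GC_MARK_AND_PUSH(n->right, mark_stack_ptr,
+                                      mark_stack_limit, &(n->right));
+    return mark_stack_ptr;
+}
+#endif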
+
+extern size_t GC_debug_header_size;
+ /* The size of the header added to objects allocated through */
+ /* the GC_debug routines. */
+ /* Defined as a variable so that client mark procedures don't */
+ /* need to be recompiled for collector version changes. */
+#define GC_USR_PTR_FROM_BASE(p) ((void *)((char *)(p) + GC_debug_header_size))
+
+/* And some routines to support creation of new "kinds", e.g. with */
+/* custom mark procedures, by language runtimes. */
+/* The _inner versions assume the caller holds the allocation lock. */
+
+/* Return a new free list array. */
+void ** GC_new_free_list(void);
+void ** GC_new_free_list_inner(void);
+
+/* Return a new kind, as specified. */
+unsigned GC_new_kind(void **free_list, GC_word mark_descriptor_template,
+ int add_size_to_descriptor, int clear_new_objects);
+ /* The last two parameters must be zero or one. */
+unsigned GC_new_kind_inner(void **free_list,
+ GC_word mark_descriptor_template,
+ int add_size_to_descriptor,
+ int clear_new_objects);
+
+/* Return a new mark procedure identifier, suitable for use as */
+/* the first argument in GC_MAKE_PROC. */
+unsigned GC_new_proc(GC_mark_proc);
+unsigned GC_new_proc_inner(GC_mark_proc);
+
+/* Allocate an object of a given kind. Note that in multithreaded */
+/* contexts, this is usually unsafe for kinds that have the descriptor */
+/* in the object itself, since there is otherwise a window in which */
+/* the descriptor is not correct. Even in the single-threaded case, */
+/* we need to be sure that cleared objects on a free list don't */
+/* cause a GC crash if they are accidentally traced. */
+void * GC_generic_malloc(size_t lb, int k);
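+
+/* Illustrative sketch, not part of the original header: how a language  */
+/* runtime might combine the routines above.  A new kind is created      */
+/* whose objects are scanned by the hypothetical my_mark_proc shown      */
+/* after GC_MARK_AND_PUSH above; the env argument is simply 0 here.      */
+#if 0 /* example only, not compiled */
+static unsigned my_kind;
+
+static void my_kind_init(void)
+{
+    void **fl = GC_new_free_list();
+    unsigned proc_index = GC_new_proc(my_mark_proc);
+
+    /* All objects of this kind share one mark procedure. */
+    my_kind = GC_new_kind(fl, GC_MAKE_PROC(proc_index, 0),
+                          0 /* don't add size to descriptor */,
+                          1 /* clear new objects */);
+}
+
+static void *my_alloc(size_t lb)
+{
+    return GC_generic_malloc(lb, (int)my_kind);
+}
+#endif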
+
+typedef void (*GC_describe_type_fn) (void *p, char *out_buf);
+ /* A procedure which */
+ /* produces a human-readable */
+ /* description of the "type" of object */
+ /* p into the buffer out_buf of length */
+ /* GC_TYPE_DESCR_LEN. This is used by */
+ /* the debug support when printing */
+ /* objects. */
+ /* These functions should be as robust */
+ /* as possible, though we do avoid */
+ /* invoking them on objects on the */
+ /* global free list. */
+# define GC_TYPE_DESCR_LEN 40
+
+void GC_register_describe_type_fn(int kind, GC_describe_type_fn knd);
+ /* Register a describe_type function */
+ /* to be used when printing objects */
+ /* of a particular kind. */
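+
+/* Illustrative sketch, not part of the original header: a describe-type */
+/* callback for the hypothetical kind above.  It must write at most      */
+/* GC_TYPE_DESCR_LEN characters (including the terminating NUL) into     */
+/* out_buf.                                                              */
+#if 0 /* example only, not compiled */
+#include <stdio.h>
+
+static void my_describe(void *p, char *out_buf)
+{
+    snprintf(out_buf, GC_TYPE_DESCR_LEN, "my_node at %p", p);
+}
+
+/* Registered once, e.g. from my_kind_init():                            */
+/*     GC_register_describe_type_fn((int)my_kind, my_describe);          */
+#endif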
+
+#endif /* GC_MARK_H */
+
diff --git a/tools/build/src/engine/boehm_gc/include/gc_pthread_redirects.h b/tools/build/src/engine/boehm_gc/include/gc_pthread_redirects.h
new file mode 100644
index 000000000..b567f63e1
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_pthread_redirects.h
@@ -0,0 +1,54 @@
+/* Our pthread support normally needs to intercept a number of thread */
+/* calls. We arrange to do that here, if appropriate. */
+
+#ifndef GC_PTHREAD_REDIRECTS_H
+
+#define GC_PTHREAD_REDIRECTS_H
+
+#if !defined(GC_USE_LD_WRAP) && defined(GC_PTHREADS)
+/* We need to intercept calls to many of the threads primitives, so */
+/* that we can locate thread stacks and stop the world. */
+/* Note also that the collector cannot always see thread specific data. */
+/* Thread specific data should generally consist of pointers to */
+/* uncollectable objects (allocated with GC_malloc_uncollectable, */
+/* not the system malloc), which are deallocated using the destructor */
+/* facility in thr_keycreate. Alternatively, keep a redundant pointer */
+/* to thread specific data on the thread stack. */
+
+# include <pthread.h>
+# include <signal.h>
+
+ int GC_pthread_create(pthread_t *new_thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *), void *arg);
+#ifndef GC_DARWIN_THREADS
+ int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
+#endif
+ int GC_pthread_join(pthread_t thread, void **retval);
+ int GC_pthread_detach(pthread_t thread);
+
+#if defined(GC_OSF1_THREADS) \
+ && defined(_PTHREAD_USE_MANGLED_NAMES_) && !defined(_PTHREAD_USE_PTDNAM_)
+/* Unless the compiler supports #pragma extern_prefix, the Tru64 UNIX
+ <pthread.h> redefines some POSIX thread functions to use mangled names.
+ If so, undef them before redefining. */
+# undef pthread_create
+# undef pthread_join
+# undef pthread_detach
+#endif
+
+# define pthread_create GC_pthread_create
+# define pthread_join GC_pthread_join
+# define pthread_detach GC_pthread_detach
+
+#ifndef GC_DARWIN_THREADS
+# ifdef pthread_sigmask
+# undef pthread_sigmask
+# endif /* pthread_sigmask */
+# define pthread_sigmask GC_pthread_sigmask
+# define dlopen GC_dlopen
+#endif
+
+#endif /* GC_xxxxx_THREADS */
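+
+/* Illustrative sketch, not part of the original header: with GC_PTHREADS */
+/* (or the appropriate GC_xxx_THREADS macro) defined before gc.h is       */
+/* included, client code keeps calling the standard pthread names; the    */
+/* macros above quietly route them through the GC_ wrappers so the        */
+/* collector can find each thread's stack and stop the world.             */
+#if 0 /* example only, not compiled */
+static void *worker(void *arg)
+{
+    (void)arg;
+    return GC_MALLOC(128);      /* allocate from the collected heap */
+}
+
+static void spawn_and_join(void)
+{
+    pthread_t t;
+    pthread_create(&t, NULL, worker, NULL);   /* really GC_pthread_create */
+    pthread_join(t, NULL);                    /* really GC_pthread_join   */
+}
+#endif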
+
+#endif /* GC_PTHREAD_REDIRECTS_H */
diff --git a/tools/build/src/engine/boehm_gc/include/gc_tiny_fl.h b/tools/build/src/engine/boehm_gc/include/gc_tiny_fl.h
new file mode 100644
index 000000000..52b6864b6
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_tiny_fl.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 1999-2005 Hewlett-Packard Development Company, L.P.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+#ifndef GC_TINY_FL_H
+#define GC_TINY_FL_H
+/*
+ * Constants and data structures for "tiny" free lists.
+ * These are used for thread-local allocation or in-lined allocators.
+ * Each global free list also essentially starts with one of these.
+ * However, global free lists are known to the GC. "Tiny" free lists
+ * are basically private to the client. Their contents are viewed as
+ * "in use" and marked accordingly by the core of the GC.
+ *
+ * Note that inlined code might know about the layout of these and the constants
+ * involved. Thus any change here may invalidate clients, and such changes should
+ * be avoided. Hence we keep this as simple as possible.
+ */
+
+/*
+ * We always set GRANULE_BYTES to twice the length of a pointer.
+ * This means that all allocation requests are rounded up to the next
+ * multiple of 16 on 64-bit architectures or 8 on 32-bit architectures.
+ * This appears to be a reasonable compromise between fragmentation overhead
+ * and space usage for mark bits (usually mark bytes).
+ * On many 64-bit architectures some memory references require 16-byte
+ * alignment, making this necessary anyway.
+ * For a few 32-bit architectures (e.g. x86), we may also need 16-byte alignment
+ * for certain memory references. But currently that does not seem to be the
+ * default for all conventional malloc implementations, so we ignore that
+ * problem.
+ * It would always be safe, and often useful, to be able to allocate very
+ * small objects with smaller alignment. But that would cost us mark bit
+ * space, so we no longer do so.
+ */
+#ifndef GC_GRANULE_BYTES
+ /* GC_GRANULE_BYTES should not be overridden in any instances of the GC */
+ /* library that may be shared between applications, since it affects */
+ /* the binary interface to the library. */
+# if defined(__LP64__) || defined (_LP64) || defined(_WIN64) \
+ || defined(__s390x__) || defined(__x86_64__) \
+ || defined(__alpha__) || defined(__powerpc64__) \
+ || defined(__arch64__)
+# define GC_GRANULE_BYTES 16
+# define GC_GRANULE_WORDS 2
+# else
+# define GC_GRANULE_BYTES 8
+# define GC_GRANULE_WORDS 2
+# endif
+#endif /* !GC_GRANULE_BYTES */
+
+#if GC_GRANULE_WORDS == 2
+# define GC_WORDS_TO_GRANULES(n) ((n)>>1)
+#else
+# define GC_WORDS_TO_GRANULES(n) ((n)*sizeof(void *)/GC_GRANULE_BYTES)
+#endif
+
+/* A "tiny" free list header contains TINY_FREELISTS pointers to */
+/* singly linked lists of objects of different sizes, the ith one */
+/* containing objects i granules in size. Note that there is a list */
+/* of size zero objects. */
+#ifndef GC_TINY_FREELISTS
+# if GC_GRANULE_BYTES == 16
+# define GC_TINY_FREELISTS 25
+# else
+# define GC_TINY_FREELISTS 33 /* Up to and including 256 bytes */
+# endif
+#endif /* !GC_TINY_FREELISTS */
+
+/* The ith free list corresponds to size i*GRANULE_BYTES */
+/* Internally to the collector, the index can be computed with */
+/* ROUNDED_UP_GRANULES. Externally, we don't know whether */
+/* DONT_ADD_BYTE_AT_END is set, but the client should know. */
+
+/* Convert a free list index to the actual size of objects */
+/* on that list, including extra space we added. Not an */
+/* inverse of the above. */
+#define RAW_BYTES_FROM_INDEX(i) ((i) * GC_GRANULE_BYTES)
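+
+/* Illustrative sketch, not part of the original header: the rough       */
+/* inverse of RAW_BYTES_FROM_INDEX.  A request of n bytes is served from */
+/* the list whose index is n rounded up to whole granules; whether an    */
+/* extra byte is added first depends on DONT_ADD_BYTE_AT_END, as noted   */
+/* above.                                                                */
+#if 0 /* example only, not compiled */
+static size_t raw_index_from_bytes(size_t n)
+{
+    return (n + GC_GRANULE_BYTES - 1) / GC_GRANULE_BYTES;
+}
+/* With GC_GRANULE_BYTES == 16: 1..16 bytes -> index 1, 17..32 -> 2, ... */
+#endif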
+
+#endif /* GC_TINY_FL_H */
diff --git a/tools/build/src/engine/boehm_gc/include/gc_typed.h b/tools/build/src/engine/boehm_gc/include/gc_typed.h
new file mode 100644
index 000000000..1086acdd1
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/gc_typed.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright 1996 Silicon Graphics. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/*
+ * Some simple primitives for allocation with explicit type information.
+ * Facilities for dynamic type inference may be added later.
+ * Should be used only for extremely performance critical applications,
+ * or if conservative collector leakage is otherwise a problem (unlikely).
+ * Note that this is implemented completely separately from the rest
+ * of the collector, and is not linked in unless referenced.
+ * This does not currently support GC_DEBUG in any interesting way.
+ */
+/* Boehm, May 19, 1994 2:13 pm PDT */
+
+#ifndef _GC_TYPED_H
+# define _GC_TYPED_H
+# ifndef _GC_H
+# include "gc.h"
+# endif
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+typedef GC_word * GC_bitmap;
+ /* The least significant bit of the first word is one if */
+ /* the first word in the object may be a pointer. */
+
+# define GC_WORDSZ (8*sizeof(GC_word))
+# define GC_get_bit(bm, index) \
+ (((bm)[index/GC_WORDSZ] >> (index%GC_WORDSZ)) & 1)
+# define GC_set_bit(bm, index) \
+ (bm)[index/GC_WORDSZ] |= ((GC_word)1 << (index%GC_WORDSZ))
+# define GC_WORD_OFFSET(t, f) (offsetof(t,f)/sizeof(GC_word))
+# define GC_WORD_LEN(t) (sizeof(t)/ sizeof(GC_word))
+# define GC_BITMAP_SIZE(t) ((GC_WORD_LEN(t) + GC_WORDSZ-1)/GC_WORDSZ)
+
+typedef GC_word GC_descr;
+
+GC_API GC_descr GC_make_descriptor(GC_bitmap bm, size_t len);
+ /* Return a type descriptor for the object whose layout */
+ /* is described by the argument. */
+ /* The least significant bit of the first word is one */
+ /* if the first word in the object may be a pointer. */
+ /* The second argument specifies the number of */
+ /* meaningful bits in the bitmap. The actual object */
+ /* may be larger (but not smaller). Any additional */
+ /* words in the object are assumed not to contain */
+ /* pointers. */
+ /* Returns a conservative approximation in the */
+ /* (unlikely) case of insufficient memory to build */
+ /* the descriptor. Calls to GC_make_descriptor */
+ /* may consume some amount of a finite resource. This */
+ /* is intended to be called once per type, not once */
+ /* per allocation. */
+
+/* It is possible to generate a descriptor for a C type T with */
+/* word aligned pointer fields f1, f2, ... as follows: */
+/* */
+/* GC_descr T_descr; */
+/* GC_word T_bitmap[GC_BITMAP_SIZE(T)] = {0}; */
+/* GC_set_bit(T_bitmap, GC_WORD_OFFSET(T,f1)); */
+/* GC_set_bit(T_bitmap, GC_WORD_OFFSET(T,f2)); */
+/* ... */
+/* T_descr = GC_make_descriptor(T_bitmap, GC_WORD_LEN(T)); */
+
+GC_API void * GC_malloc_explicitly_typed(size_t size_in_bytes, GC_descr d);
+ /* Allocate an object whose layout is described by d. */
+ /* The resulting object MAY NOT BE PASSED TO REALLOC. */
+ /* The returned object is cleared. */
+
+GC_API void * GC_malloc_explicitly_typed_ignore_off_page
+ (size_t size_in_bytes, GC_descr d);
+
+GC_API void * GC_calloc_explicitly_typed(size_t nelements,
+ size_t element_size_in_bytes,
+ GC_descr d);
+ /* Allocate an array of nelements elements, each of the */
+ /* given size, and with the given descriptor. */
+	/* The element size must be a multiple of the byte	*/
+	/* alignment required for pointers.  E.g. on a 32-bit	*/
+	/* machine with 16-bit aligned pointers,		*/
+	/* element_size_in_bytes must be a multiple of 2.	*/
+ /* Returned object is cleared. */
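+
+/* Illustrative sketch, not part of the original header: the recipe from */
+/* the comment above applied to a concrete, hypothetical type, followed  */
+/* by a typed allocation.  The descriptor is built once and then reused  */
+/* for every allocation of that type.                                    */
+#if 0 /* example only, not compiled */
+#include <stddef.h>     /* offsetof, used by GC_WORD_OFFSET */
+
+struct pair {
+    struct pair *next;  /* pointer field */
+    GC_word value;      /* non-pointer field */
+};
+
+static GC_descr pair_descr;
+
+static void pair_init(void)
+{
+    GC_word bm[GC_BITMAP_SIZE(struct pair)] = {0};
+
+    GC_set_bit(bm, GC_WORD_OFFSET(struct pair, next));
+    pair_descr = GC_make_descriptor(bm, GC_WORD_LEN(struct pair));
+}
+
+static struct pair *pair_new(void)
+{
+    return (struct pair *)
+        GC_malloc_explicitly_typed(sizeof(struct pair), pair_descr);
+}
+#endif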
+
+#ifdef GC_DEBUG
+# define GC_MALLOC_EXPLICITLY_TYPED(bytes, d) GC_MALLOC(bytes)
+# define GC_CALLOC_EXPLICITLY_TYPED(n, bytes, d) GC_MALLOC(n*bytes)
+#else
+# define GC_MALLOC_EXPLICITLY_TYPED(bytes, d) \
+ GC_malloc_explicitly_typed(bytes, d)
+# define GC_CALLOC_EXPLICITLY_TYPED(n, bytes, d) \
+ GC_calloc_explicitly_typed(n, bytes, d)
+#endif /* !GC_DEBUG */
+
+#ifdef __cplusplus
+ } /* matches extern "C" */
+#endif
+
+#endif /* _GC_TYPED_H */
+
diff --git a/tools/build/src/engine/boehm_gc/include/include.am b/tools/build/src/engine/boehm_gc/include/include.am
new file mode 100644
index 000000000..78c57c346
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/include.am
@@ -0,0 +1,54 @@
+#
+#
+# THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+# OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+#
+# Permission is hereby granted to use or copy this program
+# for any purpose, provided the above notices are retained on all copies.
+# Permission to modify the code and to distribute modified code is granted,
+# provided the above notices are retained, and a notice that the code was
+# modified is included with the above copyright notice.
+#
+# Modified by: Grzegorz Jakacki <jakacki at acm dot org>
+# Modified by: Petter Urkedal <petter.urkedal@nordita.dk>
+
+## Process this file with automake to produce part of Makefile.in.
+
+# installed headers
+#
+pkginclude_HEADERS += \
+ include/gc.h \
+ include/gc_typed.h \
+ include/gc_inline.h \
+ include/gc_mark.h \
+ include/gc_cpp.h \
+ include/weakpointer.h \
+ include/new_gc_alloc.h \
+ include/gc_allocator.h \
+ include/gc_backptr.h \
+ include/gc_gcj.h \
+ include/leak_detector.h \
+ include/gc_amiga_redirects.h \
+ include/gc_pthread_redirects.h \
+ include/gc_config_macros.h \
+ include/gc_tiny_fl.h
+
+# headers which are not installed
+#
+dist_noinst_HEADERS += \
+ include/private/gc_hdrs.h \
+ include/private/gc_priv.h \
+ include/private/gcconfig.h \
+ include/private/gc_pmark.h \
+ include/private/gc_locks.h \
+ include/private/dbg_mlc.h \
+ include/private/specific.h \
+ include/private/cord_pos.h \
+ include/private/pthread_support.h \
+ include/private/pthread_stop_world.h \
+ include/private/darwin_semaphore.h \
+ include/private/darwin_stop_world.h \
+ include/private/thread_local_alloc.h \
+ include/cord.h \
+ include/ec.h \
+ include/javaxfc.h
diff --git a/tools/build/src/engine/boehm_gc/include/javaxfc.h b/tools/build/src/engine/boehm_gc/include/javaxfc.h
new file mode 100644
index 000000000..23e01005a
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/javaxfc.h
@@ -0,0 +1,21 @@
+# ifndef GC_H
+# include "gc.h"
+# endif
+
+/*
+ * Invoke all remaining finalizers that haven't yet been run.
+ * This is needed for strict compliance with the Java standard,
+ * which may require the runtime to guarantee that all finalizers are run.
+ * This is problematic for several reasons:
+ * 1) It means that finalizers, and all methods called by them,
+ * must be prepared to deal with objects that have been finalized in
+ * spite of the fact that they are still referenced by statically
+ * allocated pointer variables.
+ * 2) It may mean that we get stuck in an infinite loop running
+ * finalizers which create new finalizable objects, though that's
+ * probably unlikely.
+ * Thus this is not recommended for general use.
+ */
+void GC_finalize_all();
+
+
diff --git a/tools/build/src/engine/boehm_gc/include/leak_detector.h b/tools/build/src/engine/boehm_gc/include/leak_detector.h
new file mode 100644
index 000000000..1d02f4007
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/leak_detector.h
@@ -0,0 +1,9 @@
+#define GC_DEBUG
+#include "gc.h"
+#define malloc(n) GC_MALLOC(n)
+#define calloc(m,n) GC_MALLOC((m)*(n))
+#define free(p) GC_FREE(p)
+#define realloc(p,n) GC_REALLOC((p),(n))
+#undef strdup
+#define strdup(s) GC_STRDUP((s))
+#define CHECK_LEAKS() GC_gcollect()
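+
+/* Illustrative sketch, not part of the original header: typical use.    */
+/* Including this file before any allocations redirects malloc/free and  */
+/* friends to the debugging collector; CHECK_LEAKS() then forces a       */
+/* collection, and a collector configured for leak detection (e.g. a     */
+/* FIND_LEAK build, or GC_find_leak set at startup) reports blocks that  */
+/* were never freed and are no longer referenced.                        */
+#if 0 /* example only, not compiled */
+#include "leak_detector.h"
+
+int main(void)
+{
+    char *p = malloc(100);  /* becomes GC_MALLOC(100) via the macro above */
+    p = NULL;               /* drop the only reference without freeing    */
+    CHECK_LEAKS();          /* the unreachable, never-freed block should  */
+                            /* be reported here                           */
+    return 0;
+}
+#endif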
diff --git a/tools/build/src/engine/boehm_gc/include/new_gc_alloc.h b/tools/build/src/engine/boehm_gc/include/new_gc_alloc.h
new file mode 100644
index 000000000..b4906af54
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/new_gc_alloc.h
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+//
+// This is a revision of gc_alloc.h for SGI STL versions > 3.0
+// Unlike earlier versions, it supplements the standard "alloc.h"
+// instead of replacing it.
+//
+// This is sloppy about variable names used in header files.
+// It also doesn't yet understand the new header file names or
+// namespaces.
+//
+// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE.
+// The user should also consider -DREDIRECT_MALLOC=GC_malloc_uncollectable,
+// to ensure that objects allocated through malloc are traced.
+//
+// Some of this could be faster in the explicit deallocation case.
+// In particular, we spend too much time clearing objects on the
+// free lists. That could be avoided.
+//
+// This uses template classes with static members, and hence does not work
+// with g++ 2.7.2 and earlier.
+//
+// Unlike its predecessor, this one simply defines
+// gc_alloc
+// single_client_gc_alloc
+// traceable_alloc
+// single_client_traceable_alloc
+//
+// It does not redefine alloc. Nor does it change the default allocator,
+// though the user may wish to do so. (The argument against changing
+// the default allocator is that it may introduce subtle link compatibility
+// problems. The argument for changing it is that the usual default
+// allocator is usually a very bad choice for a garbage collected environment.)
+//
+// This code assumes that the collector itself has been compiled with a
+// compiler that defines __STDC__ .
+//
+
+#ifndef GC_ALLOC_H
+
+#include "gc.h"
+
+#if (__GNUC__ < 3)
+# include <stack> // A more portable way to get stl_alloc.h .
+#else
+# include <bits/stl_alloc.h>
+# ifndef __STL_BEGIN_NAMESPACE
+# define __STL_BEGIN_NAMESPACE namespace std {
+# define __STL_END_NAMESPACE };
+# endif
+#ifndef __STL_USE_STD_ALLOCATORS
+#define __STL_USE_STD_ALLOCATORS
+#endif
+#endif
+
+/* A hack to deal with gcc 3.1.  If you are using gcc 3.1 or later,	*/
+/* you should probably really use gc_allocator.h instead. */
+#if defined (__GNUC__) && \
+ (__GNUC__ > 3 || (__GNUC__ == 3 && (__GNUC_MINOR__ >= 1)))
+# define simple_alloc __simple_alloc
+#endif
+
+
+
+#define GC_ALLOC_H
+
+#include <stddef.h>
+#include <string.h>
+
+// The following need to match collector data structures.
+// We can't include gc_priv.h, since that pulls in way too much stuff.
+// This should eventually be factored out into another include file.
+
+extern "C" {
+ extern void ** const GC_objfreelist_ptr;
+ extern void ** const GC_aobjfreelist_ptr;
+ extern void ** const GC_uobjfreelist_ptr;
+ extern void ** const GC_auobjfreelist_ptr;
+
+ extern void GC_incr_bytes_allocd(size_t bytes);
+ extern void GC_incr_mem_freed(size_t words); /* FIXME: use bytes */
+
+ extern char * GC_generic_malloc_words_small(size_t word, int kind);
+ /* FIXME: Doesn't exist anymore. */
+}
+
+// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
+// AUNCOLLECTABLE in gc_priv.h.
+
+enum { GC_PTRFREE = 0, GC_NORMAL = 1, GC_UNCOLLECTABLE = 2,
+ GC_AUNCOLLECTABLE = 3 };
+
+enum { GC_max_fast_bytes = 255 };
+
+enum { GC_bytes_per_word = sizeof(char *) };
+
+enum { GC_byte_alignment = 8 };
+
+enum { GC_word_alignment = GC_byte_alignment/GC_bytes_per_word };
+
+inline void * &GC_obj_link(void * p)
+{ return *reinterpret_cast<void **>(p); }
+
+// Compute the number of words needed to hold at least n+1 bytes.
+// The +1 allows for pointers one past the end.
+inline size_t GC_round_up(size_t n)
+{
+ return ((n + GC_byte_alignment)/GC_byte_alignment)*GC_word_alignment;
+}
+
+// The same but don't allow for extra byte.
+inline size_t GC_round_up_uncollectable(size_t n)
+{
+ return ((n + GC_byte_alignment - 1)/GC_byte_alignment)*GC_word_alignment;
+}
+
+template <int dummy>
+class GC_aux_template {
+public:
+    // File local count of allocated bytes.  Occasionally this is
+ // added into the global count. A separate count is necessary since the
+ // real one must be updated with a procedure call.
+ static size_t GC_bytes_recently_allocd;
+
+    // Same for uncollectable memory.  Not yet reflected in either
+ // GC_bytes_recently_allocd or GC_non_gc_bytes.
+ static size_t GC_uncollectable_bytes_recently_allocd;
+
+ // Similar counter for explicitly deallocated memory.
+ static size_t GC_bytes_recently_freed;
+
+ // Again for uncollectable memory.
+ static size_t GC_uncollectable_bytes_recently_freed;
+
+ static void * GC_out_of_line_malloc(size_t nwords, int kind);
+};
+
+template <int dummy>
+size_t GC_aux_template<dummy>::GC_bytes_recently_allocd = 0;
+
+template <int dummy>
+size_t GC_aux_template<dummy>::GC_uncollectable_bytes_recently_allocd = 0;
+
+template <int dummy>
+size_t GC_aux_template<dummy>::GC_bytes_recently_freed = 0;
+
+template <int dummy>
+size_t GC_aux_template<dummy>::GC_uncollectable_bytes_recently_freed = 0;
+
+template <int dummy>
+void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
+{
+ GC_bytes_recently_allocd += GC_uncollectable_bytes_recently_allocd;
+ GC_non_gc_bytes +=
+ GC_uncollectable_bytes_recently_allocd;
+ GC_uncollectable_bytes_recently_allocd = 0;
+
+ GC_bytes_recently_freed += GC_uncollectable_bytes_recently_freed;
+ GC_non_gc_bytes -= GC_uncollectable_bytes_recently_freed;
+ GC_uncollectable_bytes_recently_freed = 0;
+
+ GC_incr_bytes_allocd(GC_bytes_recently_allocd);
+ GC_bytes_recently_allocd = 0;
+
+    GC_incr_mem_freed(GC_bytes_recently_freed / GC_bytes_per_word);
+					/* convert bytes to words */
+ GC_bytes_recently_freed = 0;
+
+ return GC_generic_malloc_words_small(nwords, kind);
+}
+
+typedef GC_aux_template<0> GC_aux;
+
+// A fast, single-threaded, garbage-collected allocator
+// We assume the first word will be immediately overwritten.
+// In this version, deallocation is not a noop, and explicit
+// deallocation is likely to help performance.
+template <int dummy>
+class single_client_gc_alloc_template {
+ public:
+ static void * allocate(size_t n)
+ {
+ size_t nwords = GC_round_up(n);
+ void ** flh;
+ void * op;
+
+ if (n > GC_max_fast_bytes) return GC_malloc(n);
+ flh = GC_objfreelist_ptr + nwords;
+ if (0 == (op = *flh)) {
+ return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
+ }
+ *flh = GC_obj_link(op);
+ GC_aux::GC_bytes_recently_allocd += nwords * GC_bytes_per_word;
+ return op;
+ }
+ static void * ptr_free_allocate(size_t n)
+ {
+ size_t nwords = GC_round_up(n);
+ void ** flh;
+ void * op;
+
+ if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
+ flh = GC_aobjfreelist_ptr + nwords;
+ if (0 == (op = *flh)) {
+ return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
+ }
+ *flh = GC_obj_link(op);
+ GC_aux::GC_bytes_recently_allocd += nwords * GC_bytes_per_word;
+ return op;
+ }
+ static void deallocate(void *p, size_t n)
+ {
+ size_t nwords = GC_round_up(n);
+ void ** flh;
+
+ if (n > GC_max_fast_bytes) {
+ GC_free(p);
+ } else {
+ flh = GC_objfreelist_ptr + nwords;
+ GC_obj_link(p) = *flh;
+ memset(reinterpret_cast<char *>(p) + GC_bytes_per_word, 0,
+ GC_bytes_per_word * (nwords - 1));
+ *flh = p;
+ GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
+ }
+ }
+ static void ptr_free_deallocate(void *p, size_t n)
+ {
+ size_t nwords = GC_round_up(n);
+ void ** flh;
+
+ if (n > GC_max_fast_bytes) {
+ GC_free(p);
+ } else {
+ flh = GC_aobjfreelist_ptr + nwords;
+ GC_obj_link(p) = *flh;
+ *flh = p;
+ GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
+ }
+ }
+};
+
+typedef single_client_gc_alloc_template<0> single_client_gc_alloc;
+
+// Once more, for uncollectable objects.
+template <int dummy>
+class single_client_traceable_alloc_template {
+ public:
+ static void * allocate(size_t n)
+ {
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh;
+ void * op;
+
+ if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
+ flh = GC_uobjfreelist_ptr + nwords;
+ if (0 == (op = *flh)) {
+ return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
+ }
+ *flh = GC_obj_link(op);
+ GC_aux::GC_uncollectable_bytes_recently_allocd +=
+ nwords * GC_bytes_per_word;
+ return op;
+ }
+ static void * ptr_free_allocate(size_t n)
+ {
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh;
+ void * op;
+
+ if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
+ flh = GC_auobjfreelist_ptr + nwords;
+ if (0 == (op = *flh)) {
+ return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
+ }
+ *flh = GC_obj_link(op);
+ GC_aux::GC_uncollectable_bytes_recently_allocd +=
+ nwords * GC_bytes_per_word;
+ return op;
+ }
+ static void deallocate(void *p, size_t n)
+ {
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh;
+
+ if (n > GC_max_fast_bytes) {
+ GC_free(p);
+ } else {
+ flh = GC_uobjfreelist_ptr + nwords;
+ GC_obj_link(p) = *flh;
+ *flh = p;
+ GC_aux::GC_uncollectable_bytes_recently_freed +=
+ nwords * GC_bytes_per_word;
+ }
+ }
+ static void ptr_free_deallocate(void *p, size_t n)
+ {
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh;
+
+ if (n > GC_max_fast_bytes) {
+ GC_free(p);
+ } else {
+ flh = GC_auobjfreelist_ptr + nwords;
+ GC_obj_link(p) = *flh;
+ *flh = p;
+ GC_aux::GC_uncollectable_bytes_recently_freed +=
+ nwords * GC_bytes_per_word;
+ }
+ }
+};
+
+typedef single_client_traceable_alloc_template<0> single_client_traceable_alloc;
+
+template < int dummy >
+class gc_alloc_template {
+ public:
+ static void * allocate(size_t n) { return GC_malloc(n); }
+ static void * ptr_free_allocate(size_t n)
+ { return GC_malloc_atomic(n); }
+ static void deallocate(void *, size_t) { }
+ static void ptr_free_deallocate(void *, size_t) { }
+};
+
+typedef gc_alloc_template < 0 > gc_alloc;
+
+template < int dummy >
+class traceable_alloc_template {
+ public:
+ static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
+ static void * ptr_free_allocate(size_t n)
+ { return GC_malloc_atomic_uncollectable(n); }
+ static void deallocate(void *p, size_t) { GC_free(p); }
+ static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
+};
+
+typedef traceable_alloc_template < 0 > traceable_alloc;
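+
+// Illustrative sketch, not part of the original header: with the SGI-style
+// STL that this header supplements, the allocator classes above can be
+// named directly as a container's allocator argument; the _Alloc_traits
+// specializations further down adapt them via __allocator.
+#if 0 // example only, not compiled
+#include <vector>
+
+void allocator_example()
+{
+    // Collected storage: elements are traced, no explicit deallocation.
+    std::vector<int, gc_alloc> collected;
+    collected.push_back(42);
+
+    // Uncollectable but traced storage, released when the vector frees it.
+    std::vector<int, traceable_alloc> traced;
+    traced.push_back(7);
+}
+#endif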
+
+// We want to specialize simple_alloc so that it does the right thing
+// for all pointerfree types. At the moment there is no portable way to
+// even approximate that. The following approximation should work for
+// SGI compilers, and recent versions of g++.
+
+# define __GC_SPECIALIZE(T,alloc) \
+class simple_alloc<T, alloc> { \
+public: \
+ static T *allocate(size_t n) \
+ { return 0 == n? 0 : \
+ reinterpret_cast<T*>(alloc::ptr_free_allocate(n * sizeof (T))); } \
+ static T *allocate(void) \
+ { return reinterpret_cast<T*>(alloc::ptr_free_allocate(sizeof (T))); } \
+ static void deallocate(T *p, size_t n) \
+ { if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
+ static void deallocate(T *p) \
+ { alloc::ptr_free_deallocate(p, sizeof (T)); } \
+};
+
+__STL_BEGIN_NAMESPACE
+
+__GC_SPECIALIZE(char, gc_alloc)
+__GC_SPECIALIZE(int, gc_alloc)
+__GC_SPECIALIZE(unsigned, gc_alloc)
+__GC_SPECIALIZE(float, gc_alloc)
+__GC_SPECIALIZE(double, gc_alloc)
+
+__GC_SPECIALIZE(char, traceable_alloc)
+__GC_SPECIALIZE(int, traceable_alloc)
+__GC_SPECIALIZE(unsigned, traceable_alloc)
+__GC_SPECIALIZE(float, traceable_alloc)
+__GC_SPECIALIZE(double, traceable_alloc)
+
+__GC_SPECIALIZE(char, single_client_gc_alloc)
+__GC_SPECIALIZE(int, single_client_gc_alloc)
+__GC_SPECIALIZE(unsigned, single_client_gc_alloc)
+__GC_SPECIALIZE(float, single_client_gc_alloc)
+__GC_SPECIALIZE(double, single_client_gc_alloc)
+
+__GC_SPECIALIZE(char, single_client_traceable_alloc)
+__GC_SPECIALIZE(int, single_client_traceable_alloc)
+__GC_SPECIALIZE(unsigned, single_client_traceable_alloc)
+__GC_SPECIALIZE(float, single_client_traceable_alloc)
+__GC_SPECIALIZE(double, single_client_traceable_alloc)
+
+__STL_END_NAMESPACE
+
+#ifdef __STL_USE_STD_ALLOCATORS
+
+__STL_BEGIN_NAMESPACE
+
+template <class _Tp>
+struct _Alloc_traits<_Tp, gc_alloc >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, gc_alloc > _Alloc_type;
+ typedef __allocator<_Tp, gc_alloc > allocator_type;
+};
+
+inline bool operator==(const gc_alloc&,
+ const gc_alloc&)
+{
+ return true;
+}
+
+inline bool operator!=(const gc_alloc&,
+ const gc_alloc&)
+{
+ return false;
+}
+
+template <class _Tp>
+struct _Alloc_traits<_Tp, single_client_gc_alloc >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, single_client_gc_alloc > _Alloc_type;
+ typedef __allocator<_Tp, single_client_gc_alloc > allocator_type;
+};
+
+inline bool operator==(const single_client_gc_alloc&,
+ const single_client_gc_alloc&)
+{
+ return true;
+}
+
+inline bool operator!=(const single_client_gc_alloc&,
+ const single_client_gc_alloc&)
+{
+ return false;
+}
+
+template <class _Tp>
+struct _Alloc_traits<_Tp, traceable_alloc >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, traceable_alloc > _Alloc_type;
+ typedef __allocator<_Tp, traceable_alloc > allocator_type;
+};
+
+inline bool operator==(const traceable_alloc&,
+ const traceable_alloc&)
+{
+ return true;
+}
+
+inline bool operator!=(const traceable_alloc&,
+ const traceable_alloc&)
+{
+ return false;
+}
+
+template <class _Tp>
+struct _Alloc_traits<_Tp, single_client_traceable_alloc >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, single_client_traceable_alloc > _Alloc_type;
+ typedef __allocator<_Tp, single_client_traceable_alloc > allocator_type;
+};
+
+inline bool operator==(const single_client_traceable_alloc&,
+ const single_client_traceable_alloc&)
+{
+ return true;
+}
+
+inline bool operator!=(const single_client_traceable_alloc&,
+ const single_client_traceable_alloc&)
+{
+ return false;
+}
+
+__STL_END_NAMESPACE
+
+#endif /* __STL_USE_STD_ALLOCATORS */
+
+#endif /* GC_ALLOC_H */
diff --git a/tools/build/src/engine/boehm_gc/include/private/cord_pos.h b/tools/build/src/engine/boehm_gc/include/private/cord_pos.h
new file mode 100644
index 000000000..d2b24bb8a
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/cord_pos.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:23 pm PDT */
+# ifndef CORD_POSITION_H
+
+/* The representation of CORD_position. This is private to the */
+/* implementation, but the size is known to clients. Also */
+/* the implementation of some exported macros relies on it. */
+/* Don't use anything defined here and not in cord.h. */
+
+# define MAX_DEPTH 48
+ /* The maximum depth of a balanced cord + 1. */
+ /* We don't let cords get deeper than MAX_DEPTH. */
+
+struct CORD_pe {
+ CORD pe_cord;
+ size_t pe_start_pos;
+};
+
+/* A structure describing an entry on the path from the root */
+/* to current position. */
+typedef struct CORD_Pos {
+ size_t cur_pos;
+ int path_len;
+# define CORD_POS_INVALID (0x55555555)
+ /* path_len == INVALID <==> position invalid */
+ const char *cur_leaf; /* Current leaf, if it is a string. */
+ /* If the current leaf is a function, */
+ /* then this may point to function_buf */
+ /* containing the next few characters. */
+ /* Always points to a valid string */
+ /* containing the current character */
+ /* unless cur_end is 0. */
+ size_t cur_start; /* Start position of cur_leaf */
+ size_t cur_end; /* Ending position of cur_leaf */
+ /* 0 if cur_leaf is invalid. */
+ struct CORD_pe path[MAX_DEPTH + 1];
+ /* path[path_len] is the leaf corresponding to cur_pos */
+ /* path[0].pe_cord is the cord we point to. */
+# define FUNCTION_BUF_SZ 8
+ char function_buf[FUNCTION_BUF_SZ]; /* Space for next few chars */
+ /* from function node. */
+} CORD_pos[1];
+
+/* Extract the cord from a position: */
+CORD CORD_pos_to_cord(CORD_pos p);
+
+/* Extract the current index from a position: */
+size_t CORD_pos_to_index(CORD_pos p);
+
+/* Fetch the character located at the given position: */
+char CORD_pos_fetch(CORD_pos p);
+
+/* Initialize the position to refer to the given cord and index.	*/
+/* Note that this is the most expensive function on positions: */
+void CORD_set_pos(CORD_pos p, CORD x, size_t i);
+
+/* Advance the position to the next character. */
+/* P must be initialized and valid. */
+/* Invalidates p if past end: */
+void CORD_next(CORD_pos p);
+
+/* Move the position to the preceding character. */
+/* P must be initialized and valid. */
+/* Invalidates p if past beginning: */
+void CORD_prev(CORD_pos p);
+
+/* Is the position valid, i.e. inside the cord? */
+int CORD_pos_valid(CORD_pos p);
+
+char CORD__pos_fetch(CORD_pos);
+void CORD__next(CORD_pos);
+void CORD__prev(CORD_pos);
+
+#define CORD_pos_fetch(p) \
+ (((p)[0].cur_end != 0)? \
+ (p)[0].cur_leaf[(p)[0].cur_pos - (p)[0].cur_start] \
+ : CORD__pos_fetch(p))
+
+#define CORD_next(p) \
+ (((p)[0].cur_pos + 1 < (p)[0].cur_end)? \
+ (p)[0].cur_pos++ \
+ : (CORD__next(p), 0))
+
+#define CORD_prev(p) \
+ (((p)[0].cur_end != 0 && (p)[0].cur_pos > (p)[0].cur_start)? \
+ (p)[0].cur_pos-- \
+ : (CORD__prev(p), 0))
+
+#define CORD_pos_to_index(p) ((p)[0].cur_pos)
+
+#define CORD_pos_to_cord(p) ((p)[0].path[0].pe_cord)
+
+#define CORD_pos_valid(p) ((p)[0].path_len != CORD_POS_INVALID)
+
+/* Some grubby stuff for performance-critical friends: */
+#define CORD_pos_chars_left(p) ((long)((p)[0].cur_end) - (long)((p)[0].cur_pos))
+ /* Number of characters in cache. <= 0 ==> none */
+
+#define CORD_pos_advance(p,n) ((p)[0].cur_pos += (n) - 1, CORD_next(p))
+ /* Advance position by n characters */
+ /* 0 < n < CORD_pos_chars_left(p) */
+
+#define CORD_pos_cur_char_addr(p) \
+ (p)[0].cur_leaf + ((p)[0].cur_pos - (p)[0].cur_start)
+ /* address of current character in cache. */
+
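+
+/* Illustrative sketch, not part of the original header: the usual way   */
+/* to traverse a cord one character at a time using positions.           */
+#if 0 /* example only, not compiled */
+static size_t count_occurrences(CORD x, char target)
+{
+    CORD_pos p;
+    size_t n = 0;
+
+    for (CORD_set_pos(p, x, 0); CORD_pos_valid(p); CORD_next(p)) {
+        if (CORD_pos_fetch(p) == target) n++;
+    }
+    return n;
+}
+#endif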
+#endif
diff --git a/tools/build/src/engine/boehm_gc/include/private/darwin_semaphore.h b/tools/build/src/engine/boehm_gc/include/private/darwin_semaphore.h
new file mode 100644
index 000000000..0f43982d5
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/darwin_semaphore.h
@@ -0,0 +1,68 @@
+#ifndef GC_DARWIN_SEMAPHORE_H
+#define GC_DARWIN_SEMAPHORE_H
+
+#if !defined(GC_DARWIN_THREADS)
+#error darwin_semaphore.h included with GC_DARWIN_THREADS not defined
+#endif
+
+/*
+ This is a very simple semaphore implementation for darwin. It
+ is implemented in terms of pthreads calls so it isn't async signal
+ safe. This isn't a problem because signals aren't used to
+ suspend threads on darwin.
+*/
+
+typedef struct {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ int value;
+} sem_t;
+
+static int sem_init(sem_t *sem, int pshared, int value) {
+ int ret;
+ if(pshared)
+ GC_abort("sem_init with pshared set");
+ sem->value = value;
+
+ ret = pthread_mutex_init(&sem->mutex,NULL);
+ if(ret < 0) return -1;
+ ret = pthread_cond_init(&sem->cond,NULL);
+ if(ret < 0) return -1;
+ return 0;
+}
+
+static int sem_post(sem_t *sem) {
+ if(pthread_mutex_lock(&sem->mutex) < 0)
+ return -1;
+ sem->value++;
+ if(pthread_cond_signal(&sem->cond) < 0) {
+ pthread_mutex_unlock(&sem->mutex);
+ return -1;
+ }
+ if(pthread_mutex_unlock(&sem->mutex) < 0)
+ return -1;
+ return 0;
+}
+
+static int sem_wait(sem_t *sem) {
+ if(pthread_mutex_lock(&sem->mutex) < 0)
+ return -1;
+ while(sem->value == 0) {
+ pthread_cond_wait(&sem->cond,&sem->mutex);
+ }
+ sem->value--;
+ if(pthread_mutex_unlock(&sem->mutex) < 0)
+ return -1;
+ return 0;
+}
+
+static int sem_destroy(sem_t *sem) {
+ int ret;
+ ret = pthread_cond_destroy(&sem->cond);
+ if(ret < 0) return -1;
+ ret = pthread_mutex_destroy(&sem->mutex);
+ if(ret < 0) return -1;
+ return 0;
+}
+
+#endif
diff --git a/tools/build/src/engine/boehm_gc/include/private/darwin_stop_world.h b/tools/build/src/engine/boehm_gc/include/private/darwin_stop_world.h
new file mode 100644
index 000000000..f6f5314ee
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/darwin_stop_world.h
@@ -0,0 +1,22 @@
+#ifndef GC_DARWIN_STOP_WORLD_H
+#define GC_DARWIN_STOP_WORLD_H
+
+#if !defined(GC_DARWIN_THREADS)
+#error darwin_stop_world.h included without GC_DARWIN_THREADS defined
+#endif
+
+#include <mach/mach.h>
+#include <mach/thread_act.h>
+
+struct thread_stop_info {
+ mach_port_t mach_thread;
+};
+
+struct GC_mach_thread {
+ thread_act_t thread;
+ int already_suspended;
+};
+
+void GC_darwin_register_mach_handler_thread(mach_port_t thread);
+
+#endif
diff --git a/tools/build/src/engine/boehm_gc/include/private/dbg_mlc.h b/tools/build/src/engine/boehm_gc/include/private/dbg_mlc.h
new file mode 100644
index 000000000..fcd027c4f
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/dbg_mlc.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1997 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/*
+ * This is mostly an internal header file. Typical clients should
+ * not use it. Clients that define their own object kinds with
+ * debugging allocators will probably want to include this, however.
+ * No attempt is made to keep the namespace clean. This should not be
+ * included from header files that are frequently included by clients.
+ */
+
+#ifndef _DBG_MLC_H
+
+#define _DBG_MLC_H
+
+# define I_HIDE_POINTERS
+# include "gc_priv.h"
+# ifdef KEEP_BACK_PTRS
+# include "gc_backptr.h"
+# endif
+
+#ifndef HIDE_POINTER
+  /* gc.h was previously included, and hence the I_HIDE_POINTERS	*/
+ /* definition had no effect. Repeat the gc.h definitions here to */
+ /* get them anyway. */
+ typedef GC_word GC_hidden_pointer;
+# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
+# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
+#endif /* HIDE_POINTER */
+
+# define START_FLAG ((word)0xfedcedcb)
+# define END_FLAG ((word)0xbcdecdef)
+ /* Stored both one past the end of user object, and one before */
+ /* the end of the object as seen by the allocator. */
+
+# if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST) \
+ || defined(MAKE_BACK_GRAPH)
+ /* Pointer "source"s that aren't real locations. */
+ /* Used in oh_back_ptr fields and as "source" */
+ /* argument to some marking functions. */
+# define NOT_MARKED (ptr_t)(0)
+# define MARKED_FOR_FINALIZATION (ptr_t)(2)
+ /* Object was marked because it is finalizable. */
+# define MARKED_FROM_REGISTER (ptr_t)(4)
+		/* Object was marked from a register.  Hence the	*/
+ /* source of the reference doesn't have an address. */
+# endif /* KEEP_BACK_PTRS || PRINT_BLACK_LIST */
+
+/* Object header */
+typedef struct {
+# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
+ /* We potentially keep two different kinds of back */
+ /* pointers. KEEP_BACK_PTRS stores a single back */
+ /* pointer in each reachable object to allow reporting */
+ /* of why an object was retained. MAKE_BACK_GRAPH */
+ /* builds a graph containing the inverse of all */
+ /* "points-to" edges including those involving */
+ /* objects that have just become unreachable. This */
+ /* allows detection of growing chains of unreachable */
+ /* objects. It may be possible to eventually combine */
+ /* both, but for now we keep them separate. Both */
+ /* kinds of back pointers are hidden using the */
+	/* following macros.  In both cases, the hidden version  */
+	/* is constrained to have a least significant bit of 1,  */
+	/* to allow it to be distinguished from a free list      */
+	/* link.  This means the plain version must have an      */
+	/* lsb of 0.                                              */
+ /* Note that blocks dropped by black-listing will */
+ /* also have the lsb clear once debugging has */
+ /* started. */
+ /* We're careful never to overwrite a value with lsb 0. */
+# if ALIGNMENT == 1
+ /* Fudge back pointer to be even. */
+# define HIDE_BACK_PTR(p) HIDE_POINTER(~1 & (GC_word)(p))
+# else
+# define HIDE_BACK_PTR(p) HIDE_POINTER(p)
+# endif
+
+# ifdef KEEP_BACK_PTRS
+ GC_hidden_pointer oh_back_ptr;
+# endif
+# ifdef MAKE_BACK_GRAPH
+ GC_hidden_pointer oh_bg_ptr;
+# endif
+# if defined(KEEP_BACK_PTRS) != defined(MAKE_BACK_GRAPH)
+ /* Keep double-pointer-sized alignment. */
+ word oh_dummy;
+# endif
+# endif
+ const char * oh_string; /* object descriptor string */
+ word oh_int; /* object descriptor integers */
+# ifdef NEED_CALLINFO
+ struct callinfo oh_ci[NFRAMES];
+# endif
+# ifndef SHORT_DBG_HDRS
+ word oh_sz; /* Original malloc arg. */
+ word oh_sf; /* start flag */
+# endif /* SHORT_DBG_HDRS */
+} oh;
+/* The size of the above structure is assumed not to dealign things, */
+/* and to be a multiple of the word length. */
+
+#ifdef SHORT_DBG_HDRS
+# define DEBUG_BYTES (sizeof (oh))
+# define UNCOLLECTABLE_DEBUG_BYTES DEBUG_BYTES
+#else
+ /* Add space for END_FLAG, but use any extra space that was already */
+ /* added to catch off-the-end pointers. */
+ /* For uncollectable objects, the extra byte is not added. */
+# define UNCOLLECTABLE_DEBUG_BYTES (sizeof (oh) + sizeof (word))
+# define DEBUG_BYTES (UNCOLLECTABLE_DEBUG_BYTES - EXTRA_BYTES)
+#endif
+
+/* Round bytes to words without adding extra byte at end. */
+#define SIMPLE_ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
+
+/* ADD_CALL_CHAIN stores a (partial) call chain into an object */
+/* header. It may be called with or without the allocation */
+/* lock. */
+/* PRINT_CALL_CHAIN prints the call chain stored in an object */
+/* to stderr. It requires that we do not hold the lock. */
+#if defined(SAVE_CALL_CHAIN)
+ struct callinfo;
+ void GC_save_callers(struct callinfo info[NFRAMES]);
+ void GC_print_callers(struct callinfo info[NFRAMES]);
+# define ADD_CALL_CHAIN(base, ra) GC_save_callers(((oh *)(base)) -> oh_ci)
+# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
+#elif defined(GC_ADD_CALLER)
+ struct callinfo;
+ void GC_print_callers(struct callinfo info[NFRAMES]);
+# define ADD_CALL_CHAIN(base, ra) ((oh *)(base)) -> oh_ci[0].ci_pc = (ra)
+# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
+#else
+# define ADD_CALL_CHAIN(base, ra)
+# define PRINT_CALL_CHAIN(base)
+#endif
+
+# ifdef GC_ADD_CALLER
+# define OPT_RA ra,
+# else
+# define OPT_RA
+# endif
+
+
+/* Check whether object with base pointer p has debugging info */
+/* p is assumed to point to a legitimate object in our part */
+/* of the heap. */
+#ifdef SHORT_DBG_HDRS
+# define GC_has_other_debug_info(p) TRUE
+#else
+ GC_bool GC_has_other_debug_info(/* p */);
+#endif
+
+#if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
+# define GC_HAS_DEBUG_INFO(p) \
+ ((*((word *)p) & 1) && GC_has_other_debug_info(p))
+#else
+# define GC_HAS_DEBUG_INFO(p) GC_has_other_debug_info(p)
+#endif
+
+/* Store debugging info into p. Return displaced pointer. */
+/* Assumes we don't hold allocation lock. */
+ptr_t GC_store_debug_info(/* p, sz, string, integer */);
+
+#endif /* _DBG_MLC_H */
diff --git a/tools/build/src/engine/boehm_gc/include/private/gc_hdrs.h b/tools/build/src/engine/boehm_gc/include/private/gc_hdrs.h
new file mode 100644
index 000000000..559556ca7
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/gc_hdrs.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, July 11, 1995 11:54 am PDT */
+# ifndef GC_HEADERS_H
+# define GC_HEADERS_H
+typedef struct hblkhdr hdr;
+
+# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
+ --> Get a real machine.
+# endif
+
+/*
+ * The 2 level tree data structure that is used to find block headers.
+ * If there are more than 32 bits in a pointer, the top level is a hash
+ * table.
+ *
+ * This defines HDR, GET_HDR, and SET_HDR, the main macros used to
+ * retrieve and set object headers.
+ *
+ * We take advantage of a header lookup
+ * cache. This is a locally declared direct mapped cache, used inside
+ * the marker. The HC_GET_HDR macro uses and maintains this
+ * cache. Assuming we get reasonable hit rates, this shaves a few
+ * memory references from each pointer validation.
+ */
+
+# if CPP_WORDSZ > 32
+# define HASH_TL
+# endif
+
+/* Define appropriate out-degrees for each of the two tree levels */
+# ifdef SMALL_CONFIG
+# define LOG_BOTTOM_SZ 11
+ /* Keep top index size reasonable with smaller blocks. */
+# else
+# define LOG_BOTTOM_SZ 10
+# endif
+# ifndef HASH_TL
+# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
+# else
+# define LOG_TOP_SZ 11
+# endif
+# define TOP_SZ (1 << LOG_TOP_SZ)
+# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
+
+#ifndef SMALL_CONFIG
+# define USE_HDR_CACHE
+#endif
+
+/* #define COUNT_HDR_CACHE_HITS */
+
+# ifdef COUNT_HDR_CACHE_HITS
+ extern word GC_hdr_cache_hits;
+ extern word GC_hdr_cache_misses;
+# define HC_HIT() ++GC_hdr_cache_hits
+# define HC_MISS() ++GC_hdr_cache_misses
+# else
+# define HC_HIT()
+# define HC_MISS()
+# endif
+
+ typedef struct hce {
+ word block_addr; /* right shifted by LOG_HBLKSIZE */
+ hdr * hce_hdr;
+ } hdr_cache_entry;
+
+# define HDR_CACHE_SIZE 8 /* power of 2 */
+
+# define DECLARE_HDR_CACHE \
+ hdr_cache_entry hdr_cache[HDR_CACHE_SIZE]
+
+# define INIT_HDR_CACHE BZERO(hdr_cache, sizeof(hdr_cache))
+
+# define HCE(h) hdr_cache + (((word)(h) >> LOG_HBLKSIZE) & (HDR_CACHE_SIZE-1))
+
+# define HCE_VALID_FOR(hce,h) ((hce) -> block_addr == \
+ ((word)(h) >> LOG_HBLKSIZE))
+
+# define HCE_HDR(h) ((hce) -> hce_hdr)
+
+#ifdef PRINT_BLACK_LIST
+ hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source);
+# define HEADER_CACHE_MISS(p, hce, source) \
+ GC_header_cache_miss(p, hce, source)
+#else
+ hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce);
+# define HEADER_CACHE_MISS(p, hce, source) GC_header_cache_miss(p, hce)
+#endif
+
+/* Set hhdr to the header for p. Analogous to GET_HDR below, */
+/* except that in the case of large objects, it */
+/* gets the header for the object beginning, if GC_all_interior_ptrs */
+/* is set. */
+/* Returns zero if p points to somewhere other than the first page */
+/* of an object, and it is not a valid pointer to the object. */
+# define HC_GET_HDR(p, hhdr, source, exit_label) \
+ { \
+ hdr_cache_entry * hce = HCE(p); \
+ if (EXPECT(HCE_VALID_FOR(hce, p), 1)) { \
+ HC_HIT(); \
+ hhdr = hce -> hce_hdr; \
+ } else { \
+ hhdr = HEADER_CACHE_MISS(p, hce, source); \
+ if (0 == hhdr) goto exit_label; \
+ } \
+ }
+
+typedef struct bi {
+ hdr * index[BOTTOM_SZ];
+ /*
+ * The bottom level index contains one of three kinds of values:
+ * 0 means we're not responsible for this block,
+ * or this is a block other than the first one in a free block.
+ * 1 < (long)X <= MAX_JUMP means the block starts at least
+ * X * HBLKSIZE bytes before the current address.
+ * A valid pointer points to a hdr structure. (The above can't be
+ * valid pointers due to the GET_MEM return convention.)
+ */
+ struct bi * asc_link; /* All indices are linked in */
+ /* ascending order... */
+ struct bi * desc_link; /* ... and in descending order. */
+ word key; /* high order address bits. */
+# ifdef HASH_TL
+ struct bi * hash_link; /* Hash chain link. */
+# endif
+} bottom_index;
+
+/* extern bottom_index GC_all_nils; - really part of GC_arrays */
+
+/* extern bottom_index * GC_top_index []; - really part of GC_arrays */
+ /* Each entry points to a bottom_index. */
+ /* On a 32 bit machine, it points to */
+ /* the index for a set of high order */
+ /* bits equal to the index. For longer */
+ /* addresses, we hash the high order */
+ /* bits to compute the index in */
+ /* GC_top_index, and each entry points */
+ /* to a hash chain. */
+ /* The last entry in each chain is */
+ /* GC_all_nils. */
+
+
+# define MAX_JUMP (HBLKSIZE - 1)
+
+# define HDR_FROM_BI(bi, p) \
+ ((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
+# ifndef HASH_TL
+# define BI(p) (GC_top_index \
+ [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
+# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
+# ifdef SMALL_CONFIG
+# define HDR(p) GC_find_header((ptr_t)(p))
+# else
+# define HDR(p) HDR_INNER(p)
+# endif
+# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
+# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
+# define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr)
+# define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p))
+# else /* hash */
+/* Hash function for tree top level */
+# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
+/* Set bottom_indx to point to the bottom index for address p */
+# define GET_BI(p, bottom_indx) \
+ { \
+ register word hi = \
+ (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
+ register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
+ \
+ while (_bi -> key != hi && _bi != GC_all_nils) \
+ _bi = _bi -> hash_link; \
+ (bottom_indx) = _bi; \
+ }
+# define GET_HDR_ADDR(p, ha) \
+ { \
+ register bottom_index * bi; \
+ \
+ GET_BI(p, bi); \
+ (ha) = &(HDR_FROM_BI(bi, p)); \
+ }
+# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ (hhdr) = *_ha; }
+# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ *_ha = (hhdr); }
+# define HDR(p) GC_find_header((ptr_t)(p))
+# endif
+
+/* Is the result a forwarding address to someplace closer to the */
+/* beginning of the block or NIL? */
+# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((size_t) (hhdr) <= MAX_JUMP)
+
+/* Get an HBLKSIZE aligned address closer to the beginning of the block */
+/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
+# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (size_t)(hhdr))
+# endif /* GC_HEADERS_H */
diff --git a/tools/build/src/engine/boehm_gc/include/private/gc_locks.h b/tools/build/src/engine/boehm_gc/include/private/gc_locks.h
new file mode 100644
index 000000000..d7c83b07b
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/gc_locks.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+#ifndef GC_LOCKS_H
+#define GC_LOCKS_H
+
+/*
+ * Mutual exclusion between allocator/collector routines.
+ * Needed if there is more than one allocator thread.
+ * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK.
+ *
+ * Note that I_HOLD_LOCK and I_DONT_HOLD_LOCK are used only positively
+ * in assertions, and may return TRUE in the "don't know" case.
+ */
+# ifdef THREADS
+# include <atomic_ops.h>
+
+ void GC_noop1(word);
+# ifdef PCR
+# include <base/PCR_Base.h>
+# include <th/PCR_Th.h>
+ extern PCR_Th_ML GC_allocate_ml;
+# define DCL_LOCK_STATE \
+ PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
+# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
+# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
+# endif
+
+# if !defined(AO_HAVE_test_and_set_acquire) && defined(GC_PTHREADS)
+# define USE_PTHREAD_LOCKS
+# endif
+
+# if defined(GC_WIN32_THREADS) && defined(GC_PTHREADS)
+# define USE_PTHREAD_LOCKS
+# endif
+
+# if defined(GC_WIN32_THREADS) && !defined(USE_PTHREAD_LOCKS)
+# include <windows.h>
+# define NO_THREAD (DWORD)(-1)
+ extern DWORD GC_lock_holder;
+ GC_API CRITICAL_SECTION GC_allocate_ml;
+# ifdef GC_ASSERTIONS
+# define UNCOND_LOCK() \
+ { EnterCriticalSection(&GC_allocate_ml); \
+ SET_LOCK_HOLDER(); }
+# define UNCOND_UNLOCK() \
+ { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
+ LeaveCriticalSection(&GC_allocate_ml); }
+# else
+# define UNCOND_LOCK() EnterCriticalSection(&GC_allocate_ml);
+# define UNCOND_UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
+# endif /* !GC_ASSERTIONS */
+# define SET_LOCK_HOLDER() GC_lock_holder = GetCurrentThreadId()
+# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
+# define I_HOLD_LOCK() (!GC_need_to_lock \
+ || GC_lock_holder == GetCurrentThreadId())
+# define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
+ || GC_lock_holder != GetCurrentThreadId())
+# elif defined(GC_PTHREADS)
+# include <pthread.h>
+
+ /* Posix allows pthread_t to be a struct, though it rarely is. */
+ /* Unfortunately, we need to use a pthread_t to index a data */
+ /* structure. It also helps if comparisons don't involve a */
+ /* function call. Hence we introduce platform-dependent macros */
+ /* to compare pthread_t ids and to map them to integers. */
+    /* The mapping to integers does not need to result in different    */
+ /* integers for each thread, though that should be true as much */
+ /* as possible. */
+ /* Refine to exclude platforms on which pthread_t is struct */
+# if !defined(GC_WIN32_PTHREADS)
+# define NUMERIC_THREAD_ID(id) ((unsigned long)(id))
+# define THREAD_EQUAL(id1, id2) ((id1) == (id2))
+# define NUMERIC_THREAD_ID_UNIQUE
+# else
+# if defined(GC_WIN32_PTHREADS)
+# define NUMERIC_THREAD_ID(id) ((unsigned long)(id.p))
+ /* Using documented internal details of win32_pthread library. */
+ /* Faster than pthread_equal(). Should not change with */
+ /* future versions of win32_pthread library. */
+# define THREAD_EQUAL(id1, id2) ((id1.p == id2.p) && (id1.x == id2.x))
+# undef NUMERIC_THREAD_ID_UNIQUE
+# else
+ /* Generic definitions that always work, but will result in */
+ /* poor performance and weak assertion checking. */
+# define NUMERIC_THREAD_ID(id) 1l
+# define THREAD_EQUAL(id1, id2) pthread_equal(id1, id2)
+# undef NUMERIC_THREAD_ID_UNIQUE
+# endif
+# endif
+# define NO_THREAD (-1l)
+ /* != NUMERIC_THREAD_ID(pthread_self()) for any thread */
+
+# if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
+ /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
+ /* be held for long periods, if it is held at all. Thus spinning */
+ /* and sleeping for fixed periods are likely to result in */
+ /* significant wasted time. We thus rely mostly on queued locks. */
+# define USE_SPIN_LOCK
+ extern volatile AO_TS_t GC_allocate_lock;
+ extern void GC_lock(void);
+ /* Allocation lock holder. Only set if acquired by client through */
+ /* GC_call_with_alloc_lock. */
+# ifdef GC_ASSERTIONS
+# define UNCOND_LOCK() \
+ { if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_SET) \
+ GC_lock(); \
+ SET_LOCK_HOLDER(); }
+# define UNCOND_UNLOCK() \
+ { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
+ AO_CLEAR(&GC_allocate_lock); }
+# else
+# define UNCOND_LOCK() \
+ { if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_SET) \
+ GC_lock(); }
+# define UNCOND_UNLOCK() \
+ AO_CLEAR(&GC_allocate_lock)
+# endif /* !GC_ASSERTIONS */
+# else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
+# ifndef USE_PTHREAD_LOCKS
+# define USE_PTHREAD_LOCKS
+# endif
+# endif /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCK */
+# ifdef USE_PTHREAD_LOCKS
+# include <pthread.h>
+ extern pthread_mutex_t GC_allocate_ml;
+# ifdef GC_ASSERTIONS
+# define UNCOND_LOCK() \
+ { GC_lock(); \
+ SET_LOCK_HOLDER(); }
+# define UNCOND_UNLOCK() \
+ { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
+ pthread_mutex_unlock(&GC_allocate_ml); }
+# else /* !GC_ASSERTIONS */
+# if defined(NO_PTHREAD_TRYLOCK)
+# define UNCOND_LOCK() GC_lock();
+# else /* !defined(NO_PTHREAD_TRYLOCK) */
+# define UNCOND_LOCK() \
+ { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
+# endif
+# define UNCOND_UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# endif /* !GC_ASSERTIONS */
+# endif /* USE_PTHREAD_LOCKS */
+# define SET_LOCK_HOLDER() \
+ GC_lock_holder = NUMERIC_THREAD_ID(pthread_self())
+# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
+# define I_HOLD_LOCK() \
+ (!GC_need_to_lock || \
+ GC_lock_holder == NUMERIC_THREAD_ID(pthread_self()))
+# ifndef NUMERIC_THREAD_ID_UNIQUE
+# define I_DONT_HOLD_LOCK() 1 /* Conservatively say yes */
+# else
+# define I_DONT_HOLD_LOCK() \
+ (!GC_need_to_lock \
+ || GC_lock_holder != NUMERIC_THREAD_ID(pthread_self()))
+# endif
+ extern volatile GC_bool GC_collecting;
+# define ENTER_GC() GC_collecting = 1;
+# define EXIT_GC() GC_collecting = 0;
+ extern void GC_lock(void);
+ extern unsigned long GC_lock_holder;
+# ifdef GC_ASSERTIONS
+ extern unsigned long GC_mark_lock_holder;
+# endif
+# endif /* GC_PTHREADS with linux_threads.c implementation */
+
+
+# else /* !THREADS */
+# define LOCK()
+# define UNLOCK()
+# define SET_LOCK_HOLDER()
+# define UNSET_LOCK_HOLDER()
+# define I_HOLD_LOCK() TRUE
+# define I_DONT_HOLD_LOCK() TRUE
+ /* Used only in positive assertions or to test whether */
+                /* we still need to acquire the lock.  TRUE works in   */
+ /* either case. */
+# endif /* !THREADS */
+
+#if defined(UNCOND_LOCK) && !defined(LOCK)
+ GC_API GC_bool GC_need_to_lock;
+    /* At least two threads running; need to lock.   */
+# define LOCK() if (GC_need_to_lock) { UNCOND_LOCK(); }
+# define UNLOCK() if (GC_need_to_lock) { UNCOND_UNLOCK(); }
+#endif
+
+# ifndef ENTER_GC
+# define ENTER_GC()
+# define EXIT_GC()
+# endif
+
+# ifndef DCL_LOCK_STATE
+# define DCL_LOCK_STATE
+# endif
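+
+/*
+ * Illustrative usage sketch, not from the GC sources: how an allocator entry
+ * point typically composes the locking macros above.  The routine name
+ * demo_alloc_entry is hypothetical and the block is never compiled.
+ */
+#if 0
+  void * demo_alloc_entry(size_t lb)
+  {
+    void *result;
+    DCL_LOCK_STATE;
+
+    LOCK();                 /* no-op unless GC_need_to_lock has been set */
+    result = 0;             /* ... allocate while holding the lock ... */
+    UNLOCK();
+    return result;
+  }
+#endif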
+
+#endif /* GC_LOCKS_H */
diff --git a/tools/build/src/engine/boehm_gc/include/private/gc_pmark.h b/tools/build/src/engine/boehm_gc/include/private/gc_pmark.h
new file mode 100644
index 000000000..36083970a
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/gc_pmark.h
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 2001 by Hewlett-Packard Company. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* Private declarations of GC marker data structures and macros */
+
+/*
+ * Declarations of mark stack. Needed by marker and client supplied mark
+ * routines. Transitively include gc_priv.h.
+ * (Note that gc_priv.h should not be included before this, since this
+ * includes dbg_mlc.h, which wants to include gc_priv.h AFTER defining
+ * I_HIDE_POINTERS.)
+ */
+#ifndef GC_PMARK_H
+# define GC_PMARK_H
+
+# if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST)
+# include "dbg_mlc.h"
+# endif
+# ifndef GC_MARK_H
+# include "../gc_mark.h"
+# endif
+# ifndef GC_PRIVATE_H
+# include "gc_priv.h"
+# endif
+
+/* The real declarations of the following are in gc_priv.h, so that	*/
+/* we can avoid scanning the following table. */
+/*
+extern mark_proc GC_mark_procs[MAX_MARK_PROCS];
+*/
+
+/*
+ * Mark descriptor stuff that should remain private for now, mostly
+ * because it's hard to export WORDSZ without including gcconfig.h.
+ */
+# define BITMAP_BITS (WORDSZ - GC_DS_TAG_BITS)
+# define PROC(descr) \
+ (GC_mark_procs[((descr) >> GC_DS_TAG_BITS) & (GC_MAX_MARK_PROCS-1)])
+# define ENV(descr) \
+ ((descr) >> (GC_DS_TAG_BITS + GC_LOG_MAX_MARK_PROCS))
+# define MAX_ENV \
+ (((word)1 << (WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS)) - 1)
+
+
+extern unsigned GC_n_mark_procs;
+
+/* Number of mark stack entries to discard on overflow. */
+#define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8)
+
+typedef struct GC_ms_entry {
+ ptr_t mse_start; /* First word of object */
+ GC_word mse_descr; /* Descriptor; low order two bits are tags, */
+ /* identifying the upper 30 bits as one of the */
+ /* following: */
+} mse;
+
+extern size_t GC_mark_stack_size;
+
+extern mse * GC_mark_stack_limit;
+
+#ifdef PARALLEL_MARK
+ extern mse * volatile GC_mark_stack_top;
+#else
+ extern mse * GC_mark_stack_top;
+#endif
+
+extern mse * GC_mark_stack;
+
+#ifdef PARALLEL_MARK
+ /*
+ * Allow multiple threads to participate in the marking process.
+ * This works roughly as follows:
+ * The main mark stack never shrinks, but it can grow.
+ *
+ * The initiating thread holds the GC lock, and sets GC_help_wanted.
+ *
+ * Other threads:
+ * 1) update helper_count (while holding mark_lock.)
+ * 2) allocate a local mark stack
+ * repeatedly:
+ * 3) Steal a global mark stack entry by atomically replacing
+ * its descriptor with 0.
+ * 4) Copy it to the local stack.
+ * 5) Mark on the local stack until it is empty, or
+ * it may be profitable to copy it back.
+ * 6) If necessary, copy local stack to global one,
+ * holding mark lock.
+ * 7) Stop when the global mark stack is empty.
+ * 8) decrement helper_count (holding mark_lock).
+ *
+ * This is an experiment to see if we can do something along the lines
+ * of the University of Tokyo SGC in a less intrusive, though probably
+ * also less performant, way.
+ */
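+
+/*
+ * Illustrative sketch, not from the GC sources, of step 3 above: a helper
+ * "steals" a global mark stack entry by atomically replacing its descriptor
+ * with 0 before copying it to a local stack.  It is written with C11 atomics
+ * and a simplified entry type; the names demo_mse and demo_steal are
+ * hypothetical, and the block is never compiled.
+ */
+#if 0
+#include <stdatomic.h>
+#include <stddef.h>
+
+struct demo_mse {
+    char *start;
+    _Atomic size_t descr;       /* 0 means "already claimed by some helper" */
+};
+
+/* Returns nonzero and fills *out iff this thread claimed the entry. */
+static int demo_steal(struct demo_mse *global_entry, struct demo_mse *out)
+{
+    size_t d = atomic_load(&global_entry->descr);
+    if (d == 0) return 0;                               /* nothing to steal */
+    if (!atomic_compare_exchange_strong(&global_entry->descr, &d, (size_t)0))
+        return 0;                                       /* lost the race */
+    out->start = global_entry->start;                   /* copy to local stack */
+    atomic_store(&out->descr, d);
+    return 1;
+}
+#endif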
+ void GC_do_parallel_mark();
+  /* Initiate parallel marking.	*/
+
+ extern GC_bool GC_help_wanted; /* Protected by mark lock */
+ extern unsigned GC_helper_count; /* Number of running helpers. */
+ /* Protected by mark lock */
+ extern unsigned GC_active_count; /* Number of active helpers. */
+ /* Protected by mark lock */
+ /* May increase and decrease */
+ /* within each mark cycle. But */
+ /* once it returns to 0, it */
+ /* stays zero for the cycle. */
+ /* GC_mark_stack_top is also protected by mark lock. */
+ /*
+ * GC_notify_all_marker() is used when GC_help_wanted is first set,
+ * when the last helper becomes inactive,
+ * when something is added to the global mark stack, and just after
+ * GC_mark_no is incremented.
+ * This could be split into multiple CVs (and probably should be to
+ * scale to really large numbers of processors.)
+ */
+#endif /* PARALLEL_MARK */
+
+/* Return a pointer to within 1st page of object. */
+/* Set *new_hdr_p to corr. hdr. */
+ptr_t GC_find_start(ptr_t current, hdr *hhdr, hdr **new_hdr_p);
+
+mse * GC_signal_mark_stack_overflow(mse *msp);
+
+/* Push the object obj with corresponding heap block header hhdr onto */
+/* the mark stack. */
+# define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
+{ \
+ register word _descr = (hhdr) -> hb_descr; \
+ \
+ if (_descr != 0) { \
+ mark_stack_top++; \
+ if (mark_stack_top >= mark_stack_limit) { \
+ mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top); \
+ } \
+ mark_stack_top -> mse_start = (obj); \
+ mark_stack_top -> mse_descr = _descr; \
+ } \
+}
+
+/* Push the contents of current onto the mark stack if it is a valid */
+/* ptr to a currently unmarked object. Mark it. */
+/* If we assumed a standard-conforming compiler, we could probably */
+/* generate the exit_label transparently. */
+# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
+ source, exit_label) \
+{ \
+ hdr * my_hhdr; \
+ \
+ HC_GET_HDR(current, my_hhdr, source, exit_label); \
+ PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
+ source, exit_label, my_hhdr, TRUE); \
+exit_label: ; \
+}
+
+/* Set mark bit, exit if it was already set. */
+
+# ifdef USE_MARK_BITS
+# ifdef PARALLEL_MARK
+ /* The following may fail to exit even if the bit was already set. */
+ /* For our uses, that's benign: */
+# define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
+ { \
+        if (!(*(addr) & (bits))) { \
+          AO_or((AO_t *)(addr), (bits)); \
+        } else { \
+          goto exit_label; \
+ } \
+ }
+# else
+# define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
+ { \
+ word old = *(addr); \
+ word my_bits = (bits); \
+ if (old & my_bits) goto exit_label; \
+ *(addr) = (old | my_bits); \
+ }
+# endif /* !PARALLEL_MARK */
+# define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
+ { \
+ word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(bit_no); \
+ \
+ OR_WORD_EXIT_IF_SET(mark_word_addr, (word)1 << modWORDSZ(bit_no), \
+ exit_label); \
+ }
+# endif
+
+
+#ifdef USE_MARK_BYTES
+# if defined(I386) && defined(__GNUC__)
+# define LONG_MULT(hprod, lprod, x, y) { \
+ asm("mull %2" : "=a"(lprod), "=d"(hprod) : "g"(y), "0"(x)); \
+ }
+# else /* No in-line X86 assembly code */
+# define LONG_MULT(hprod, lprod, x, y) { \
+ unsigned long long prod = (unsigned long long)x \
+ * (unsigned long long)y; \
+ hprod = prod >> 32; \
+ lprod = (unsigned32)prod; \
+ }
+# endif
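+
+/* Worked example (editorial): LONG_MULT implements the inverse-multiply   */
+/* trick used by the MARK_BIT_PER_OBJ code below.  With hb_sz = 48 and     */
+/* hb_inv_sz = 89478486 (an upper bound for 2**32/48), a displacement of   */
+/* 200 bytes gives 200 * 89478486 = 0x42AAAAB30, so hprod = 4 (the object  */
+/* index, since 200/48 = 4) and lprod = 0x2AAAAB30, from whose upper bits  */
+/* the byte offset into object 4 (here 8) can be recovered.                */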
+
+ /* There is a race here, and we may set */
+ /* the bit twice in the concurrent case. This can result in the */
+ /* object being pushed twice. But that's only a performance issue. */
+# define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
+ { \
+ char * mark_byte_addr = (char *)hhdr -> hb_marks + (bit_no); \
+ char mark_byte = *mark_byte_addr; \
+ \
+ if (mark_byte) goto exit_label; \
+ *mark_byte_addr = 1; \
+ }
+#endif /* USE_MARK_BYTES */
+
+#ifdef PARALLEL_MARK
+# define INCR_MARKS(hhdr) \
+ AO_store(&(hhdr -> hb_n_marks), AO_load(&(hhdr -> hb_n_marks))+1);
+#else
+# define INCR_MARKS(hhdr) ++(hhdr -> hb_n_marks)
+#endif
+
+#ifdef ENABLE_TRACE
+# define TRACE(source, cmd) \
+ if (GC_trace_addr != 0 && (ptr_t)(source) == GC_trace_addr) cmd
+# define TRACE_TARGET(target, cmd) \
+ if (GC_trace_addr != 0 && (target) == *(ptr_t *)GC_trace_addr) cmd
+#else
+# define TRACE(source, cmd)
+# define TRACE_TARGET(source, cmd)
+#endif
+/* If the mark bit corresponding to current is not set, set it, and */
+/* push the contents of the object on the mark stack. Current points */
+/* to the beginning of the object.  We rely on the fact that the	*/
+/* preceding header calculation will succeed for a pointer past the	*/
+/* first page of an object, only if it is in fact a valid pointer	*/
+/* to the object.  Thus we can omit the otherwise necessary tests	*/
+/* here.  Note in particular that the "displ" value is the displacement */
+/* from the beginning of the heap block, which may itself be in the	*/
+/* interior of a large object. */
+#ifdef MARK_BIT_PER_GRANULE
+# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
+ source, exit_label, hhdr, do_offset_check) \
+{ \
+ size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
+ /* displ is always within range. If current doesn't point to */ \
+ /* first block, then we are in the all_interior_pointers case, and */ \
+ /* it is safe to use any displacement value. */ \
+ size_t gran_displ = BYTES_TO_GRANULES(displ); \
+ size_t gran_offset = hhdr -> hb_map[gran_displ]; \
+ size_t byte_offset = displ & (GRANULE_BYTES - 1); \
+ ptr_t base = current; \
+ /* The following always fails for large block references. */ \
+ if (EXPECT((gran_offset | byte_offset) != 0, FALSE)) { \
+ if (hhdr -> hb_large_block) { \
+ /* gran_offset is bogus. */ \
+ size_t obj_displ; \
+ base = (ptr_t)(hhdr -> hb_block); \
+ obj_displ = (ptr_t)(current) - base; \
+ if (obj_displ != displ) { \
+ GC_ASSERT(obj_displ < hhdr -> hb_sz); \
+ /* Must be in all_interior_pointer case, not first block */ \
+ /* already did validity check on cache miss. */ \
+ ; \
+ } else { \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ } \
+ gran_displ = 0; \
+ GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
+ hhdr -> hb_block == HBLKPTR(current)); \
+ GC_ASSERT((ptr_t)(hhdr -> hb_block) <= (ptr_t) current); \
+ } else { \
+ size_t obj_displ = GRANULES_TO_BYTES(gran_offset) \
+ + byte_offset; \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ gran_displ -= gran_offset; \
+ base -= obj_displ; \
+ } \
+ } \
+ GC_ASSERT(hhdr == GC_find_header(base)); \
+ GC_ASSERT(gran_displ % BYTES_TO_GRANULES(hhdr -> hb_sz) == 0); \
+ TRACE(source, GC_log_printf("GC:%d: passed validity tests\n",GC_gc_no)); \
+ SET_MARK_BIT_EXIT_IF_SET(hhdr, gran_displ, exit_label); \
+ TRACE(source, GC_log_printf("GC:%d: previously unmarked\n",GC_gc_no)); \
+ TRACE_TARGET(base, \
+ GC_log_printf("GC:%d: marking %p from %p instead\n", GC_gc_no, \
+ base, source)); \
+ INCR_MARKS(hhdr); \
+ GC_STORE_BACK_PTR((ptr_t)source, base); \
+ PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
+}
+#endif /* MARK_BIT_PER_GRANULE */
+
+#ifdef MARK_BIT_PER_OBJ
+# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
+ source, exit_label, hhdr, do_offset_check) \
+{ \
+ size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
+ unsigned32 low_prod, high_prod, offset_fraction; \
+ unsigned32 inv_sz = hhdr -> hb_inv_sz; \
+ ptr_t base = current; \
+ LONG_MULT(high_prod, low_prod, displ, inv_sz); \
+    /* The 64-bit product approximates (displ/sz_in_bytes) * 2**32 from */ \
+    /* above, so high_prod is the index of the object containing current. */ \
+ if (EXPECT(low_prod >> 16 != 0, FALSE)) { \
+      /* FIXME: fails if offset is a multiple of HBLKSIZE which becomes 0 */ \
+ if (inv_sz == LARGE_INV_SZ) { \
+ size_t obj_displ; \
+ base = (ptr_t)(hhdr -> hb_block); \
+ obj_displ = (ptr_t)(current) - base; \
+ if (obj_displ != displ) { \
+ GC_ASSERT(obj_displ < hhdr -> hb_sz); \
+ /* Must be in all_interior_pointer case, not first block */ \
+ /* already did validity check on cache miss. */ \
+ ; \
+ } else { \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ } \
+ GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
+ hhdr -> hb_block == HBLKPTR(current)); \
+ GC_ASSERT((ptr_t)(hhdr -> hb_block) < (ptr_t) current); \
+ } else { \
+ /* Accurate enough if HBLKSIZE <= 2**15. */ \
+ GC_ASSERT(HBLKSIZE <= (1 << 15)); \
+ size_t obj_displ = (((low_prod >> 16) + 1) * (hhdr -> hb_sz)) >> 16; \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ base -= obj_displ; \
+ } \
+ } \
+ /* May get here for pointer to start of block not at */ \
+ /* beginning of object. If so, it's valid, and we're fine. */ \
+ GC_ASSERT(high_prod >= 0 && high_prod <= HBLK_OBJS(hhdr -> hb_sz)); \
+ TRACE(source, GC_log_printf("GC:%d: passed validity tests\n",GC_gc_no)); \
+ SET_MARK_BIT_EXIT_IF_SET(hhdr, high_prod, exit_label); \
+ TRACE(source, GC_log_printf("GC:%d: previously unmarked\n",GC_gc_no)); \
+ TRACE_TARGET(base, \
+ GC_log_printf("GC:%d: marking %p from %p instead\n", GC_gc_no, \
+ base, source)); \
+ INCR_MARKS(hhdr); \
+ GC_STORE_BACK_PTR((ptr_t)source, base); \
+ PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
+}
+#endif /* MARK_BIT_PER_OBJ */
+
+#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
+# define PUSH_ONE_CHECKED_STACK(p, source) \
+ GC_mark_and_push_stack(p, (ptr_t)(source))
+#else
+# define PUSH_ONE_CHECKED_STACK(p, source) \
+ GC_mark_and_push_stack(p)
+#endif
+
+/*
+ * Push a single value onto mark stack. Mark from the object pointed to by p.
+ * Invoke FIXUP_POINTER(p) before any further processing.
+ * P is considered valid even if it is an interior pointer.
+ * Previously marked objects are not pushed. Hence we make progress even
+ * if the mark stack overflows.
+ */
+
+# if NEED_FIXUP_POINTER
+ /* Try both the raw version and the fixed up one. */
+# define GC_PUSH_ONE_STACK(p, source) \
+ if ((p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ PUSH_ONE_CHECKED_STACK(p, source); \
+ } \
+ FIXUP_POINTER(p); \
+ if ((p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ PUSH_ONE_CHECKED_STACK(p, source); \
+ }
+# else /* !NEED_FIXUP_POINTER */
+# define GC_PUSH_ONE_STACK(p, source) \
+ if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ PUSH_ONE_CHECKED_STACK(p, source); \
+ }
+# endif
+
+
+/*
+ * As above, but interior pointer recognition as for
+ * normal heap pointers.
+ */
+# define GC_PUSH_ONE_HEAP(p,source) \
+ FIXUP_POINTER(p); \
+ if ((p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ GC_mark_stack_top = GC_mark_and_push( \
+ (void *)(p), GC_mark_stack_top, \
+ GC_mark_stack_limit, (void * *)(source)); \
+ }
+
+/* Mark starting at mark stack entry top (incl.) down to */
+/* mark stack entry bottom (incl.). Stop after performing */
+/* about one page worth of work. Return the new mark stack */
+/* top entry. */
+mse * GC_mark_from(mse * top, mse * bottom, mse *limit);
+
+#define MARK_FROM_MARK_STACK() \
+ GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
+ GC_mark_stack, \
+ GC_mark_stack + GC_mark_stack_size);
+
+/*
+ * Mark from one finalizable object using the specified
+ * mark proc. May not mark the object pointed to by
+ * real_ptr. That is the job of the caller, if appropriate.
+ * Note that this is called with the mutator running, but
+ * with us holding the allocation lock. This is safe only if the
+ * mutator needs the allocation lock to reveal hidden pointers.
+ * FIXME: Why do we need the GC_mark_state test below?
+ */
+# define GC_MARK_FO(real_ptr, mark_proc) \
+{ \
+ (*(mark_proc))(real_ptr); \
+ while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK(); \
+ if (GC_mark_state != MS_NONE) { \
+ GC_set_mark_bit(real_ptr); \
+ while (!GC_mark_some((ptr_t)0)) {} \
+ } \
+}
+
+extern GC_bool GC_mark_stack_too_small;
+ /* We need a larger mark stack. May be */
+ /* set by client supplied mark routines.*/
+
+typedef int mark_state_t; /* Current state of marking, as follows:*/
+ /* Used to remember where we are during */
+ /* concurrent marking. */
+
+ /* We say something is dirty if it was */
+ /* written since the last time we */
+ /* retrieved dirty bits. We say it's */
+ /* grungy if it was marked dirty in the */
+ /* last set of bits we retrieved. */
+
+ /* Invariant I: all roots and marked */
+ /* objects p are either dirty, or point */
+ /* to objects q that are either marked */
+ /* or a pointer to q appears in a range */
+ /* on the mark stack. */
+
+# define MS_NONE 0 /* No marking in progress. I holds. */
+ /* Mark stack is empty. */
+
+# define MS_PUSH_RESCUERS 1 /* Rescuing objects are currently */
+ /* being pushed. I holds, except */
+ /* that grungy roots may point to */
+ /* unmarked objects, as may marked */
+ /* grungy objects above scan_ptr. */
+
+# define MS_PUSH_UNCOLLECTABLE 2
+ /* I holds, except that marked */
+ /* uncollectable objects above scan_ptr */
+ /* may point to unmarked objects. */
+ /* Roots may point to unmarked objects */
+
+# define MS_ROOTS_PUSHED 3 /* I holds, mark stack may be nonempty */
+
+# define MS_PARTIALLY_INVALID 4 /* I may not hold, e.g. because of M.S. */
+ /* overflow. However marked heap */
+ /* objects below scan_ptr point to */
+ /* marked or stacked objects. */
+
+# define MS_INVALID 5 /* I may not hold. */
+
+extern mark_state_t GC_mark_state;
+
+#endif /* GC_PMARK_H */
+
diff --git a/tools/build/src/engine/boehm_gc/include/private/gc_priv.h b/tools/build/src/engine/boehm_gc/include/private/gc_priv.h
new file mode 100644
index 000000000..ec93ffea9
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/gc_priv.h
@@ -0,0 +1,2040 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+
+# ifndef GC_PRIVATE_H
+# define GC_PRIVATE_H
+
+# include <stdlib.h>
+# if !(defined( sony_news ) )
+# include <stddef.h>
+# endif
+
+#ifdef DGUX
+# include <sys/types.h>
+# include <sys/time.h>
+# include <sys/resource.h>
+#endif /* DGUX */
+
+#ifdef BSD_TIME
+# include <sys/types.h>
+# include <sys/time.h>
+# include <sys/resource.h>
+#endif /* BSD_TIME */
+
+#ifdef PARALLEL_MARK
+# define AO_REQUIRE_CAS
+#endif
+
+#ifndef _GC_H
+# include "../gc.h"
+#endif
+
+#ifndef GC_TINY_FL_H
+# include "../gc_tiny_fl.h"
+#endif
+
+#ifndef GC_MARK_H
+# include "../gc_mark.h"
+#endif
+
+typedef GC_word word;
+typedef GC_signed_word signed_word;
+typedef unsigned int unsigned32;
+
+typedef int GC_bool;
+# define TRUE 1
+# define FALSE 0
+
+typedef char * ptr_t; /* A generic pointer to which we can add */
+ /* byte displacements and which can be used */
+ /* for address comparisons. */
+
+# ifndef GCCONFIG_H
+# include "gcconfig.h"
+# endif
+
+# ifndef HEADERS_H
+# include "gc_hdrs.h"
+# endif
+
+#if __GNUC__ >= 3
+# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
+ /* Equivalent to (expr), but predict that usually (expr)==outcome. */
+# define INLINE inline
+#else
+# define EXPECT(expr, outcome) (expr)
+# define INLINE
+#endif /* __GNUC__ */
+
+# ifndef GC_LOCKS_H
+# include "gc_locks.h"
+# endif
+
+# ifdef STACK_GROWS_DOWN
+# define COOLER_THAN >
+# define HOTTER_THAN <
+# define MAKE_COOLER(x,y) if ((x)+(y) > (x)) {(x) += (y);} \
+ else {(x) = (ptr_t)ONES;}
+# define MAKE_HOTTER(x,y) (x) -= (y)
+# else
+# define COOLER_THAN <
+# define HOTTER_THAN >
+# define MAKE_COOLER(x,y) if ((x)-(y) < (x)) {(x) -= (y);} else {(x) = 0;}
+# define MAKE_HOTTER(x,y) (x) += (y)
+# endif
+
+#if defined(AMIGA) && defined(__SASC)
+# define GC_FAR __far
+#else
+# define GC_FAR
+#endif
+
+
+/*********************************/
+/* */
+/* Definitions for conservative */
+/* collector */
+/* */
+/*********************************/
+
+/*********************************/
+/* */
+/* Easily changeable parameters */
+/* */
+/*********************************/
+
+/* #define STUBBORN_ALLOC */
+		/* Enable stubborn allocation, and thus a limited	*/
+ /* form of incremental collection w/o dirty bits. */
+
+/* #define ALL_INTERIOR_POINTERS */
+ /* Forces all pointers into the interior of an */
+ /* object to be considered valid. Also causes the */
+ /* sizes of all objects to be inflated by at least */
+ /* one byte. This should suffice to guarantee */
+ /* that in the presence of a compiler that does */
+ /* not perform garbage-collector-unsafe */
+ /* optimizations, all portable, strictly ANSI */
+ /* conforming C programs should be safely usable */
+ /* with malloc replaced by GC_malloc and free */
+ /* calls removed. There are several disadvantages: */
+ /* 1. There are probably no interesting, portable, */
+ /* strictly ANSI conforming C programs. */
+ /* 2. This option makes it hard for the collector */
+ /* to allocate space that is not ``pointed to'' */
+ /* by integers, etc. Under SunOS 4.X with a */
+		/* statically linked libc, we empirically		*/
+ /* observed that it would be difficult to */
+ /* allocate individual objects larger than 100K. */
+ /* Even if only smaller objects are allocated, */
+ /* more swap space is likely to be needed. */
+ /* Fortunately, much of this will never be */
+ /* touched. */
+ /* If you can easily avoid using this option, do. */
+ /* If not, try to keep individual objects small. */
+ /* This is now really controlled at startup, */
+ /* through GC_all_interior_pointers. */
+
+
+#define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
+
+#if !defined(DONT_ADD_BYTE_AT_END)
+# define EXTRA_BYTES GC_all_interior_pointers
+# define MAX_EXTRA_BYTES 1
+#else
+# define EXTRA_BYTES 0
+# define MAX_EXTRA_BYTES 0
+#endif
+
+
+# ifndef LARGE_CONFIG
+# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+ /* Must be multiple of largest page size. */
+# define MAXHINCR 2048 /* Maximum heap increment, in blocks */
+# else
+# define MINHINCR 64
+# define MAXHINCR 4096
+# endif
+
+# define TIME_LIMIT 50 /* We try to keep pause times from exceeding */
+ /* this by much. In milliseconds. */
+
+# define BL_LIMIT GC_black_list_spacing
+ /* If we need a block of N bytes, and we have */
+ /* a block of N + BL_LIMIT bytes available, */
+ /* and N > BL_LIMIT, */
+ /* but all possible positions in it are */
+ /* blacklisted, we just use it anyway (and */
+ /* print a warning, if warnings are enabled). */
+ /* This risks subsequently leaking the block */
+ /* due to a false reference. But not using */
+ /* the block risks unreasonable immediate */
+ /* heap growth. */
+
+/*********************************/
+/* */
+/* Stack saving for debugging */
+/* */
+/*********************************/
+
+#ifdef NEED_CALLINFO
+ struct callinfo {
+ word ci_pc; /* Caller, not callee, pc */
+# if NARGS > 0
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+# endif
+# if (NFRAMES * (NARGS + 1)) % 2 == 1
+ /* Likely alignment problem. */
+ word ci_dummy;
+# endif
+ };
+#endif
+
+#ifdef SAVE_CALL_CHAIN
+
+/* Fill in the pc and argument information for up to NFRAMES of my */
+/* callers.  Ignore my frame and my caller's frame. */
+void GC_save_callers(struct callinfo info[NFRAMES]);
+
+void GC_print_callers(struct callinfo info[NFRAMES]);
+
+#endif
+
+
+/*********************************/
+/* */
+/* OS interface routines */
+/* */
+/*********************************/
+
+#ifdef BSD_TIME
+# undef CLOCK_TYPE
+# undef GET_TIME
+# undef MS_TIME_DIFF
+# define CLOCK_TYPE struct timeval
+# define GET_TIME(x) { struct rusage rusage; \
+ getrusage (RUSAGE_SELF, &rusage); \
+ x = rusage.ru_utime; }
+# define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
+ + (double) (a.tv_usec - b.tv_usec) / 1000.0)
+#else /* !BSD_TIME */
+# if defined(MSWIN32) || defined(MSWINCE)
+# include <windows.h>
+# include <winbase.h>
+# define CLOCK_TYPE DWORD
+# define GET_TIME(x) x = GetTickCount()
+# define MS_TIME_DIFF(a,b) ((long)((a)-(b)))
+# else /* !MSWIN32, !MSWINCE, !BSD_TIME */
+# include <time.h>
+# if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
+ clock_t clock(); /* Not in time.h, where it belongs */
+# endif
+# if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
+# include <machine/limits.h>
+# define CLOCKS_PER_SEC CLK_TCK
+# endif
+# if !defined(CLOCKS_PER_SEC)
+# define CLOCKS_PER_SEC 1000000
+/*
+ * This is technically a bug in the implementation. ANSI requires that
+ * CLOCKS_PER_SEC be defined. But at least under SunOS4.1.1, it isn't.
+ * Also note that the combination of ANSI C and POSIX is incredibly gross
+ * here. The type clock_t is used by both clock() and times(). But on
+ * some machines these use different notions of a clock tick, CLOCKS_PER_SEC
+ * seems to apply only to clock. Hence we use it here. On many machines,
+ * including SunOS, clock actually uses units of microseconds (which are
+ * not really clock ticks).
+ */
+# endif
+# define CLOCK_TYPE clock_t
+# define GET_TIME(x) x = clock()
+# define MS_TIME_DIFF(a,b) ((unsigned long) \
+ (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
+# endif /* !MSWIN32 */
+#endif /* !BSD_TIME */
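+
+/*
+ * Usage sketch (editorial, never compiled): how the timing macros above are
+ * typically combined with TIME_LIMIT to bound a collection pause.
+ */
+#if 0
+  {
+    CLOCK_TYPE start_time, current_time;
+
+    GET_TIME(start_time);
+    /* ... perform a bounded chunk of marking work ... */
+    GET_TIME(current_time);
+    if (MS_TIME_DIFF(current_time, start_time) >= TIME_LIMIT) {
+        /* Out of time: stop here and resume in the next increment. */
+    }
+  }
+#endif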
+
+/* We use bzero and bcopy internally. They may not be available. */
+# if defined(SPARC) && defined(SUNOS4)
+# define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(AMIGA)
+# define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(NEXT)
+# define BCOPY_EXISTS
+# endif
+# if defined(VAX)
+# define BCOPY_EXISTS
+# endif
+# if defined(AMIGA)
+# include <string.h>
+# define BCOPY_EXISTS
+# endif
+# if defined(DARWIN)
+# include <string.h>
+# define BCOPY_EXISTS
+# endif
+
+# ifndef BCOPY_EXISTS
+# include <string.h>
+# define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
+# define BZERO(x,n) memset(x, 0, (size_t)(n))
+# else
+# define BCOPY(x,y,n) bcopy((void *)(x),(void *)(y),(size_t)(n))
+# define BZERO(x,n) bzero((void *)(x),(size_t)(n))
+# endif
+
+/*
+ * Stop and restart mutator threads.
+ */
+# ifdef PCR
+# include "th/PCR_ThCtl.h"
+# define STOP_WORLD() \
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever)
+# define START_WORLD() \
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever);
+# else
+# if defined(GC_SOLARIS_THREADS) || defined(GC_WIN32_THREADS) \
+ || defined(GC_PTHREADS)
+ void GC_stop_world();
+ void GC_start_world();
+# define STOP_WORLD() GC_stop_world()
+# define START_WORLD() GC_start_world()
+# else
+# define STOP_WORLD()
+# define START_WORLD()
+# endif
+# endif
+
+/* Abandon ship */
+# ifdef PCR
+# define ABORT(s) PCR_Base_Panic(s)
+# else
+# ifdef SMALL_CONFIG
+# define ABORT(msg) abort()
+# else
+ GC_API void GC_abort(const char * msg);
+# define ABORT(msg) GC_abort(msg)
+# endif
+# endif
+
+/* Exit abnormally, but without making a mess (e.g. out of memory) */
+# ifdef PCR
+# define EXIT() PCR_Base_Exit(1,PCR_waitForever)
+# else
+# define EXIT() (void)exit(1)
+# endif
+
+/* Print warning message, e.g. almost out of memory. */
+# define WARN(msg,arg) (*GC_current_warn_proc)("GC Warning: " msg, (GC_word)(arg))
+extern GC_warn_proc GC_current_warn_proc;
+
+/* Get environment entry */
+#if !defined(NO_GETENV)
+# if defined(EMPTY_GETENV_RESULTS)
+ /* Workaround for a reputed Wine bug. */
+ static inline char * fixed_getenv(const char *name)
+ {
+ char * tmp = getenv(name);
+ if (tmp == 0 || strlen(tmp) == 0)
+ return 0;
+ return tmp;
+ }
+# define GETENV(name) fixed_getenv(name)
+# else
+# define GETENV(name) getenv(name)
+# endif
+#else
+# define GETENV(name) 0
+#endif
+
+#if defined(DARWIN)
+# if defined(POWERPC)
+# if CPP_WORDSZ == 32
+# define GC_THREAD_STATE_T ppc_thread_state_t
+# define GC_MACH_THREAD_STATE PPC_THREAD_STATE
+# define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT
+# define GC_MACH_HEADER mach_header
+# define GC_MACH_SECTION section
+# else
+# define GC_THREAD_STATE_T ppc_thread_state64_t
+# define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
+# define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
+# define GC_MACH_HEADER mach_header_64
+# define GC_MACH_SECTION section_64
+# endif
+# elif defined(I386) || defined(X86_64)
+# if CPP_WORDSZ == 32
+# define GC_THREAD_STATE_T x86_thread_state32_t
+# define GC_MACH_THREAD_STATE x86_THREAD_STATE32
+# define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
+# define GC_MACH_HEADER mach_header
+# define GC_MACH_SECTION section
+# else
+# define GC_THREAD_STATE_T x86_thread_state64_t
+# define GC_MACH_THREAD_STATE x86_THREAD_STATE64
+# define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
+# define GC_MACH_HEADER mach_header_64
+# define GC_MACH_SECTION section_64
+# endif
+# else
+# error define GC_THREAD_STATE_T
+# define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
+# define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
+# endif
+/* Try to work out the right way to access thread state structure members.
+ The structure has changed its definition in different Darwin versions.
+ This now defaults to the (older) names without __, thus hopefully,
+ not breaking any existing Makefile.direct builds. */
+# if defined (HAS_PPC_THREAD_STATE___R0) \
+ || defined (HAS_PPC_THREAD_STATE64___R0) \
+ || defined (HAS_X86_THREAD_STATE32___EAX) \
+ || defined (HAS_X86_THREAD_STATE64___RAX)
+# define THREAD_FLD(x) __ ## x
+# else
+# define THREAD_FLD(x) x
+# endif
+#endif
+
+/*********************************/
+/* */
+/* Word-size-dependent defines */
+/* */
+/*********************************/
+
+#if CPP_WORDSZ == 32
+# define WORDS_TO_BYTES(x) ((x)<<2)
+# define BYTES_TO_WORDS(x) ((x)>>2)
+# define LOGWL ((word)5) /* log[2] of CPP_WORDSZ */
+# define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word */
+# if ALIGNMENT != 4
+# define UNALIGNED
+# endif
+#endif
+
+#if CPP_WORDSZ == 64
+# define WORDS_TO_BYTES(x) ((x)<<3)
+# define BYTES_TO_WORDS(x) ((x)>>3)
+# define LOGWL ((word)6) /* log[2] of CPP_WORDSZ */
+# define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word */
+# if ALIGNMENT != 8
+# define UNALIGNED
+# endif
+#endif
+
+/* The first TINY_FREELISTS free lists correspond to the first */
+/* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep */
+/* separate free lists for each multiple of GRANULE_BYTES */
+/* up to (TINY_FREELISTS-1) * GRANULE_BYTES. After that they */
+/* may be spread out further. */
+#include "../gc_tiny_fl.h"
+#define GRANULE_BYTES GC_GRANULE_BYTES
+#define TINY_FREELISTS GC_TINY_FREELISTS
+
+#define WORDSZ ((word)CPP_WORDSZ)
+#define SIGNB ((word)1 << (WORDSZ-1))
+#define BYTES_PER_WORD ((word)(sizeof (word)))
+#define ONES ((word)(signed_word)(-1))
+#define divWORDSZ(n) ((n) >> LOGWL) /* divide n by size of word */
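+
+/* Example (editorial): with CPP_WORDSZ == 64 (LOGWL == 6), mark bit number */
+/* 75 lives in word divWORDSZ(75) == 1 of a mark bit array, at bit position */
+/* modWORDSZ(75) == 11; SET_MARK_BIT_EXIT_IF_SET in gc_pmark.h uses exactly */
+/* this word/bit decomposition.                                             */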
+
+#if GRANULE_BYTES == 8
+# define BYTES_TO_GRANULES(n) ((n)>>3)
+# define GRANULES_TO_BYTES(n) ((n)<<3)
+# if CPP_WORDSZ == 64
+# define GRANULES_TO_WORDS(n) (n)
+# elif CPP_WORDSZ == 32
+# define GRANULES_TO_WORDS(n) ((n)<<1)
+# else
+# define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
+# endif
+#elif GRANULE_BYTES == 16
+# define BYTES_TO_GRANULES(n) ((n)>>4)
+# define GRANULES_TO_BYTES(n) ((n)<<4)
+# if CPP_WORDSZ == 64
+# define GRANULES_TO_WORDS(n) ((n)<<1)
+# elif CPP_WORDSZ == 32
+# define GRANULES_TO_WORDS(n) ((n)<<2)
+# else
+# define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
+# endif
+#else
+# error Bad GRANULE_BYTES value
+#endif
+
+/*********************/
+/* */
+/* Size Parameters */
+/* */
+/*********************/
+
+/* heap block size, bytes. Should be power of 2 */
+
+#ifndef HBLKSIZE
+# ifdef SMALL_CONFIG
+# define CPP_LOG_HBLKSIZE 10
+# else
+# if (CPP_WORDSZ == 32) || (defined(HPUX) && defined(HP_PA))
+ /* HPUX/PA seems to use 4K pages with the 64 bit ABI */
+# define CPP_LOG_HBLKSIZE 12
+# else
+# define CPP_LOG_HBLKSIZE 13
+# endif
+# endif
+#else
+# if HBLKSIZE == 512
+# define CPP_LOG_HBLKSIZE 9
+# endif
+# if HBLKSIZE == 1024
+# define CPP_LOG_HBLKSIZE 10
+# endif
+# if HBLKSIZE == 2048
+# define CPP_LOG_HBLKSIZE 11
+# endif
+# if HBLKSIZE == 4096
+# define CPP_LOG_HBLKSIZE 12
+# endif
+# if HBLKSIZE == 8192
+# define CPP_LOG_HBLKSIZE 13
+# endif
+# if HBLKSIZE == 16384
+# define CPP_LOG_HBLKSIZE 14
+# endif
+# ifndef CPP_LOG_HBLKSIZE
+ --> fix HBLKSIZE
+# endif
+# undef HBLKSIZE
+#endif
+# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
+# define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
+# define HBLKSIZE ((size_t)CPP_HBLKSIZE)
+
+
+/* Max size objects supported by freelist (larger objects are		*/
+/* allocated directly with allchblk(), by rounding to the next		*/
+/* multiple of HBLKSIZE).						*/
+
+#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
+#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
+#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
+#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
+#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
+#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)
+
+# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
+
+# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
+ /* Equivalent to subtracting 2 hblk pointers. */
+ /* We do it this way because a compiler should */
+ /* find it hard to use an integer division */
+ /* instead of a shift. The bundled SunOS 4.1 */
+ /* o.w. sometimes pessimizes the subtraction to */
+ /* involve a call to .div. */
+
+# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
+
+# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))
+
+# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
+
+/* Round up byte allocation requests to integral number of words, etc. */
+# define ROUNDED_UP_WORDS(n) \
+ BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
+# define ROUNDED_UP_GRANULES(n) \
+ BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
+# if MAX_EXTRA_BYTES == 0
+# define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), 1)
+# else
+# define SMALL_OBJ(bytes) \
+ (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), 1) || \
+ (bytes) <= (MAXOBJBYTES - EXTRA_BYTES))
+ /* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES. */
+ /* But we try to avoid looking up EXTRA_BYTES. */
+# endif
+# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
+# ifndef MIN_WORDS
+# define MIN_WORDS 2 /* FIXME: obsolete */
+# endif
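+
+/* Worked example (editorial): with HBLKSIZE = 4096, GRANULE_BYTES = 16 and */
+/* GC_all_interior_pointers on (EXTRA_BYTES = 1), a pointer p = 0x50A1234   */
+/* has HBLKPTR(p) = 0x50A1000 and HBLKDISPL(p) = 0x234, and a 20 byte       */
+/* request needs ROUNDED_UP_GRANULES(20) = (20 + 15 + 1) >> 4 = 2 granules, */
+/* i.e. 32 bytes.                                                           */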
+
+
+/*
+ * Hash table representation of sets of pages.
+ * Implements a map from aligned HBLKSIZE chunks of the address space to one
+ * bit each.
+ * This assumes it is OK to spuriously set bits, e.g. because multiple
+ * addresses are represented by a single location.
+ * Used by black-listing code, and perhaps by dirty bit maintenance code.
+ */
+
+# ifdef LARGE_CONFIG
+# define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
+ /* which is >= 4GB. Each table takes */
+ /* 128KB, some of which may never be */
+ /* touched. */
+# else
+# ifdef SMALL_CONFIG
+# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
+ /* to more than 16K hblks = 64MB. */
+ /* Each hash table occupies 2K bytes. */
+# else /* default "medium" configuration */
+# define LOG_PHT_ENTRIES 16 /* Collisions are likely if heap grows */
+ /* to more than 64K hblks >= 256MB. */
+ /* Each hash table occupies 8K bytes. */
+ /* Even for somewhat smaller heaps, */
+ /* say half that, collisions may be an */
+ /* issue because we blacklist */
+ /* addresses outside the heap. */
+# endif
+# endif
+# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
+# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
+typedef word page_hash_table[PHT_SIZE];
+
+# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))
+
+# define get_pht_entry_from_index(bl, index) \
+ (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+# define set_pht_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
+# define clear_pht_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
+/* And a dumb but thread-safe version of set_pht_entry_from_index. */
+/* This sets (many) extra bits. */
+# define set_pht_entry_from_index_safe(bl, index) \
+ (bl)[divWORDSZ(index)] = ONES
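+
+/*
+ * Usage sketch (editorial, never compiled): recording and querying a heap
+ * block address in a page_hash_table, in the style of the black-listing
+ * code.  The table name and address below are made up for illustration.
+ */
+#if 0
+  {
+    static page_hash_table demo_bl;        /* statically zero-initialized */
+    ptr_t addr = (ptr_t)0x50A1234;
+    word i = PHT_HASH(addr);
+
+    set_pht_entry_from_index(demo_bl, i);
+    /* Later: a set bit means addr (or some address hashing to the same */
+    /* slot) was recorded; spurious hits are acceptable by design.      */
+    if (get_pht_entry_from_index(demo_bl, PHT_HASH(addr))) {
+        /* treat addr as black-listed */
+    }
+  }
+#endif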
+
+
+
+/********************************************/
+/* */
+/* H e a p B l o c k s */
+/* */
+/********************************************/
+
+/* heap block header */
+#define HBLKMASK (HBLKSIZE-1)
+
+#define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES)
+ /* upper bound */
+ /* We allocate 1 bit per allocation granule. */
+ /* If MARK_BIT_PER_GRANULE is defined, we use */
+ /* every nth bit, where n is the number of */
+ /* allocation granules per object. If */
+ /* MARK_BIT_PER_OBJ is defined, we only use the */
+ /* initial group of mark bits, and it is safe */
+ /* to allocate smaller header for large objects. */
+
+# ifdef USE_MARK_BYTES
+# define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
+ /* Unlike the other case, this is in units of bytes. */
+ /* Since we force doubleword alignment, we need at most one */
+ /* mark bit per 2 words. But we do allocate and set one */
+ /* extra mark bit to avoid an explicit check for the */
+ /* partial object at the end of each block. */
+# else
+# define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
+# endif
+
+#ifdef PARALLEL_MARK
+# include <atomic_ops.h>
+ typedef AO_t counter_t;
+#else
+ typedef size_t counter_t;
+#endif
+
+/* We maintain layout maps for heap blocks containing objects of a given */
+/* size. Each entry in this map describes a byte offset and has the */
+/* following type. */
+struct hblkhdr {
+ struct hblk * hb_next; /* Link field for hblk free list */
+ /* and for lists of chunks waiting to be */
+ /* reclaimed. */
+ struct hblk * hb_prev; /* Backwards link for free list. */
+ struct hblk * hb_block; /* The corresponding block. */
+ unsigned char hb_obj_kind;
+ /* Kind of objects in the block. Each kind */
+ /* identifies a mark procedure and a set of */
+ /* list headers. Sometimes called regions. */
+ unsigned char hb_flags;
+# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
+ /* point to the first page of */
+ /* this object. */
+# define WAS_UNMAPPED 2 /* This is a free block, which has */
+ /* been unmapped from the address */
+ /* space. */
+ /* GC_remap must be invoked on it */
+ /* before it can be reallocated. */
+ /* Only set with USE_MUNMAP. */
+# define FREE_BLK 4 /* Block is free, i.e. not in use. */
+ unsigned short hb_last_reclaimed;
+ /* Value of GC_gc_no when block was */
+ /* last allocated or swept. May wrap. */
+ /* For a free block, this is maintained */
+ /* only for USE_MUNMAP, and indicates */
+ /* when the header was allocated, or */
+ /* when the size of the block last */
+ /* changed. */
+ size_t hb_sz; /* If in use, size in bytes, of objects in the block. */
+ /* if free, the size in bytes of the whole block */
+ word hb_descr; /* object descriptor for marking. See */
+ /* mark.h. */
+# ifdef MARK_BIT_PER_OBJ
+ unsigned32 hb_inv_sz; /* A good upper bound for 2**32/hb_sz. */
+ /* For large objects, we use */
+ /* LARGE_INV_SZ. */
+# define LARGE_INV_SZ (1 << 16)
+# else
+ unsigned char hb_large_block;
+ short * hb_map; /* Essentially a table of remainders */
+ /* mod BYTES_TO_GRANULES(hb_sz), except */
+ /* for large blocks. See GC_obj_map. */
+# endif
+ counter_t hb_n_marks; /* Number of set mark bits, excluding */
+ /* the one always set at the end. */
+ /* Currently it is concurrently */
+ /* updated and hence only approximate. */
+ /* But a zero value does guarantee that */
+ /* the block contains no marked */
+ /* objects. */
+ /* Ensuring this property means that we */
+ /* never decrement it to zero during a */
+ /* collection, and hence the count may */
+ /* be one too high. Due to concurrent */
+ /* updates, an arbitrary number of */
+ /* increments, but not all of them (!) */
+ /* may be lost, hence it may in theory */
+ /* be much too low. */
+ /* The count may also be too high if */
+ /* multiple mark threads mark the */
+ /* same object due to a race. */
+ /* Without parallel marking, the count */
+ /* is accurate. */
+# ifdef USE_MARK_BYTES
+ union {
+ char _hb_marks[MARK_BITS_SZ];
+ /* The i'th byte is 1 if the object */
+ /* starting at granule i or object i is */
+ /* marked, 0 o.w. */
+ /* The mark bit for the "one past the */
+ /* end" object is always set to avoid a */
+ /* special case test in the marker. */
+ word dummy; /* Force word alignment of mark bytes. */
+ } _mark_byte_union;
+# define hb_marks _mark_byte_union._hb_marks
+# else
+ word hb_marks[MARK_BITS_SZ];
+# endif /* !USE_MARK_BYTES */
+};
+
+# define ANY_INDEX 23 /* "Random" mark bit index for assertions */
+
+/* heap block body */
+
+# define HBLK_WORDS (HBLKSIZE/sizeof(word))
+# define HBLK_GRANULES (HBLKSIZE/GRANULE_BYTES)
+
+/* The number of objects in a block dedicated to a certain size; it	*/
+/* may erroneously yield zero (instead of one) for large objects.	*/
+# define HBLK_OBJS(sz_in_bytes) (HBLKSIZE/(sz_in_bytes))
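+/* Example (editorial): with HBLKSIZE = 4096, HBLK_OBJS(48) = 85, but for a */
+/* large object size such as 8192 the quotient is 0 rather than 1.          */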
+
+struct hblk {
+ char hb_body[HBLKSIZE];
+};
+
+# define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)
+
+# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ(sz + HBLKSIZE-1)
+ /* Size of block (in units of HBLKSIZE) needed to hold objects of */
+ /* given sz (in bytes). */
+
+/* Object free list link */
+# define obj_link(p) (*(void **)(p))
+
+# define LOG_MAX_MARK_PROCS 6
+# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
+
+/* Root sets. Logically private to mark_rts.c. But we don't want the */
+/* tables scanned, so we put them here. */
+/* MAX_ROOT_SETS is the maximum number of ranges that can be */
+/* registered as static roots. */
+# ifdef LARGE_CONFIG
+# define MAX_ROOT_SETS 4096
+# else
+ /* GCJ LOCAL: MAX_ROOT_SETS increased to permit more shared */
+ /* libraries to be loaded. */
+# define MAX_ROOT_SETS 1024
+# endif
+
+# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
+/* Maximum number of segments that can be excluded from root sets. */
+
+/*
+ * Data structure for excluded static roots.
+ */
+struct exclusion {
+ ptr_t e_start;
+ ptr_t e_end;
+};
+
+/* Data structure for list of root sets. */
+/* We keep a hash table, so that we can filter out duplicate additions. */
+/* Under Win32, we need to do a better job of filtering overlaps, so */
+/* we resort to sequential search, and pay the price. */
+struct roots {
+ ptr_t r_start;
+ ptr_t r_end;
+# if !defined(MSWIN32) && !defined(MSWINCE)
+ struct roots * r_next;
+# endif
+ GC_bool r_tmp;
+ /* Delete before registering new dynamic libraries */
+};
+
+#if !defined(MSWIN32) && !defined(MSWINCE)
+ /* Size of hash table index to roots. */
+# define LOG_RT_SIZE 6
+# define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
+#endif
+
+/* Lists of all heap blocks and free lists */
+/* as well as other random data structures */
+/* that should not be scanned by the */
+/* collector. */
+/* These are grouped together in a struct */
+/* so that they can be easily skipped by the */
+/* GC_mark routine. */
+/* The ordering is weird to make GC_malloc */
+/* faster by keeping the important fields */
+/* sufficiently close together that a */
+/* single load of a base register will do. */
+/* Scalars that could easily appear to */
+/* be pointers are also put here. */
+/* The main fields should precede any */
+/* conditionally included fields, so that */
+/* gc_inl.h will work even if a different set */
+/* of macros is defined when the client is */
+/* compiled. */
+
+struct _GC_arrays {
+ word _heapsize; /* Heap size in bytes. */
+ word _max_heapsize;
+ word _requested_heapsize; /* Heap size due to explicit expansion */
+ ptr_t _last_heap_addr;
+ ptr_t _prev_heap_addr;
+ word _large_free_bytes;
+ /* Total bytes contained in blocks on large object free */
+ /* list. */
+ word _large_allocd_bytes;
+ /* Total number of bytes in allocated large objects blocks. */
+ /* For the purposes of this counter and the next one only, a */
+ /* large object is one that occupies a block of at least */
+ /* 2*HBLKSIZE. */
+ word _max_large_allocd_bytes;
+ /* Maximum number of bytes that were ever allocated in */
+ /* large object blocks. This is used to help decide when it */
+ /* is safe to split up a large block. */
+ word _bytes_allocd_before_gc;
+			/* Number of bytes allocated before this 	*/
+			/* collection cycle.				*/
+# ifndef SEPARATE_GLOBALS
+ word _bytes_allocd;
+	/* Number of bytes allocated during this collection cycle */
+# endif
+ word _bytes_finalized;
+ /* Approximate number of bytes in objects (and headers) */
+        /* that became ready for finalization in the last      */
+ /* collection. */
+ word _non_gc_bytes_at_gc;
+ /* Number of explicitly managed bytes of storage */
+ /* at last collection. */
+ word _bytes_freed;
+ /* Number of explicitly deallocated bytes of memory */
+ /* since last collection. */
+ word _finalizer_bytes_freed;
+ /* Bytes of memory explicitly deallocated while */
+ /* finalizers were running. Used to approximate mem. */
+ /* explicitly deallocated by finalizers. */
+ ptr_t _scratch_end_ptr;
+ ptr_t _scratch_last_end_ptr;
+ /* Used by headers.c, and can easily appear to point to */
+ /* heap. */
+ GC_mark_proc _mark_procs[MAX_MARK_PROCS];
+ /* Table of user-defined mark procedures. There is */
+ /* a small number of these, which can be referenced */
+ /* by DS_PROC mark descriptors. See gc_mark.h. */
+
+# ifndef SEPARATE_GLOBALS
+ void *_objfreelist[MAXOBJGRANULES+1];
+ /* free list for objects */
+ void *_aobjfreelist[MAXOBJGRANULES+1];
+ /* free list for atomic objs */
+# endif
+
+ void *_uobjfreelist[MAXOBJGRANULES+1];
+ /* uncollectable but traced objs */
+ /* objects on this and auobjfreelist */
+ /* are always marked, except during */
+ /* garbage collections. */
+# ifdef ATOMIC_UNCOLLECTABLE
+ void *_auobjfreelist[MAXOBJGRANULES+1];
+# endif
+ /* uncollectable but traced objs */
+
+ word _composite_in_use;
+ /* Number of words in accessible composite */
+ /* objects. */
+ word _atomic_in_use;
+ /* Number of words in accessible atomic */
+ /* objects. */
+# ifdef USE_MUNMAP
+ word _unmapped_bytes;
+# endif
+
+ size_t _size_map[MAXOBJBYTES+1];
+ /* Number of words to allocate for a given allocation request in */
+ /* bytes. */
+
+# ifdef STUBBORN_ALLOC
+ ptr_t _sobjfreelist[MAXOBJGRANULES+1];
+# endif
+ /* free list for immutable objects */
+# ifdef MARK_BIT_PER_GRANULE
+ short * _obj_map[MAXOBJGRANULES+1];
+ /* If not NIL, then a pointer to a map of valid */
+ /* object addresses. */
+ /* _obj_map[sz_in_granules][i] is */
+ /* i % sz_in_granules. */
+ /* This is now used purely to replace a */
+ /* division in the marker by a table lookup. */
+ /* _obj_map[0] is used for large objects and */
+ /* contains all nonzero entries. This gets us */
+ /* out of the marker fast path without an extra */
+ /* test. */
+# define MAP_LEN BYTES_TO_GRANULES(HBLKSIZE)
+# endif
+# define VALID_OFFSET_SZ HBLKSIZE
+ char _valid_offsets[VALID_OFFSET_SZ];
+ /* GC_valid_offsets[i] == TRUE ==> i */
+ /* is registered as a displacement. */
+ char _modws_valid_offsets[sizeof(word)];
+ /* GC_valid_offsets[i] ==> */
+ /* GC_modws_valid_offsets[i%sizeof(word)] */
+# ifdef STUBBORN_ALLOC
+ page_hash_table _changed_pages;
+        /* Stubborn object pages that were changed since last call to	*/
+ /* GC_read_changed. */
+ page_hash_table _prev_changed_pages;
+        /* Stubborn object pages that were changed before last call to	*/
+ /* GC_read_changed. */
+# endif
+# if defined(PROC_VDB) || defined(MPROTECT_VDB) || \
+ defined(GWW_VDB) || defined(MANUAL_VDB)
+ page_hash_table _grungy_pages; /* Pages that were dirty at last */
+ /* GC_read_dirty. */
+# endif
+# if defined(MPROTECT_VDB) || defined(MANUAL_VDB)
+ volatile page_hash_table _dirty_pages;
+ /* Pages dirtied since last GC_read_dirty. */
+# endif
+# if defined(PROC_VDB) || defined(GWW_VDB)
+ page_hash_table _written_pages; /* Pages ever dirtied */
+# endif
+# ifdef LARGE_CONFIG
+# if CPP_WORDSZ > 32
+# define MAX_HEAP_SECTS 4096 /* overflows at roughly 64 GB */
+# else
+# define MAX_HEAP_SECTS 768 /* Separately added heap sections. */
+# endif
+# else
+# ifdef SMALL_CONFIG
+# define MAX_HEAP_SECTS 128 /* Roughly 256MB (128*2048*1K) */
+# else
+# define MAX_HEAP_SECTS 384 /* Roughly 3GB */
+# endif
+# endif
+ struct HeapSect {
+ ptr_t hs_start; size_t hs_bytes;
+ } _heap_sects[MAX_HEAP_SECTS];
+# if defined(MSWIN32) || defined(MSWINCE)
+ ptr_t _heap_bases[MAX_HEAP_SECTS];
+ /* Start address of memory regions obtained from kernel. */
+# endif
+# ifdef MSWINCE
+ word _heap_lengths[MAX_HEAP_SECTS];
+    	/* Committed lengths of memory regions obtained from kernel. */
+# endif
+ struct roots _static_roots[MAX_ROOT_SETS];
+# if !defined(MSWIN32) && !defined(MSWINCE)
+ struct roots * _root_index[RT_SIZE];
+# endif
+ struct exclusion _excl_table[MAX_EXCLUSIONS];
+  /* Block header index; see gc_hdrs.h */
+ bottom_index * _all_nils;
+ bottom_index * _top_index [TOP_SZ];
+#ifdef ENABLE_TRACE
+ ptr_t _trace_addr;
+#endif
+#ifdef SAVE_CALL_CHAIN
+ struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection.*/
+ /* Useful for debugging mysterious */
+ /* object disappearances. */
+ /* In the multithreaded case, we */
+ /* currently only save the calling */
+ /* stack. */
+#endif
+};
+
+GC_API GC_FAR struct _GC_arrays GC_arrays;
+
+# ifndef SEPARATE_GLOBALS
+# define GC_objfreelist GC_arrays._objfreelist
+# define GC_aobjfreelist GC_arrays._aobjfreelist
+# define GC_bytes_allocd GC_arrays._bytes_allocd
+# endif
+# define GC_uobjfreelist GC_arrays._uobjfreelist
+# ifdef ATOMIC_UNCOLLECTABLE
+# define GC_auobjfreelist GC_arrays._auobjfreelist
+# endif
+# define GC_sobjfreelist GC_arrays._sobjfreelist
+# define GC_valid_offsets GC_arrays._valid_offsets
+# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
+# ifdef STUBBORN_ALLOC
+# define GC_changed_pages GC_arrays._changed_pages
+# define GC_prev_changed_pages GC_arrays._prev_changed_pages
+# endif
+# ifdef MARK_BIT_PER_GRANULE
+# define GC_obj_map GC_arrays._obj_map
+# endif
+# define GC_last_heap_addr GC_arrays._last_heap_addr
+# define GC_prev_heap_addr GC_arrays._prev_heap_addr
+# define GC_large_free_bytes GC_arrays._large_free_bytes
+# define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
+# define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
+# define GC_bytes_finalized GC_arrays._bytes_finalized
+# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
+# define GC_bytes_freed GC_arrays._bytes_freed
+# define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
+# define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
+# define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
+# define GC_mark_procs GC_arrays._mark_procs
+# define GC_heapsize GC_arrays._heapsize
+# define GC_max_heapsize GC_arrays._max_heapsize
+# define GC_requested_heapsize GC_arrays._requested_heapsize
+# define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
+# define GC_heap_sects GC_arrays._heap_sects
+# define GC_last_stack GC_arrays._last_stack
+#ifdef ENABLE_TRACE
+#define GC_trace_addr GC_arrays._trace_addr
+#endif
+# ifdef USE_MUNMAP
+# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# endif
+# if defined(MSWIN32) || defined(MSWINCE)
+# define GC_heap_bases GC_arrays._heap_bases
+# endif
+# ifdef MSWINCE
+# define GC_heap_lengths GC_arrays._heap_lengths
+# endif
+# define GC_static_roots GC_arrays._static_roots
+# define GC_root_index GC_arrays._root_index
+# define GC_excl_table GC_arrays._excl_table
+# define GC_all_nils GC_arrays._all_nils
+# define GC_top_index GC_arrays._top_index
+# if defined(PROC_VDB) || defined(MPROTECT_VDB) || \
+ defined(GWW_VDB) || defined(MANUAL_VDB)
+# define GC_grungy_pages GC_arrays._grungy_pages
+# endif
+# if defined(MPROTECT_VDB) || defined(MANUAL_VDB)
+# define GC_dirty_pages GC_arrays._dirty_pages
+# endif
+# if defined(PROC_VDB) || defined(GWW_VDB)
+# define GC_written_pages GC_arrays._written_pages
+# endif
+# define GC_composite_in_use GC_arrays._composite_in_use
+# define GC_atomic_in_use GC_arrays._atomic_in_use
+# define GC_size_map GC_arrays._size_map
+
+# define beginGC_arrays ((ptr_t)(&GC_arrays))
+# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
+
+#define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes)
+
+/* Object kinds: */
+# define MAXOBJKINDS 16
+
+extern struct obj_kind {
+  void **ok_freelist;	/* Array of free list headers for this kind of object */
+ /* Point either to GC_arrays or to storage allocated */
+ /* with GC_scratch_alloc. */
+ struct hblk **ok_reclaim_list;
+ /* List headers for lists of blocks waiting to be */
+ /* swept. */
+ /* Indexed by object size in granules. */
+ word ok_descriptor; /* Descriptor template for objects in this */
+ /* block. */
+ GC_bool ok_relocate_descr;
+ /* Add object size in bytes to descriptor */
+ /* template to obtain descriptor. Otherwise */
+ /* template is used as is. */
+ GC_bool ok_init; /* Clear objects before putting them on the free list. */
+} GC_obj_kinds[MAXOBJKINDS];
+
+# define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds))
+# define endGC_obj_kinds (beginGC_obj_kinds + (sizeof GC_obj_kinds))
+
+/* Variables that used to be in GC_arrays, but need to be accessed by */
+/* inline allocation code. If they were in GC_arrays, the inlined */
+/* allocation code would include GC_arrays offsets (as it did), which */
+/* allocation code would include GC_arrays offsets (as it once did),	*/
+/* which introduced maintenance problems.				*/
+#ifdef SEPARATE_GLOBALS
+ word GC_bytes_allocd;
+	/* Number of bytes allocated during this collection cycle */
+ ptr_t GC_objfreelist[MAXOBJGRANULES+1];
+ /* free list for NORMAL objects */
+# define beginGC_objfreelist ((ptr_t)(&GC_objfreelist))
+# define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))
+
+ ptr_t GC_aobjfreelist[MAXOBJGRANULES+1];
+ /* free list for atomic (PTRFREE) objs */
+# define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist))
+# define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
+#endif
+
+/* Predefined kinds: */
+# define PTRFREE 0
+# define NORMAL 1
+# define UNCOLLECTABLE 2
+# ifdef ATOMIC_UNCOLLECTABLE
+# define AUNCOLLECTABLE 3
+# define STUBBORN 4
+# define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
+# else
+# define STUBBORN 3
+# define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
+# endif
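+/* For example, with ATOMIC_UNCOLLECTABLE defined the kind numbers are */
+/* PTRFREE=0, NORMAL=1, UNCOLLECTABLE=2, AUNCOLLECTABLE=3, STUBBORN=4, */
+/* and IS_UNCOLLECTABLE masks off the low bit, so that:                */
+/*      IS_UNCOLLECTABLE(NORMAL)         is (1 & ~1) == 2, i.e. FALSE  */
+/*      IS_UNCOLLECTABLE(UNCOLLECTABLE)  is (2 & ~1) == 2, i.e. TRUE   */
+/*      IS_UNCOLLECTABLE(AUNCOLLECTABLE) is (3 & ~1) == 2, i.e. TRUE   */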
+
+extern unsigned GC_n_kinds;
+
+GC_API word GC_fo_entries;
+
+extern word GC_n_heap_sects; /* Number of separately added heap */
+ /* sections. */
+
+extern word GC_page_size;
+
+# if defined(MSWIN32) || defined(MSWINCE)
+ struct _SYSTEM_INFO;
+ extern struct _SYSTEM_INFO GC_sysinfo;
+ extern word GC_n_heap_bases; /* See GC_heap_bases. */
+# endif
+
+extern word GC_total_stack_black_listed;
+ /* Number of bytes on stack blacklist. */
+
+extern word GC_black_list_spacing;
+ /* Average number of bytes between blacklisted */
+ /* blocks. Approximate. */
+ /* Counts only blocks that are */
+ /* "stack-blacklisted", i.e. that are */
+ /* problematic in the interior of an object. */
+
+extern struct hblk * GC_hblkfreelist[];
+ /* List of completely empty heap blocks */
+ /* Linked through hb_next field of */
+ /* header structure associated with */
+ /* block. */
+
+extern GC_bool GC_objects_are_marked; /* There are marked objects in */
+ /* the heap. */
+
+#ifndef SMALL_CONFIG
+ extern GC_bool GC_incremental;
+ /* Using incremental/generational collection. */
+# define TRUE_INCREMENTAL \
+ (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED)
+ /* True incremental, not just generational, mode */
+#else
+# define GC_incremental FALSE
+ /* Hopefully allow optimizer to remove some code. */
+# define TRUE_INCREMENTAL FALSE
+#endif
+
+extern GC_bool GC_dirty_maintained;
+ /* Dirty bits are being maintained, */
+ /* either for incremental collection, */
+ /* or to limit the root set. */
+
+extern word GC_root_size; /* Total size of registered root sections */
+
+extern GC_bool GC_debugging_started; /* GC_debug_malloc has been called. */
+
+extern long GC_large_alloc_warn_interval;
+ /* Interval between unsuppressed warnings. */
+
+extern long GC_large_alloc_warn_suppressed;
+ /* Number of warnings suppressed so far. */
+
+#ifdef THREADS
+ extern GC_bool GC_world_stopped;
+#endif
+
+/* Operations */
+# ifndef abs
+# define abs(x) ((x) < 0? (-(x)) : (x))
+# endif
+
+
+/* Marks are in a reserved area in */
+/* each heap block. Each word has one mark bit associated */
+/* with it. Only those corresponding to the beginning of an */
+/* object are used. */
+
+/* Set mark bit correctly, even if mark bits may be concurrently */
+/* accessed. */
+#ifdef PARALLEL_MARK
+# define OR_WORD(addr, bits) \
+ { AO_or((volatile AO_t *)(addr), (AO_t)bits); }
+#else
+# define OR_WORD(addr, bits) *(addr) |= (bits)
+#endif
+
+/* Mark bit operations */
+
+/*
+ * Retrieve, set, clear the nth mark bit in a given heap block.
+ *
+ * (Recall that bit n corresponds to the nth object or allocation granule
+ * relative to the beginning of the block, including unused words)
+ */
+
+#ifdef USE_MARK_BYTES
+# define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n])
+# define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) = 1
+# define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) = 0
+#else /* !USE_MARK_BYTES */
+# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
+ >> (modWORDSZ(n))) & (word)1)
+# define set_mark_bit_from_hdr(hhdr,n) \
+ OR_WORD((hhdr)->hb_marks+divWORDSZ(n), \
+ (word)1 << modWORDSZ(n))
+# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+ &= ~((word)1 << modWORDSZ(n))
+#endif /* !USE_MARK_BYTES */
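+/* E.g., in the bit-per-word case mark bit n is bit modWORDSZ(n) of    */
+/* word hb_marks[divWORDSZ(n)]: on a 32-bit target, bit 70 is bit 6 of */
+/* hb_marks[2], and set_mark_bit_from_hdr ors in ((word)1 << 6),       */
+/* using AO_or for an atomic update when PARALLEL_MARK is defined.     */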
+
+#ifdef MARK_BIT_PER_OBJ
+# define MARK_BIT_NO(offset, sz) (((unsigned)(offset))/(sz))
+ /* Get the mark bit index corresponding to the given byte */
+ /* offset and size (in bytes). */
+# define MARK_BIT_OFFSET(sz) 1
+ /* Spacing between useful mark bits. */
+# define IF_PER_OBJ(x) x
+# define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES? 1 : HBLK_OBJS(sz))
+ /* Position of final, always set, mark bit. */
+#else /* MARK_BIT_PER_GRANULE */
+# define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((unsigned)(offset))
+# define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
+# define IF_PER_OBJ(x)
+# define FINAL_MARK_BIT(sz) \
+ ((sz) > MAXOBJBYTES? MARK_BITS_PER_HBLK \
+ : BYTES_TO_GRANULES(sz * HBLK_OBJS(sz)))
+#endif
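+/* Worked example, assuming the usual granule of two words (16 bytes   */
+/* with 64-bit words): in MARK_BIT_PER_GRANULE mode an object at byte  */
+/* offset 48 within its block has MARK_BIT_NO(48, sz) == 3, and for    */
+/* 32-byte objects MARK_BIT_OFFSET(32) == 2, i.e. successive objects'  */
+/* mark bits are two granule positions apart.                          */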
+
+/* Important internal collector routines */
+
+ptr_t GC_approx_sp(void);
+
+GC_bool GC_should_collect(void);
+
+void GC_apply_to_all_blocks(void (*fn) (struct hblk *h, word client_data),
+ word client_data);
+ /* Invoke fn(hbp, client_data) for each */
+ /* allocated heap block. */
+struct hblk * GC_next_used_block(struct hblk * h);
+ /* Return first in-use block >= h */
+struct hblk * GC_prev_block(struct hblk * h);
+ /* Return last block <= h. Returned block */
+ /* is managed by GC, but may or may not be in */
+ /* use. */
+void GC_mark_init(void);
+void GC_clear_marks(void); /* Clear mark bits for all heap objects. */
+void GC_invalidate_mark_state(void);
+ /* Tell the marker that marked */
+ /* objects may point to unmarked */
+ /* ones, and roots may point to */
+ /* unmarked objects. */
+ /* Reset mark stack. */
+GC_bool GC_mark_stack_empty(void);
+GC_bool GC_mark_some(ptr_t cold_gc_frame);
+			/* Perform about one page's worth of marking	*/
+ /* work of whatever kind is needed. Returns */
+ /* quickly if no collection is in progress. */
+ /* Return TRUE if mark phase finished. */
+void GC_initiate_gc(void);
+ /* initiate collection. */
+ /* If the mark state is invalid, this */
+				/* becomes a full collection. Otherwise	*/
+ /* it's partial. */
+void GC_push_all(ptr_t bottom, ptr_t top);
+ /* Push everything in a range */
+ /* onto mark stack. */
+void GC_push_selected(ptr_t bottom, ptr_t top,
+ int (*dirty_fn) (struct hblk *h),
+ void (*push_fn) (ptr_t bottom, ptr_t top) );
+ /* Push all pages h in [b,t) s.t. */
+			    /* dirty_fn(h) != 0 onto the mark stack.	*/
+#ifndef SMALL_CONFIG
+ void GC_push_conditional (ptr_t b, ptr_t t, GC_bool all);
+#else
+# define GC_push_conditional(b, t, all) GC_push_all(b, t)
+#endif
+ /* Do either of the above, depending */
+ /* on the third arg. */
+void GC_push_all_stack (ptr_t b, ptr_t t);
+ /* As above, but consider */
+ /* interior pointers as valid */
+void GC_push_all_eager (ptr_t b, ptr_t t);
+ /* Same as GC_push_all_stack, but */
+ /* ensures that stack is scanned */
+ /* immediately, not just scheduled */
+ /* for scanning. */
+#ifndef THREADS
+ void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
+ ptr_t cold_gc_frame);
+ /* Similar to GC_push_all_eager, but only the */
+ /* part hotter than cold_gc_frame is scanned */
+ /* immediately. Needed to ensure that callee- */
+ /* save registers are not missed. */
+#else
+ /* In the threads case, we push part of the current thread stack */
+ /* with GC_push_all_eager when we push the registers. This gets the */
+ /* callee-save registers that may disappear. The remainder of the */
+ /* stacks are scheduled for scanning in *GC_push_other_roots, which */
+ /* is thread-package-specific. */
+#endif
+void GC_push_current_stack(ptr_t cold_gc_frame, void *context);
+ /* Push enough of the current stack eagerly to */
+ /* ensure that callee-save registers saved in */
+ /* GC frames are scanned. */
+ /* In the non-threads case, schedule entire */
+ /* stack for scanning. */
+ /* The second argument is a pointer to the */
+ /* (possibly null) thread context, for */
+ /* (currently hypothetical) more precise */
+ /* stack scanning. */
+void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
+ /* Push all or dirty roots. */
+extern void (*GC_push_other_roots)(void);
+ /* Push system or application specific roots */
+ /* onto the mark stack. In some environments */
+ /* (e.g. threads environments) this is */
+			/* predefined to be non-zero. A client-supplied	*/
+ /* replacement should also call the original */
+ /* function. */
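+			/* A hypothetical client-supplied replacement	*/
+			/* (sketch; my_region_start, my_region_end and	*/
+			/* old_roots are illustrative names) might be:	*/
+			/*   static void (*old_roots)(void);		*/
+			/*   static void my_push_other_roots(void)	*/
+			/*   {						*/
+			/*     GC_push_all(my_region_start,		*/
+			/*                 my_region_end);		*/
+			/*     if (old_roots != 0) (*old_roots)();	*/
+			/*   }						*/
+			/* where old_roots holds the previous value of	*/
+			/* GC_push_other_roots.				*/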
+extern void GC_push_gc_structures(void);
+ /* Push GC internal roots. These are normally */
+ /* included in the static data segment, and */
+			/* thus implicitly pushed. But we must do this	*/
+ /* explicitly if normal root processing is */
+ /* disabled. Calls the following: */
+ extern void GC_push_finalizer_structures(void);
+ extern void GC_push_stubborn_structures (void);
+# ifdef THREADS
+ extern void GC_push_thread_structures (void);
+# endif
+extern void (*GC_start_call_back) (void);
+ /* Called at start of full collections. */
+ /* Not called if 0. Called with allocation */
+ /* lock held. */
+ /* 0 by default. */
+void GC_push_regs_and_stack(ptr_t cold_gc_frame);
+
+void GC_push_regs(void);
+
+void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
+ ptr_t arg);
+
+# if defined(SPARC) || defined(IA64)
+ /* Cause all stacked registers to be saved in memory. Return a */
+ /* pointer to the top of the corresponding memory stack. */
+ ptr_t GC_save_regs_in_stack(void);
+# endif
+ /* Push register contents onto mark stack. */
+ /* If NURSERY is defined, the default push */
+ /* action can be overridden with GC_push_proc */
+
+# ifdef NURSERY
+ extern void (*GC_push_proc)(ptr_t);
+# endif
+# if defined(MSWIN32) || defined(MSWINCE)
+ void __cdecl GC_push_one(word p);
+# else
+ void GC_push_one(word p);
+ /* If p points to an object, mark it */
+ /* and push contents on the mark stack */
+ /* Pointer recognition test always */
+ /* accepts interior pointers, i.e. this */
+ /* is appropriate for pointers found on */
+ /* stack. */
+# endif
+# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
+ void GC_mark_and_push_stack(ptr_t p, ptr_t source);
+ /* Ditto, omits plausibility test */
+# else
+ void GC_mark_and_push_stack(ptr_t p);
+# endif
+void GC_push_marked(struct hblk * h, hdr * hhdr);
+ /* Push contents of all marked objects in h onto */
+ /* mark stack. */
+#ifdef SMALL_CONFIG
+# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
+#else
+ struct hblk * GC_push_next_marked_dirty(struct hblk * h);
+ /* Invoke GC_push_marked on next dirty block above h. */
+ /* Return a pointer just past the end of this block. */
+#endif /* !SMALL_CONFIG */
+struct hblk * GC_push_next_marked(struct hblk * h);
+ /* Ditto, but also mark from clean pages. */
+struct hblk * GC_push_next_marked_uncollectable(struct hblk * h);
+ /* Ditto, but mark only from uncollectable pages. */
+GC_bool GC_stopped_mark(GC_stop_func stop_func);
+ /* Stop world and mark from all roots */
+ /* and rescuers. */
+void GC_clear_hdr_marks(hdr * hhdr);
+ /* Clear the mark bits in a header */
+void GC_set_hdr_marks(hdr * hhdr);
+ /* Set the mark bits in a header */
+void GC_set_fl_marks(ptr_t p);
+ /* Set all mark bits associated with */
+ /* a free list. */
+#ifdef GC_ASSERTIONS
+ void GC_check_fl_marks(ptr_t p);
+ /* Check that all mark bits */
+ /* associated with a free list are */
+ /* set. Abort if not. */
+#endif
+void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp);
+void GC_remove_roots_inner(ptr_t b, ptr_t e);
+GC_bool GC_is_static_root(ptr_t p);
+ /* Is the address p in one of the registered static */
+ /* root sections? */
+# if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
+GC_bool GC_is_tmp_root(ptr_t p);
+ /* Is the address p in one of the temporary static */
+ /* root sections? */
+# endif
+void GC_register_dynamic_libraries(void);
+ /* Add dynamic library data sections to the root set. */
+void GC_cond_register_dynamic_libraries(void);
+ /* Remove and reregister dynamic libraries if we're */
+ /* configured to do that at each GC. */
+
+GC_bool GC_register_main_static_data(void);
+ /* We need to register the main data segment. Returns */
+ /* TRUE unless this is done implicitly as part of */
+ /* dynamic library registration. */
+
+/* Machine dependent startup routines */
+ptr_t GC_get_main_stack_base(void); /* Cold end of stack */
+#ifdef IA64
+ ptr_t GC_get_register_stack_base(void);
+ /* Cold end of register stack. */
+#endif
+void GC_register_data_segments(void);
+
+/* Black listing: */
+void GC_bl_init(void);
+# ifdef PRINT_BLACK_LIST
+ void GC_add_to_black_list_normal(word p, ptr_t source);
+ /* Register bits as a possible future false */
+ /* reference from the heap or static data */
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ if (GC_all_interior_pointers) { \
+ GC_add_to_black_list_stack((word)(bits), (source)); \
+ } else { \
+ GC_add_to_black_list_normal((word)(bits), (source)); \
+ }
+# else
+ void GC_add_to_black_list_normal(word p);
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
+ if (GC_all_interior_pointers) { \
+ GC_add_to_black_list_stack((word)(bits)); \
+ } else { \
+ GC_add_to_black_list_normal((word)(bits)); \
+ }
+# endif
+
+# ifdef PRINT_BLACK_LIST
+ void GC_add_to_black_list_stack(word p, ptr_t source);
+# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
+ GC_add_to_black_list_stack((word)(bits), (source))
+# else
+ void GC_add_to_black_list_stack(word p);
+# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
+ GC_add_to_black_list_stack((word)(bits))
+# endif
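+/* Note that these macros expand to a bare if/else statement; e.g.     */
+/* (sketch)                                                            */
+/*      if (cond) GC_ADD_TO_BLACK_LIST_STACK(bits, src); else ...      */
+/* does not parse as intended, so such uses need surrounding braces.   */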
+struct hblk * GC_is_black_listed(struct hblk * h, word len);
+ /* If there are likely to be false references */
+ /* to a block starting at h of the indicated */
+ /* length, then return the next plausible */
+ /* starting location for h that might avoid */
+ /* these false references. */
+void GC_promote_black_lists(void);
+ /* Declare an end to a black listing phase. */
+void GC_unpromote_black_lists(void);
+ /* Approximately undo the effect of the above. */
+ /* This actually loses some information, but */
+ /* only in a reasonably safe way. */
+word GC_number_stack_black_listed(struct hblk *start, struct hblk *endp1);
+ /* Return the number of (stack) blacklisted */
+ /* blocks in the range for statistical */
+ /* purposes. */
+
+ptr_t GC_scratch_alloc(size_t bytes);
+ /* GC internal memory allocation for */
+ /* small objects. Deallocation is not */
+ /* possible. */
+
+/* Heap block layout maps: */
+GC_bool GC_add_map_entry(size_t sz);
+ /* Add a heap block map for objects of */
+ /* size sz to obj_map. */
+ /* Return FALSE on failure. */
+void GC_register_displacement_inner(size_t offset);
+ /* Version of GC_register_displacement */
+ /* that assumes lock is already held */
+ /* and signals are already disabled. */
+
+void GC_initialize_offsets(void);
+ /* Initialize GC_valid_offsets, */
+ /* depending on current */
+ /* GC_all_interior_pointers settings. */
+
+/* hblk allocation: */
+void GC_new_hblk(size_t size_in_granules, int kind);
+ /* Allocate a new heap block, and build */
+ /* a free list in it. */
+
+ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear, ptr_t list);
+ /* Build a free list for objects of */
+ /* size sz in block h. Append list to */
+ /* end of the free lists. Possibly */
+ /* clear objects on the list. Normally */
+ /* called by GC_new_hblk, but also */
+ /* called explicitly without GC lock. */
+
+struct hblk * GC_allochblk (size_t size_in_bytes, int kind,
+ unsigned flags);
+ /* Allocate a heap block, inform */
+ /* the marker that block is valid */
+ /* for objects of indicated size. */
+
+ptr_t GC_alloc_large (size_t lb, int k, unsigned flags);
+ /* Allocate a large block of size lb bytes. */
+ /* The block is not cleared. */
+ /* Flags is 0 or IGNORE_OFF_PAGE. */
+				/* Calls GC_allochblk to do the actual	*/
+ /* allocation, but also triggers GC and/or */
+ /* heap expansion as appropriate. */
+ /* Does not update GC_bytes_allocd, but does */
+ /* other accounting. */
+
+ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags);
+ /* As above, but clear block if appropriate */
+ /* for kind k. */
+
+void GC_freehblk(struct hblk * p);
+ /* Deallocate a heap block and mark it */
+ /* as invalid. */
+
+/* Misc GC: */
+void GC_init_inner(void);
+GC_bool GC_expand_hp_inner(word n);
+void GC_start_reclaim(int abort_if_found);
+ /* Restore unmarked objects to free */
+ /* lists, or (if abort_if_found is */
+ /* TRUE) report them. */
+ /* Sweeping of small object pages is */
+ /* largely deferred. */
+void GC_continue_reclaim(size_t sz, int kind);
+ /* Sweep pages of the given size and */
+ /* kind, as long as possible, and */
+				/* as long as the corresponding free	*/
+				/* list is empty. Sz is in granules.	*/
+void GC_reclaim_or_delete_all(void);
+ /* Arrange for all reclaim lists to be */
+ /* empty. Judiciously choose between */
+ /* sweeping and discarding each page. */
+GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
+ /* Reclaim all blocks. Abort (in a */
+				/* consistent state) if stop_func returns TRUE. */
+ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
+ GC_bool init, ptr_t list, signed_word *count);
+ /* Rebuild free list in hbp with */
+ /* header hhdr, with objects of size sz */
+ /* bytes. Add list to the end of the */
+ /* free list. Add the number of */
+ /* reclaimed bytes to *count. */
+GC_bool GC_block_empty(hdr * hhdr);
+ /* Block completely unmarked? */
+GC_bool GC_never_stop_func(void);
+ /* Returns FALSE. */
+GC_bool GC_try_to_collect_inner(GC_stop_func f);
+
+ /* Collect; caller must have acquired */
+ /* lock and disabled signals. */
+ /* Collection is aborted if f returns */
+ /* TRUE. Returns TRUE if it completes */
+ /* successfully. */
+# define GC_gcollect_inner() \
+ (void) GC_try_to_collect_inner(GC_never_stop_func)
+void GC_finish_collection(void);
+ /* Finish collection. Mark bits are */
+ /* consistent and lock is still held. */
+GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page);
+ /* Collect or expand heap in an attempt */
+				/* Collect or expand heap in an attempt to */
+				/* make the indicated number of free	*/
+ /* until the blocks are available or */
+ /* until it fails by returning FALSE. */
+
+extern GC_bool GC_is_initialized; /* GC_init() has been run. */
+
+#if defined(MSWIN32) || defined(MSWINCE)
+ void GC_deinit(void);
+ /* Free any resources allocated by */
+ /* GC_init */
+#endif
+
+void GC_collect_a_little_inner(int n);
+ /* Do n units worth of garbage */
+ /* collection work, if appropriate. */
+ /* A unit is an amount appropriate for */
+ /* HBLKSIZE bytes of allocation. */
+/* void * GC_generic_malloc(size_t lb, int k); */
+ /* Allocate an object of the given */
+ /* kind. By default, there are only */
+				/* a few kinds: composite (normal),	*/
+				/* pointer-free (atomic), uncollectable, etc. */
+ /* We claim it's possible for clever */
+ /* client code that understands GC */
+ /* internals to add more, e.g. to */
+ /* communicate object layout info */
+ /* to the collector. */
+ /* The actual decl is in gc_mark.h. */
+void * GC_generic_malloc_ignore_off_page(size_t b, int k);
+ /* As above, but pointers past the */
+ /* first page of the resulting object */
+ /* are ignored. */
+void * GC_generic_malloc_inner(size_t lb, int k);
+ /* Ditto, but I already hold lock, etc. */
+void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k);
+ /* Allocate an object, where */
+ /* the client guarantees that there */
+ /* will always be a pointer to the */
+ /* beginning of the object while the */
+ /* object is live. */
+void GC_generic_malloc_many(size_t lb, int k, void **result);
+ /* Store a pointer to a list of newly */
+ /* allocated objects of kind k and size */
+ /* lb in *result. */
+				/* Caller must make sure that *result is */
+ /* traced even if objects are ptrfree. */
+ptr_t GC_allocobj(size_t sz, int kind);
+ /* Make the indicated */
+ /* free list nonempty, and return its */
+ /* head. Sz is in granules. */
+
+/* Allocation routines that bypass the thread local cache. */
+/* Used internally. */
+#ifdef THREAD_LOCAL_ALLOC
+ void * GC_core_malloc(size_t);
+ void * GC_core_malloc_atomic(size_t);
+# ifdef GC_GCJ_SUPPORT
+ void *GC_core_gcj_malloc(size_t, void *);
+# endif
+#endif /* THREAD_LOCAL_ALLOC */
+
+void GC_free_inner(void * p);
+void GC_debug_free_inner(void * p);
+
+void GC_init_headers(void);
+struct hblkhdr * GC_install_header(struct hblk *h);
+ /* Install a header for block h. */
+ /* Return 0 on failure, or the header */
+ /* otherwise. */
+GC_bool GC_install_counts(struct hblk * h, size_t sz);
+ /* Set up forwarding counts for block */
+ /* h of size sz. */
+ /* Return FALSE on failure. */
+void GC_remove_header(struct hblk * h);
+ /* Remove the header for block h. */
+void GC_remove_counts(struct hblk * h, size_t sz);
+ /* Remove forwarding counts for h. */
+hdr * GC_find_header(ptr_t h); /* Debugging only. */
+
+void GC_finalize(void);
+ /* Perform all indicated finalization actions */
+ /* on unmarked objects. */
+ /* Unreachable finalizable objects are enqueued */
+ /* for processing by GC_invoke_finalizers. */
+ /* Invoked with lock. */
+
+void GC_notify_or_invoke_finalizers(void);
+ /* If GC_finalize_on_demand is not set, invoke */
+ /* eligible finalizers. Otherwise: */
+ /* Call *GC_finalizer_notifier if there are */
+ /* finalizers to be run, and we haven't called */
+ /* this procedure yet this GC cycle. */
+
+GC_API void * GC_make_closure(GC_finalization_proc fn, void * data);
+GC_API void GC_debug_invoke_finalizer(void * obj, void * data);
+ /* Auxiliary fns to make finalization work */
+ /* correctly with displaced pointers introduced */
+ /* by the debugging allocators. */
+
+void GC_add_to_heap(struct hblk *p, size_t bytes);
+ /* Add a HBLKSIZE aligned chunk to the heap. */
+
+void GC_print_obj(ptr_t p);
+ /* P points to somewhere inside an object with */
+ /* debugging info. Print a human readable */
+ /* description of the object to stderr. */
+extern void (*GC_check_heap)(void);
+ /* Check that all objects in the heap with */
+ /* debugging info are intact. */
+ /* Add any that are not to GC_smashed list. */
+extern void (*GC_print_all_smashed) (void);
+ /* Print GC_smashed if it's not empty. */
+ /* Clear GC_smashed list. */
+extern void GC_print_all_errors (void);
+ /* Print smashed and leaked objects, if any. */
+ /* Clear the lists of such objects. */
+extern void (*GC_print_heap_obj) (ptr_t p);
+			/* If possible, print a more detailed		*/
+			/* description of the object referred		*/
+			/* to by p.					*/
+#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
+ void GC_print_address_map (void);
+ /* Print an address map of the process. */
+#endif
+
+extern GC_bool GC_have_errors; /* We saw a smashed or leaked object. */
+ /* Call error printing routine */
+ /* occasionally. */
+
+#ifndef SMALL_CONFIG
+ extern int GC_print_stats; /* Nonzero generates basic GC log. */
+ /* VERBOSE generates add'l messages. */
+#else
+# define GC_print_stats 0
+	/* Will this keep the message character strings out of the executable? */
+ /* It should ... */
+#endif
+#define VERBOSE 2
+
+#ifndef NO_DEBUGGING
+ extern GC_bool GC_dump_regularly; /* Generate regular debugging dumps. */
+# define COND_DUMP if (GC_dump_regularly) GC_dump();
+#else
+# define COND_DUMP
+#endif
+
+#ifdef KEEP_BACK_PTRS
+ extern long GC_backtraces;
+ void GC_generate_random_backtrace_no_gc(void);
+#endif
+
+extern GC_bool GC_print_back_height;
+
+#ifdef MAKE_BACK_GRAPH
+ void GC_print_back_graph_stats(void);
+#endif
+
+/* Macros used for collector internal allocation. */
+/* These assume the collector lock is held. */
+#ifdef DBG_HDRS_ALL
+ extern void * GC_debug_generic_malloc_inner(size_t lb, int k);
+ extern void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
+ int k);
+# define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner
+# define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
+ GC_debug_generic_malloc_inner_ignore_off_page
+# ifdef THREADS
+# define GC_INTERNAL_FREE GC_debug_free_inner
+# else
+# define GC_INTERNAL_FREE GC_debug_free
+# endif
+#else
+# define GC_INTERNAL_MALLOC GC_generic_malloc_inner
+# define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
+ GC_generic_malloc_inner_ignore_off_page
+# ifdef THREADS
+# define GC_INTERNAL_FREE GC_free_inner
+# else
+# define GC_INTERNAL_FREE GC_free
+# endif
+#endif
+
+/* Memory unmapping: */
+#ifdef USE_MUNMAP
+ void GC_unmap_old(void);
+ void GC_merge_unmapped(void);
+ void GC_unmap(ptr_t start, size_t bytes);
+ void GC_remap(ptr_t start, size_t bytes);
+ void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2);
+#endif
+
+/* Virtual dirty bit implementation: */
+/* Each implementation exports the following: */
+void GC_read_dirty(void);
+ /* Retrieve dirty bits. */
+GC_bool GC_page_was_dirty(struct hblk *h);
+ /* Read retrieved dirty bits. */
+GC_bool GC_page_was_ever_dirty(struct hblk *h);
+ /* Could the page contain valid heap pointers? */
+void GC_remove_protection(struct hblk *h, word nblocks,
+ GC_bool pointerfree);
+			/* h is about to be written or allocated. Ensure */
+ /* that it's not write protected by the virtual */
+ /* dirty bit implementation. */
+
+void GC_dirty_init(void);
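+/* A partial collection uses these roughly as follows (sketch):        */
+/*      GC_read_dirty();     retrieve the current dirty bits           */
+/*      then mark only from blocks h for which GC_page_was_dirty(h)    */
+/*      holds, e.g. by passing GC_page_was_dirty as the dirty_fn       */
+/*      argument of GC_push_selected.                                  */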
+
+/* Slow/general mark bit manipulation: */
+GC_API GC_bool GC_is_marked(ptr_t p);
+void GC_clear_mark_bit(ptr_t p);
+void GC_set_mark_bit(ptr_t p);
+
+/* Stubborn objects: */
+void GC_read_changed(void); /* Analogous to GC_read_dirty */
+GC_bool GC_page_was_changed(struct hblk * h);
+ /* Analogous to GC_page_was_dirty */
+void GC_clean_changing_list(void);
+ /* Collect obsolete changing list entries */
+void GC_stubborn_init(void);
+
+/* Debugging print routines: */
+void GC_print_block_list(void);
+void GC_print_hblkfreelist(void);
+void GC_print_heap_sects(void);
+void GC_print_static_roots(void);
+void GC_print_finalization_stats(void);
+/* void GC_dump(void); - declared in gc.h */
+
+#ifdef KEEP_BACK_PTRS
+ void GC_store_back_pointer(ptr_t source, ptr_t dest);
+ void GC_marked_for_finalization(ptr_t dest);
+# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
+#else
+# define GC_STORE_BACK_PTR(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest)
+#endif
+
+/* Make arguments appear live to compiler */
+# ifdef __WATCOMC__
+ void GC_noop(void*, ...);
+# else
+# ifdef __DMC__
+ GC_API void GC_noop(...);
+# else
+ GC_API void GC_noop();
+# endif
+# endif
+
+void GC_noop1(word);
+
+/* Logging and diagnostic output: */
+GC_API void GC_printf (const char * format, ...);
+ /* A version of printf that doesn't allocate, */
+ /* 1K total output length. */
+ /* (We use sprintf. Hopefully that doesn't */
+ /* allocate for long arguments.) */
+GC_API void GC_err_printf(const char * format, ...);
+GC_API void GC_log_printf(const char * format, ...);
+void GC_err_puts(const char *s);
+ /* Write s to stderr, don't buffer, don't add */
+ /* newlines, don't ... */
+
+#if defined(LINUX) && !defined(SMALL_CONFIG)
+ void GC_err_write(const char *buf, size_t len);
+ /* Write buf to stderr, don't buffer, don't add */
+ /* newlines, don't ... */
+#endif
+
+
+# ifdef GC_ASSERTIONS
+# define GC_ASSERT(expr) if(!(expr)) {\
+ GC_err_printf("Assertion failure: %s:%ld\n", \
+ __FILE__, (unsigned long)__LINE__); \
+ ABORT("assertion failure"); }
+# else
+# define GC_ASSERT(expr)
+# endif
+
+/* Check a compile time assertion at compile time. The error */
+/* message for failure is a bit baroque, but ... */
+#if defined(mips) && !defined(__GNUC__)
+/* DOB: MIPSPro C gets an internal error taking the sizeof an array type.
+ This code works correctly (ugliness is to avoid "unused var" warnings) */
+# define GC_STATIC_ASSERT(expr) do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0)
+#else
+# define GC_STATIC_ASSERT(expr) sizeof(char[(expr)? 1 : -1])
+#endif
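+/* E.g., GC_STATIC_ASSERT(sizeof(word) >= sizeof(ptr_t)) reduces (in   */
+/* the usual case) to a harmless sizeof expression when the condition  */
+/* holds, and to sizeof(char[-1]), a compile-time error, otherwise.    */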
+
+# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
+ /* We need additional synchronization facilities from the thread */
+ /* support. We believe these are less performance critical */
+ /* than the main garbage collector lock; standard pthreads-based */
+ /* implementations should be sufficient. */
+
+ /* The mark lock and condition variable. If the GC lock is also */
+ /* acquired, the GC lock must be acquired first. The mark lock is */
+ /* used to both protect some variables used by the parallel */
+ /* marker, and to protect GC_fl_builder_count, below. */
+ /* GC_notify_all_marker() is called when */
+ /* the state of the parallel marker changes */
+ /* in some significant way (see gc_mark.h for details). The */
+ /* latter set of events includes incrementing GC_mark_no. */
+ /* GC_notify_all_builder() is called when GC_fl_builder_count */
+ /* reaches 0. */
+
+ extern void GC_acquire_mark_lock();
+ extern void GC_release_mark_lock();
+ extern void GC_notify_all_builder();
+ /* extern void GC_wait_builder(); */
+ extern void GC_wait_for_reclaim();
+
+ extern word GC_fl_builder_count; /* Protected by mark lock. */
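+    /* A typical free-list builder interaction (sketch):               */
+    /*      GC_acquire_mark_lock(); ++GC_fl_builder_count;             */
+    /*      GC_release_mark_lock();                                    */
+    /*      ... build the free list without holding the lock ...       */
+    /*      GC_acquire_mark_lock();                                    */
+    /*      if (--GC_fl_builder_count == 0) GC_notify_all_builder();   */
+    /*      GC_release_mark_lock();                                    */
+    /* GC_wait_for_reclaim() can then block until the count drops      */
+    /* back to zero.                                                   */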
+# endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
+# ifdef PARALLEL_MARK
+ extern void GC_notify_all_marker();
+ extern void GC_wait_marker();
+ extern word GC_mark_no; /* Protected by mark lock. */
+
+ extern void GC_help_marker(word my_mark_no);
+ /* Try to help out parallel marker for mark cycle */
+ /* my_mark_no. Returns if the mark cycle finishes or */
+ /* was already done, or there was nothing to do for */
+ /* some other reason. */
+# endif /* PARALLEL_MARK */
+
+# if defined(GC_PTHREADS)
+ /* We define the thread suspension signal here, so that we can refer */
+ /* to it in the dirty bit implementation, if necessary. Ideally we */
+ /* would allocate a (real-time ?) signal using the standard mechanism.*/
+  /* Unfortunately, there is no standard mechanism. (There is one 	*/
+ /* in Linux glibc, but it's not exported.) Thus we continue to use */
+ /* the same hard-coded signals we've always used. */
+# if !defined(SIG_SUSPEND)
+# if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
+# if defined(SPARC) && !defined(SIGPWR)
+ /* SPARC/Linux doesn't properly define SIGPWR in <signal.h>.
+ * It is aliased to SIGLOST in asm/signal.h, though. */
+# define SIG_SUSPEND SIGLOST
+# else
+ /* Linuxthreads itself uses SIGUSR1 and SIGUSR2. */
+# define SIG_SUSPEND SIGPWR
+# endif
+# else /* !GC_LINUX_THREADS */
+# if defined(_SIGRTMIN)
+# define SIG_SUSPEND _SIGRTMIN + 6
+# else
+# define SIG_SUSPEND SIGRTMIN + 6
+# endif
+# endif
+# endif /* !SIG_SUSPEND */
+
+# endif
+
+/* Some macros for a setjmp that works across signal handlers		*/
+/* where possible, and a couple of routines to facilitate		*/
+/* catching accesses to bad addresses when that's */
+/* possible/needed. */
+#ifdef UNIX_LIKE
+# include <setjmp.h>
+# if defined(SUNOS5SIGS) && !defined(FREEBSD)
+# include <sys/siginfo.h>
+# endif
+ /* Define SETJMP and friends to be the version that restores */
+ /* the signal mask. */
+# define SETJMP(env) sigsetjmp(env, 1)
+# define LONGJMP(env, val) siglongjmp(env, val)
+# define JMP_BUF sigjmp_buf
+#else
+# ifdef ECOS
+# define SETJMP(env) hal_setjmp(env)
+# else
+# define SETJMP(env) setjmp(env)
+# endif
+# define LONGJMP(env, val) longjmp(env, val)
+# define JMP_BUF jmp_buf
+#endif
+
+/* Do we need the GC_find_limit machinery to find the end of a */
+/* data segment?							*/
+# if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
+# define NEED_FIND_LIMIT
+# endif
+
+# if !defined(STACKBOTTOM) && defined(HEURISTIC2)
+# define NEED_FIND_LIMIT
+# endif
+
+# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
+ || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
+# define NEED_FIND_LIMIT
+# endif
+
+#if defined(FREEBSD) && (defined(I386) || defined(X86_64) || defined(powerpc) \
+ || defined(__powerpc__))
+# include <machine/trap.h>
+# if !defined(PCR)
+# define NEED_FIND_LIMIT
+# endif
+#endif
+
+#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
+ && !defined(NEED_FIND_LIMIT)
+ /* Used by GC_init_netbsd_elf() in os_dep.c. */
+# define NEED_FIND_LIMIT
+#endif
+
+#if defined(IA64) && !defined(NEED_FIND_LIMIT)
+# define NEED_FIND_LIMIT
+ /* May be needed for register backing store base. */
+#endif
+
+# if defined(NEED_FIND_LIMIT) || \
+ defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)
+JMP_BUF GC_jmp_buf;
+
+/* Set up a handler for address faults which will longjmp to */
+/* GC_jmp_buf; */
+extern void GC_setup_temporary_fault_handler(void);
+
+/* Undo the effect of GC_setup_temporary_fault_handler. */
+extern void GC_reset_fault_handler(void);
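+/* The usual pattern (sketch) for probing a possibly unmapped address  */
+/* range is:                                                           */
+/*      GC_setup_temporary_fault_handler();                            */
+/*      if (SETJMP(GC_jmp_buf) == 0) {                                 */
+/*          ... dereference successive addresses until one faults,     */
+/*          at which point the handler longjmps back here ...          */
+/*      }                                                              */
+/*      GC_reset_fault_handler();                                      */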
+
+# endif /* Need to handle address faults. */
+
+# endif /* GC_PRIVATE_H */
diff --git a/tools/build/src/engine/boehm_gc/include/private/gcconfig.h b/tools/build/src/engine/boehm_gc/include/private/gcconfig.h
new file mode 100644
index 000000000..20f35bc3a
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/gcconfig.h
@@ -0,0 +1,2339 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2000-2004 Hewlett-Packard Development Company, L.P.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/*
+ * This header is private to the gc. It is almost always included from
+ * gc_priv.h. However it is possible to include it by itself if just the
+ * configuration macros are needed. In that
+ * case, a few declarations relying on types declared in gc_priv.h will be
+ * omitted.
+ */
+
+#ifndef GCCONFIG_H
+
+# define GCCONFIG_H
+
+# ifndef GC_PRIVATE_H
+ /* Fake ptr_t declaration, just to avoid compilation errors. */
+    /* This avoids many instances of "ifndef GC_PRIVATE_H" below.	*/
+ typedef struct GC_undefined_struct * ptr_t;
+# include <stddef.h> /* For size_t etc. */
+# endif
+
+/* Machine dependent parameters. Some tuning parameters can be found */
+/* near the top of gc_priv.h.						*/
+
+/* Machine specific parts contributed by various people. See README file. */
+
+/* First a unified test for Linux: */
+# if defined(linux) || defined(__linux__)
+# ifndef LINUX
+# define LINUX
+# endif
+# endif
+
+/* And one for NetBSD: */
+# if defined(__NetBSD__)
+# define NETBSD
+# endif
+
+/* And one for OpenBSD: */
+# if defined(__OpenBSD__)
+# define OPENBSD
+# endif
+
+/* And one for FreeBSD: */
+# if (defined(__FreeBSD__) || defined(__DragonFly__) || \
+ defined(__FreeBSD_kernel__)) && !defined(FREEBSD)
+# define FREEBSD
+# endif
+
+/* Determine the machine type: */
+# if defined(__arm__) || defined(__thumb__)
+# define ARM32
+# if !defined(LINUX) && !defined(NETBSD)
+# define NOSYS
+# define mach_type_known
+# endif
+# endif
+# if defined(sun) && defined(mc68000)
+# error SUNOS4 no longer supported
+# endif
+# if defined(hp9000s300)
+# error M68K based HP machines no longer supported.
+# endif
+# if defined(OPENBSD) && defined(m68k)
+# define M68K
+# define mach_type_known
+# endif
+# if defined(OPENBSD) && defined(__sparc__)
+# define SPARC
+# define mach_type_known
+# endif
+# if defined(NETBSD) && (defined(m68k) || defined(__m68k__))
+# define M68K
+# define mach_type_known
+# endif
+# if defined(NETBSD) && defined(__powerpc__)
+# define POWERPC
+# define mach_type_known
+# endif
+# if defined(NETBSD) && (defined(__arm32__) || defined(__arm__))
+# define ARM32
+# define mach_type_known
+# endif
+# if defined(NETBSD) && defined(__sh__)
+# define SH
+# define mach_type_known
+# endif
+# if defined(vax)
+# define VAX
+# ifdef ultrix
+# define ULTRIX
+# else
+# define BSD
+# endif
+# define mach_type_known
+# endif
+# if defined(__NetBSD__) && defined(__vax__)
+# define VAX
+# define mach_type_known
+# endif
+# if defined(mips) || defined(__mips) || defined(_mips)
+# define MIPS
+# if defined(nec_ews) || defined(_nec_ews)
+# define EWS4800
+# endif
+# if !defined(LINUX) && !defined(EWS4800) && !defined(NETBSD)
+# if defined(ultrix) || defined(__ultrix)
+# define ULTRIX
+# else
+# define IRIX5 /* or IRIX 6.X */
+# endif
+# endif /* !LINUX */
+# if defined(__NetBSD__) && defined(__MIPSEL__)
+# undef ULTRIX
+# endif
+# define mach_type_known
+# endif
+# if defined(DGUX) && (defined(i386) || defined(__i386__))
+# define I386
+# ifndef _USING_DGUX
+# define _USING_DGUX
+# endif
+# define mach_type_known
+# endif
+# if defined(sequent) && (defined(i386) || defined(__i386__))
+# define I386
+# define SEQUENT
+# define mach_type_known
+# endif
+# if defined(sun) && (defined(i386) || defined(__i386__))
+# define I386
+# define SOLARIS
+# define mach_type_known
+# endif
+# if defined(sun) && defined(__amd64)
+# define X86_64
+# define SOLARIS
+# define mach_type_known
+# endif
+# if (defined(__OS2__) || defined(__EMX__)) && defined(__32BIT__)
+# define I386
+# define OS2
+# define mach_type_known
+# endif
+# if defined(ibm032)
+# error IBM PC/RT no longer supported.
+# endif
+# if defined(sun) && (defined(sparc) || defined(__sparc))
+# define SPARC
+ /* Test for SunOS 5.x */
+# include <errno.h>
+# define SOLARIS
+# define mach_type_known
+# endif
+# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
+ && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__FreeBSD__) \
+ && !defined(__DragonFly__)
+# define SPARC
+# define DRSNX
+# define mach_type_known
+# endif
+# if defined(_IBMR2)
+# define POWERPC
+# define AIX
+# define mach_type_known
+# endif
+# if defined(__NetBSD__) && defined(__sparc__)
+# define SPARC
+# define mach_type_known
+# endif
+# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
+ /* The above test may need refinement */
+# define I386
+# if defined(_SCO_ELF)
+# define SCO_ELF
+# else
+# define SCO
+# endif
+# define mach_type_known
+# endif
+# if defined(_AUX_SOURCE)
+# error A/UX no longer supported
+# endif
+# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1) || defined(_PA_RISC2_0) \
+ || defined(hppa) || defined(__hppa__)
+# define HP_PA
+# if !defined(LINUX) && !defined(HPUX)
+# define HPUX
+# endif
+# define mach_type_known
+# endif
+# if defined(__ia64) && defined(_HPUX_SOURCE)
+# define IA64
+# ifndef HPUX
+# define HPUX
+# endif
+# define mach_type_known
+# endif
+# if defined(__BEOS__) && defined(_X86_)
+# define I386
+# define BEOS
+# define mach_type_known
+# endif
+# if defined(LINUX) && (defined(i386) || defined(__i386__))
+# define I386
+# define mach_type_known
+# endif
+# if defined(LINUX) && defined(__x86_64__)
+# define X86_64
+# define mach_type_known
+# endif
+# if defined(LINUX) && (defined(__ia64__) || defined(__ia64))
+# define IA64
+# define mach_type_known
+# endif
+# if defined(LINUX) && defined(__arm__)
+# define ARM32
+# define mach_type_known
+# endif
+# if defined(LINUX) && defined(__cris__)
+# ifndef CRIS
+# define CRIS
+# endif
+# define mach_type_known
+# endif
+# if defined(LINUX) && (defined(powerpc) || defined(__powerpc__) || \
+ defined(powerpc64) || defined(__powerpc64__))
+# define POWERPC
+# define mach_type_known
+# endif
+# if defined(FREEBSD) && (defined(powerpc) || defined(__powerpc__))
+# define POWERPC
+# define mach_type_known
+# endif
+# if defined(LINUX) && defined(__mc68000__)
+# define M68K
+# define mach_type_known
+# endif
+# if defined(LINUX) && (defined(sparc) || defined(__sparc__))
+# define SPARC
+# define mach_type_known
+# endif
+# if defined(LINUX) && defined(__arm__)
+# define ARM32
+# define mach_type_known
+# endif
+# if defined(LINUX) && defined(__sh__)
+# define SH
+# define mach_type_known
+# endif
+# if defined(LINUX) && defined(__m32r__)
+# define M32R
+# define mach_type_known
+# endif
+# if defined(__alpha) || defined(__alpha__)
+# define ALPHA
+# if !defined(LINUX) && !defined(NETBSD) && !defined(OPENBSD) && !defined(FREEBSD)
+# define OSF1 /* a.k.a Digital Unix */
+# endif
+# define mach_type_known
+# endif
+# if defined(_AMIGA) && !defined(AMIGA)
+# define AMIGA
+# endif
+# ifdef AMIGA
+# define M68K
+# define mach_type_known
+# endif
+# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
+# define M68K
+# define MACOS
+# define mach_type_known
+# endif
+# if defined(__MWERKS__) && defined(__powerc) && !defined(__MACH__)
+# define POWERPC
+# define MACOS
+# define mach_type_known
+# endif
+# if defined(macosx) || (defined(__APPLE__) && defined(__MACH__))
+# define DARWIN
+# if defined(__ppc__) || defined(__ppc64__)
+# define POWERPC
+# define mach_type_known
+# elif defined(__x86_64__)
+# define X86_64
+# define mach_type_known
+# elif defined(__i386__)
+# define I386
+# define mach_type_known
+# endif
+# endif
+# if defined(NeXT) && defined(mc68000)
+# define M68K
+# define NEXT
+# define mach_type_known
+# endif
+# if defined(NeXT) && (defined(i386) || defined(__i386__))
+# define I386
+# define NEXT
+# define mach_type_known
+# endif
+# if defined(__OpenBSD__) && (defined(i386) || defined(__i386__))
+# define I386
+# define OPENBSD
+# define mach_type_known
+# endif
+# if defined(FREEBSD) && (defined(i386) || defined(__i386__))
+# define I386
+# define mach_type_known
+# endif
+# if defined(FREEBSD) && defined(__x86_64__)
+# define X86_64
+# define mach_type_known
+# endif
+# if defined(__NetBSD__) && (defined(i386) || defined(__i386__))
+# define I386
+# define mach_type_known
+# endif
+# if defined(__NetBSD__) && defined(__x86_64__)
+# define X86_64
+# define mach_type_known
+# endif
+# if defined(FREEBSD) && defined(__sparc__)
+# define SPARC
+# define mach_type_known
+# endif
+# if defined(bsdi) && (defined(i386) || defined(__i386__))
+# define I386
+# define BSDI
+# define mach_type_known
+# endif
+# if !defined(mach_type_known) && defined(__386BSD__)
+# define I386
+# define THREE86BSD
+# define mach_type_known
+# endif
+# if defined(_CX_UX) && defined(_M88K)
+# define M88K
+# define CX_UX
+# define mach_type_known
+# endif
+# if defined(DGUX) && defined(m88k)
+# define M88K
+ /* DGUX defined */
+# define mach_type_known
+# endif
+# if defined(_WIN32_WCE)
+ /* SH3, SH4, MIPS already defined for corresponding architectures */
+# if defined(SH3) || defined(SH4)
+# define SH
+# endif
+# if defined(x86)
+# define I386
+# endif
+# if defined(ARM)
+# define ARM32
+# endif
+# define MSWINCE
+# define mach_type_known
+# else
+# if (defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
+ || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__)
+# if defined(__LP64__) || defined(_WIN64)
+# define X86_64
+# else
+# define I386
+# endif
+# define MSWIN32 /* or Win64 */
+# define mach_type_known
+# endif
+# if defined(_MSC_VER) && defined(_M_IA64)
+# define IA64
+# define MSWIN32 /* Really win64, but we don't treat 64-bit */
+			/* variants as a different platform.		*/
+# endif
+# endif
+# if defined(__DJGPP__)
+# define I386
+# ifndef DJGPP
+# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# endif
+# define mach_type_known
+# endif
+# if defined(__CYGWIN32__) || defined(__CYGWIN__)
+# define I386
+# define CYGWIN32
+# define mach_type_known
+# endif
+# if defined(__MINGW32__)
+# define I386
+# define MSWIN32
+# define mach_type_known
+# endif
+# if defined(__BORLANDC__)
+# define I386
+# define MSWIN32
+# define mach_type_known
+# endif
+# if defined(_UTS) && !defined(mach_type_known)
+# define S370
+# define UTS4
+# define mach_type_known
+# endif
+# if defined(__pj__)
+# error PicoJava no longer supported
+ /* The implementation had problems, and I haven't heard of users */
+ /* in ages. If you want it resurrected, let me know. */
+# endif
+# if defined(__embedded__) && defined(PPC)
+# define POWERPC
+# define NOSYS
+# define mach_type_known
+# endif
+/* Ivan Demakov */
+# if defined(__WATCOMC__) && defined(__386__)
+# define I386
+# if !defined(OS2) && !defined(MSWIN32) && !defined(DOS4GW)
+# if defined(__OS2__)
+# define OS2
+# else
+# if defined(__WINDOWS_386__) || defined(__NT__)
+# define MSWIN32
+# else
+# define DOS4GW
+# endif
+# endif
+# endif
+# define mach_type_known
+# endif
+# if defined(__s390__) && defined(LINUX)
+# define S390
+# define mach_type_known
+# endif
+# if defined(__GNU__)
+# if defined(__i386__)
+/* The Debian Hurd running on generic PC */
+# define HURD
+# define I386
+# define mach_type_known
+# endif
+# endif
+# if defined(__TANDEM)
+ /* Nonstop S-series */
+ /* FIXME: Should recognize Integrity series? */
+# define MIPS
+# define NONSTOP
+# define mach_type_known
+# endif
+
+/* Feel free to add more clauses here */
+
+/* Or manually define the machine type here. A machine type is */
+/* characterized by the architecture. Some */
+/* machine types are further subdivided by OS. */
+/* Macros such as LINUX, FREEBSD, etc. distinguish them. */
+/* SYSV on an M68K actually means A/UX. */
+/* The distinction in these cases is usually the stack starting address */
+# ifndef mach_type_known
+# error "The collector has not been ported to this machine/OS combination."
+# endif
+ /* Mapping is: M68K ==> Motorola 680X0 */
+ /* (NEXT, and SYSV (A/UX), */
+ /* MACOS and AMIGA variants) */
+ /* I386 ==> Intel 386 */
+ /* (SEQUENT, OS2, SCO, LINUX, NETBSD, */
+ /* FREEBSD, THREE86BSD, MSWIN32, */
+ /* BSDI,SOLARIS, NEXT, other variants) */
+ /* NS32K ==> Encore Multimax */
+ /* MIPS ==> R2000 through R14K */
+ /* (many variants) */
+ /* VAX ==> DEC VAX */
+ /* (BSD, ULTRIX variants) */
+ /* HP_PA ==> HP9000/700 & /800 */
+ /* HP/UX, LINUX */
+ /* SPARC ==> SPARC v7/v8/v9 */
+ /* (SOLARIS, LINUX, DRSNX variants) */
+ /* ALPHA ==> DEC Alpha */
+ /* (OSF1 and LINUX variants) */
+ /* M88K ==> Motorola 88XX0 */
+ /* (CX_UX and DGUX) */
+ /* S370 ==> 370-like machine */
+ /* running Amdahl UTS4 */
+ /* S390 ==> 390-like machine */
+ /* running LINUX */
+ /* ARM32 ==> Intel StrongARM */
+ /* IA64 ==> Intel IPF */
+ /* (e.g. Itanium) */
+ /* (LINUX and HPUX) */
+ /* SH ==> Hitachi SuperH */
+ /* (LINUX & MSWINCE) */
+ /* X86_64 ==> AMD x86-64 */
+ /* POWERPC ==> IBM/Apple PowerPC */
+ /* (MACOS(<=9),DARWIN(incl.MACOSX),*/
+ /* LINUX, NETBSD, AIX, NOSYS */
+ /* variants) */
+ /* Handles 32 and 64-bit variants. */
+ /* CRIS ==> Axis Etrax */
+ /* M32R ==> Renesas M32R */
+
+
+/*
+ * For each architecture and OS, the following need to be defined:
+ *
+ * CPP_WORDSZ is a simple integer constant representing the word size
+ * in bits. We assume byte addressability, where a byte has 8 bits.
+ * We also assume CPP_WORDSZ is either 32 or 64.
+ * (We care about the length of pointers, not hardware
+ * bus widths. Thus a 64 bit processor with a C compiler that uses
+ * 32 bit pointers should use CPP_WORDSZ of 32, not 64. Default is 32.)
+ *
+ * MACH_TYPE is a string representation of the machine type.
+ * OS_TYPE is analogous for the OS.
+ *
+ * ALIGNMENT is the largest N, such that
+ * all pointers are guaranteed to be aligned on N byte boundaries.
+ * Defining it to be 1 will always work, but may perform poorly.
+ *
+ * DATASTART is the beginning of the data segment.
+ * On some platforms SEARCH_FOR_DATA_START is defined.
+ * SEARCH_FOR_DATA_START will cause GC_data_start to
+ * be set to an address determined by accessing data backwards from _end
+ * until an unmapped page is found. DATASTART will be defined to be
+ * GC_data_start.
+ * On UNIX-like systems, the collector will scan the area between DATASTART
+ * and DATAEND for root pointers.
+ *
+ * DATAEND, if not `end', where `end' is defined as ``extern int end[];''.
+ * RTH suggests gaining access to linker-script-synthesized values with
+ * this idiom rather than with `&end', where `end' is defined as
+ * ``extern int end;''. Otherwise ``GCC will assume these are in
+ * .sdata/.sbss'', which will, e.g., cause failures on alpha*-*-* with
+ * ``-msmall-data or -fpic'', or on mips-*-* without any special options.
+ *
+ * STACKBOTTOM is the cold end of the stack, which is usually the
+ * highest address in the stack.
+ * Under PCR or OS/2, we have other ways of finding thread stacks.
+ * For each machine, the following should:
+ * 1) define STACK_GROWS_UP if the stack grows toward higher addresses, and
+ * 2) define exactly one of
+ * STACKBOTTOM (should be defined to be an expression)
+ * LINUX_STACKBOTTOM
+ * HEURISTIC1
+ * HEURISTIC2
+ * If STACKBOTTOM is defined, then its value will be used directly as the
+ * stack base. If LINUX_STACKBOTTOM is defined, then it will be determined
+ * with a method appropriate for most Linux systems. Currently we look
+ * first for __libc_stack_end, and if that fails read it from /proc.
+ * If either of the last two macros is defined, then STACKBOTTOM is computed
+ * during collector startup using one of the following two heuristics:
+ * HEURISTIC1: Take an address inside GC_init's frame, and round it up to
+ * the next multiple of STACK_GRAN.
+ * HEURISTIC2: Take an address inside GC_init's frame, increment it repeatedly
+ * in small steps (decrement if STACK_GROWS_UP), and read the value
+ * at each location. Remember the value when the first
+ * Segmentation violation or Bus error is signalled. Round that
+ * to the nearest plausible page boundary, and use that instead
+ * of STACKBOTTOM.
+ *
+ * Gustavo Rodriguez-Rivera points out that on most (all?) Unix machines,
+ * the value of environ is a pointer that can serve as STACKBOTTOM.
+ * I expect that HEURISTIC2 can be replaced by this approach, which
+ * interferes far less with debugging. However it has the disadvantage
+ * that it's confused by a putenv call before the collector is initialized.
+ * This could be dealt with by intercepting putenv ...
+ *
+ * If no expression for STACKBOTTOM can be found, and neither of the above
+ * heuristics are usable, the collector can still be used with all of the above
+ * undefined, provided one of the following is done:
+ * 1) GC_mark_roots can be changed to somehow mark from the correct stack(s)
+ * without reference to STACKBOTTOM. This is appropriate for use in
+ * conjunction with thread packages, since there will be multiple stacks.
+ * (Allocating thread stacks in the heap, and treating them as ordinary
+ * heap data objects is also possible as a last resort. However, this is
+ * likely to introduce significant amounts of excess storage retention
+ * unless the dead parts of the thread stacks are periodically cleared.)
+ * 2) Client code may set GC_stackbottom before calling any GC_ routines.
+ * If the author of the client code controls the main program, this is
+ * easily accomplished by introducing a new main program, setting
+ * GC_stackbottom to the address of a local variable, and then calling
+ * the original main program. The new main program would read something
+ * like:
+ *
+ * # include "gc_private.h"
+ *
+ *		int main(int argc, char **argv, char **envp)
+ * {
+ * int dummy;
+ *
+ * GC_stackbottom = (ptr_t)(&dummy);
+ * return(real_main(argc, argv, envp));
+ * }
+ *
+ *
+ * Each architecture may also define the style of virtual dirty bit
+ * implementation to be used:
+ * MPROTECT_VDB: Write protect the heap and catch faults.
+ * GWW_VDB: Use win32 GetWriteWatch primitive.
+ * PROC_VDB: Use the SVR4 /proc primitives to read dirty bits.
+ *
+ * The first and second one may be combined, in which case a runtime
+ * selection will be made, based on GetWriteWatch availability.
+ *
+ * An architecture may define DYNAMIC_LOADING if dynamic_load.c
+ * defines GC_register_dynamic_libraries() for the architecture.
+ *
+ * An architecture may define PREFETCH(x) to preload the cache with *x.
+ * This defaults to a no-op.
+ *
+ * PREFETCH_FOR_WRITE(x) is used if *x is about to be written.
+ *
+ * An architecture may also define CLEAR_DOUBLE(x) to be a fast way to
+ * clear the two words at GC_malloc-aligned address x. By default,
+ * word stores of 0 are used instead.
+ *
+ * HEAP_START may be defined as the initial address hint for mmap-based
+ * allocation.
+ */
+
+/* If we are using a recent version of gcc, we can use __builtin_unwind_init()
+ * to push the relevant registers onto the stack.
+ */
+# if defined(__GNUC__) && ((__GNUC__ >= 3) || \
+ (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)) \
+ && !defined(__INTEL_COMPILER) && !defined(__PATHCC__)
+# define HAVE_BUILTIN_UNWIND_INIT
+# endif
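+/* With this defined, register-pushing code can (sketch) simply call   */
+/*      __builtin_unwind_init();                                       */
+/* to force callee-save registers to be spilled into the current       */
+/* frame, so that a subsequent scan of the stack will see them.        */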
+
+# define STACK_GRAN 0x1000000
+# ifdef M68K
+# define MACH_TYPE "M68K"
+# define ALIGNMENT 2
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define HEURISTIC2
+# ifdef __ELF__
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
+# else
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
+# endif
+# endif
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+# ifdef __ELF__
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
+# else
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
+# endif
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# define MPROTECT_VDB
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# include <features.h>
+# if defined(__GLIBC__)&& __GLIBC__>=2
+# define SEARCH_FOR_DATA_START
+# else /* !GLIBC2 */
+ extern char **__environ;
+# define DATASTART ((ptr_t)(&__environ))
+ /* hideous kludge: __environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+ /* We could use _etext instead, but that */
+ /* would include .rodata, which may */
+ /* contain large read-only data tables */
+ /* that we'd rather not scan. */
+# endif /* !GLIBC2 */
+ extern int _end[];
+# define DATAEND (_end)
+# else
+ extern int etext[];
+# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
+# endif
+# endif
+# ifdef AMIGA
+# define OS_TYPE "AMIGA"
+ /* STACKBOTTOM and DATASTART handled specially */
+ /* in os_dep.c */
+# define DATAEND /* not needed */
+# define GETPAGESIZE() 4096
+# endif
+# ifdef MACOS
+# ifndef __LOWMEM__
+# include <LowMem.h>
+# endif
+# define OS_TYPE "MACOS"
+ /* see os_dep.c for details of global data segments. */
+# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
+# define DATAEND /* not needed */
+# define GETPAGESIZE() 4096
+# endif
+# ifdef NEXT
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0x4000000)
+# define DATAEND /* not needed */
+# endif
+# endif
+
+# if defined(POWERPC)
+# define MACH_TYPE "POWERPC"
+# ifdef MACOS
+# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
+# ifndef __LOWMEM__
+# include <LowMem.h>
+# endif
+# define OS_TYPE "MACOS"
+ /* see os_dep.c for details of global data segments. */
+# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
+# define DATAEND /* not needed */
+# endif
+# ifdef LINUX
+# if defined(__powerpc64__)
+# define ALIGNMENT 8
+# define CPP_WORDSZ 64
+# ifndef HBLKSIZE
+# define HBLKSIZE 4096
+# endif
+# else
+# define ALIGNMENT 4
+# endif
+# define OS_TYPE "LINUX"
+ /* HEURISTIC1 has been reliably reported to fail for a 32-bit */
+ /* executable on a 64 bit kernel. */
+# define LINUX_STACKBOTTOM
+# define DYNAMIC_LOADING
+# define SEARCH_FOR_DATA_START
+ extern int _end[];
+# define DATAEND (_end)
+# endif
+# ifdef DARWIN
+# define OS_TYPE "DARWIN"
+# define DYNAMIC_LOADING
+# if defined(__ppc64__)
+# define ALIGNMENT 8
+# define CPP_WORDSZ 64
+# define STACKBOTTOM ((ptr_t) 0x7fff5fc00000)
+# define CACHE_LINE_SIZE 64
+# ifndef HBLKSIZE
+# define HBLKSIZE 4096
+# endif
+# else
+# define ALIGNMENT 4
+# define STACKBOTTOM ((ptr_t) 0xc0000000)
+# endif
+ /* XXX: see get_end(3); get_etext() and get_end() should not be used.
+ These aren't used when dyld support is enabled (it is by default). */
+# define DATASTART ((ptr_t) get_etext())
+# define DATAEND ((ptr_t) get_end())
+# define USE_MMAP
+# define USE_MMAP_ANON
+# ifdef GC_DARWIN_THREADS
+# define MPROTECT_VDB
+# endif
+# include <unistd.h>
+# define GETPAGESIZE() getpagesize()
+# if defined(USE_PPC_PREFETCH) && defined(__GNUC__)
+ /* The performance impact of prefetches is untested */
+# define PREFETCH(x) \
+ __asm__ __volatile__ ("dcbt 0,%0" : : "r" ((const void *) (x)))
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ __volatile__ ("dcbtst 0,%0" : : "r" ((const void *) (x)))
+# endif
+ /* There seem to be some issues with trylock hanging on Darwin. This
+ should be looked into some more. */
+# define NO_PTHREAD_TRYLOCK
+# endif
+# ifdef FREEBSD
+# define ALIGNMENT 4
+# define OS_TYPE "FREEBSD"
+# ifndef GC_FREEBSD_THREADS
+# define MPROTECT_VDB
+# endif
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+ extern char etext[];
+ extern char * GC_FreeBSDGetDataStart();
+# define DATASTART GC_FreeBSDGetDataStart(0x1000, &etext)
+# endif
+# ifdef NETBSD
+# define ALIGNMENT 4
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+ extern char etext[];
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
+# endif
+# ifdef AIX
+# define OS_TYPE "AIX"
+# undef ALIGNMENT /* in case it's defined */
+# ifdef IA64
+# undef IA64
+ /* DOB: some AIX installs stupidly define IA64 in */
+ /* /usr/include/sys/systemcfg.h */
+# endif
+# ifdef __64BIT__
+# define ALIGNMENT 8
+# define CPP_WORDSZ 64
+# define STACKBOTTOM ((ptr_t)0x1000000000000000)
+# else
+# define ALIGNMENT 4
+# define CPP_WORDSZ 32
+# define STACKBOTTOM ((ptr_t)((ulong)&errno))
+# endif
+# define USE_MMAP
+# define USE_MMAP_ANON
+ /* From AIX linker man page:
+ _text Specifies the first location of the program.
+ _etext Specifies the first location after the program.
+ _data Specifies the first location of the data.
+ _edata Specifies the first location after the initialized data
+ _end or end Specifies the first location after all data.
+ */
+ extern int _data[], _end[];
+# define DATASTART ((ptr_t)((ulong)_data))
+# define DATAEND ((ptr_t)((ulong)_end))
+ extern int errno;
+# define DYNAMIC_LOADING
+ /* For really old versions of AIX, this may have to be removed. */
+# endif
+
+# ifdef NOSYS
+# define ALIGNMENT 4
+# define OS_TYPE "NOSYS"
+ extern void __end[], __dso_handle[];
+# define DATASTART (__dso_handle) /* OK, that's ugly. */
+# define DATAEND (__end)
+ /* Stack starts at 0xE0000000 for the simulator. */
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+# define HEURISTIC1
+# endif
+# endif
+
+# ifdef VAX
+# define MACH_TYPE "VAX"
+# define ALIGNMENT 4 /* Pointers are longword aligned by 4.2 C compiler */
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
+# ifdef BSD
+# define OS_TYPE "BSD"
+# define HEURISTIC1
+ /* HEURISTIC2 may be OK, but it's hard to test. */
+# endif
+# ifdef ULTRIX
+# define OS_TYPE "ULTRIX"
+# define STACKBOTTOM ((ptr_t) 0x7fffc800)
+# endif
+# endif
+
+# ifdef SPARC
+# define MACH_TYPE "SPARC"
+# if defined(__arch64__) || defined(__sparcv9)
+# define ALIGNMENT 8
+# define CPP_WORDSZ 64
+# define ELF_CLASS ELFCLASS64
+# else
+# define ALIGNMENT 4 /* Required by hardware */
+# define CPP_WORDSZ 32
+# endif
+ /* Don't define USE_ASM_PUSH_REGS. We do use an asm helper, but */
+ /* not to push the registers on the mark stack. */
+# ifdef SOLARIS
+# define OS_TYPE "SOLARIS"
+ extern int _etext[];
+ extern int _end[];
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
+# define DATAEND (_end)
+# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
+# define USE_MMAP
+ /* Otherwise we now use calloc. Mmap may result in the */
+ /* heap interleaved with thread stacks, which can result in */
+ /* excessive blacklisting. Sbrk is unusable since it */
+ /* doesn't interact correctly with the system malloc. */
+# endif
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x40000000
+# else
+# define HEAP_START DATAEND
+# endif
+# define PROC_VDB
+/* HEURISTIC1 reportedly no longer works under 2.7. */
+/* HEURISTIC2 probably works, but this appears to be preferable. */
+/* Apparently USRSTACK is defined to be USERLIMIT, but in some */
+/* installations that's undefined. We work around this with a */
+/* gross hack: */
+# include <sys/vmparam.h>
+# ifdef USERLIMIT
+ /* This should work everywhere, but doesn't. */
+# define STACKBOTTOM USRSTACK
+# else
+# define HEURISTIC2
+# endif
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
+ /* getpagesize() appeared to be missing from at least one */
+ /* Solaris 5.4 installation. Weird. */
+# define DYNAMIC_LOADING
+# endif
+# ifdef DRSNX
+# define OS_TYPE "DRSNX"
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+ extern int etext[];
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)etext)
+# define MPROTECT_VDB
+# define STACKBOTTOM ((ptr_t) 0xdfff0000)
+# define DYNAMIC_LOADING
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# else
+ Linux Sparc/a.out not supported
+# endif
+ extern int _end[];
+ extern int _etext[];
+# define DATAEND (_end)
+# define SVR4
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# ifdef __arch64__
+# define DATASTART GC_SysVGetDataStart(0x100000, (ptr_t)_etext)
+# else
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
+# endif
+# define LINUX_STACKBOTTOM
+# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define STACKBOTTOM ((ptr_t) 0xf8000000)
+ extern int etext[];
+# define DATASTART ((ptr_t)(etext))
+# endif
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+# ifdef __ELF__
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
+# else
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
+# endif
+# endif
+# ifdef FREEBSD
+# define OS_TYPE "FREEBSD"
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+ extern char etext[];
+ extern char edata[];
+ extern char end[];
+# define NEED_FIND_LIMIT
+# define DATASTART ((ptr_t)(&etext))
+# define DATAEND (GC_find_limit (DATASTART, TRUE))
+# define DATASTART2 ((ptr_t)(&edata))
+# define DATAEND2 ((ptr_t)(&end))
+# endif
+# endif
+
+# ifdef I386
+# define MACH_TYPE "I386"
+# if defined(__LP64__) || defined(_WIN64)
+# error This should be handled as X86_64
+# else
+# define CPP_WORDSZ 32
+# define ALIGNMENT 4
+ /* Appears to hold for all "32 bit" compilers */
+ /* except Borland. The -a4 option fixes */
+ /* Borland. */
+ /* Ivan Demakov: For Watcom the option is -zp4. */
+# endif
+# ifdef SEQUENT
+# define OS_TYPE "SEQUENT"
+ extern int etext[];
+# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
+# define STACKBOTTOM ((ptr_t) 0x3ffff000)
+# endif
+# ifdef BEOS
+# define OS_TYPE "BEOS"
+# include <OS.h>
+# define GETPAGESIZE() B_PAGE_SIZE
+ extern int etext[];
+# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
+# endif
+# ifdef SOLARIS
+# define OS_TYPE "SOLARIS"
+ extern int _etext[], _end[];
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)_etext)
+# define DATAEND (_end)
+/* # define STACKBOTTOM ((ptr_t)(_start)) worked through 2.7, */
+/* but reportedly breaks under 2.8. It appears that the stack */
+/* base is a property of the executable, so this should not break */
+/* old executables. */
+/* HEURISTIC2 probably works, but this appears to be preferable. */
+# include <sys/vm.h>
+# define STACKBOTTOM USRSTACK
+/* At least in Solaris 2.5, PROC_VDB gives wrong values for dirty bits. */
+/* It appears to be fixed in 2.8 and 2.9. */
+# ifdef SOLARIS25_PROC_VDB_BUG_FIXED
+# define PROC_VDB
+# endif
+# define DYNAMIC_LOADING
+# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
+# define USE_MMAP
+ /* Otherwise we now use calloc. Mmap may result in the */
+ /* heap interleaved with thread stacks, which can result in */
+ /* excessive blacklisting. Sbrk is unusable since it */
+ /* doesn't interact correctly with the system malloc. */
+# endif
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x40000000
+# else
+# define HEAP_START DATAEND
+# endif
+# endif
+# ifdef SCO
+# define OS_TYPE "SCO"
+ extern int etext[];
+# define DATASTART ((ptr_t)((((word) (etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)etext & 0xfff))
+# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
+# endif
+# ifdef SCO_ELF
+# define OS_TYPE "SCO_ELF"
+ extern int etext[];
+# define DATASTART ((ptr_t)(etext))
+# define STACKBOTTOM ((ptr_t) 0x08048000)
+# define DYNAMIC_LOADING
+# define ELF_CLASS ELFCLASS32
+# endif
+# ifdef DGUX
+# define OS_TYPE "DGUX"
+ extern int _etext, _end;
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)(&_etext))
+# define DATAEND (&_end)
+# define STACK_GROWS_DOWN
+# define HEURISTIC2
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
+# define DYNAMIC_LOADING
+# ifndef USE_MMAP
+# define USE_MMAP
+# endif /* USE_MMAP */
+# define MAP_FAILED (void *) -1
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x40000000
+# else /* USE_MMAP */
+# define HEAP_START DATAEND
+# endif /* USE_MMAP */
+# endif /* DGUX */
+
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# if 0
+# define HEURISTIC1
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+ /* STACKBOTTOM is usually 0xc0000000, but this changes with */
+ /* different kernel configurations. In particular, systems */
+ /* with 2GB physical memory will usually move the user */
+ /* address space limit, and hence initial SP to 0x80000000. */
+# endif
+# if !defined(GC_LINUX_THREADS) || !defined(REDIRECT_MALLOC)
+# define MPROTECT_VDB
+# else
+ /* We seem to get random errors in incremental mode, */
+ /* possibly because Linux threads is itself a malloc client */
+ /* and can't deal with the signals. */
+# endif
+# define HEAP_START (ptr_t)0x1000
+ /* This encourages mmap to give us low addresses, */
+ /* thus allowing the heap to grow to ~3GB */
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# ifdef UNDEFINED /* includes ro data */
+ extern int _etext[];
+# define DATASTART ((ptr_t)((((word) (_etext)) + 0xfff) & ~0xfff))
+# endif
+# include <features.h>
+# if defined(__GLIBC__) && __GLIBC__ >= 2
+# define SEARCH_FOR_DATA_START
+# else
+ extern char **__environ;
+# define DATASTART ((ptr_t)(&__environ))
+ /* hideous kludge: __environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+ /* We could use _etext instead, but that */
+ /* would include .rodata, which may */
+ /* contain large read-only data tables */
+ /* that we'd rather not scan. */
+# endif
+ extern int _end[];
+# define DATAEND (_end)
+# else
+ extern int etext[];
+# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
+# endif
+# ifdef USE_I686_PREFETCH
+ /* FIXME: This should use __builtin_prefetch, but we'll leave that */
+ /* for the next release. */
+# define PREFETCH(x) \
+ __asm__ __volatile__ (" prefetchnta %0": : "m"(*(char *)(x)))
+ /* Empirically prefetcht0 is much more effective at reducing */
+ /* cache miss stalls for the targeted load instructions. But it */
+ /* seems to interfere enough with other cache traffic that the net */
+ /* result is worse than prefetchnta. */
+# if 0
+ /* Using prefetches for write seems to have a slight negative */
+ /* impact on performance, at least for a PIII/500. */
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ __volatile__ (" prefetcht0 %0": : "m"(*(char *)(x)))
+# endif
+# endif
+# ifdef USE_3DNOW_PREFETCH
+# define PREFETCH(x) \
+ __asm__ __volatile__ (" prefetch %0": : "m"(*(char *)(x)))
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ __volatile__ (" prefetchw %0": : "m"(*(char *)(x)))
+# endif
+# endif
+# ifdef CYGWIN32
+# define OS_TYPE "CYGWIN32"
+# define DATASTART ((ptr_t)GC_DATASTART) /* From gc.h */
+# define DATAEND ((ptr_t)GC_DATAEND)
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000
+# define HEURISTIC1
+# endif
+# ifdef OS2
+# define OS_TYPE "OS2"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. OS2 actually has the right */
+ /* system call! */
+# define DATAEND /* not needed */
+# endif
+# ifdef MSWIN32
+# define OS_TYPE "MSWIN32"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. */
+# if !defined(__WATCOMC__)
+# define MPROTECT_VDB
+ /* We also avoided doing this in the past with GC_WIN32_THREADS */
+ /* Hopefully that's fixed. */
+# endif
+# if _MSC_VER >= 1300 /* .NET, i.e. > VisualStudio 6 */
+# define GWW_VDB
+# endif
+# define DATAEND /* not needed */
+# endif
+# ifdef MSWINCE
+# define OS_TYPE "MSWINCE"
+# define DATAEND /* not needed */
+# endif
+# ifdef DJGPP
+# define OS_TYPE "DJGPP"
+# include "stubinfo.h"
+ extern int etext[];
+ extern int _stklen;
+ extern int __djgpp_stack_limit;
+# define DATASTART ((ptr_t)((((word) (etext)) + 0x1ff) & ~0x1ff))
+/* # define STACKBOTTOM ((ptr_t)((word) _stubinfo + _stubinfo->size \
+ + _stklen)) */
+# define STACKBOTTOM ((ptr_t)((word) __djgpp_stack_limit + _stklen))
+ /* This may not be right. */
+# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# endif
+# ifdef FREEBSD
+# define OS_TYPE "FREEBSD"
+# ifndef GC_FREEBSD_THREADS
+# define MPROTECT_VDB
+# endif
+# ifdef __GLIBC__
+# define SIG_SUSPEND (32+6)
+# define SIG_THR_RESTART (32+5)
+ extern int _end[];
+# define DATAEND (_end)
+# else
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# endif
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+ extern char etext[];
+ extern char * GC_FreeBSDGetDataStart(size_t, ptr_t);
+# define DATASTART GC_FreeBSDGetDataStart(0x1000, (ptr_t)etext)
+# endif
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+# endif
+# ifdef THREE86BSD
+# define OS_TYPE "THREE86BSD"
+# endif
+# ifdef BSDI
+# define OS_TYPE "BSDI"
+# endif
+# if defined(OPENBSD) || defined(NETBSD) \
+ || defined(THREE86BSD) || defined(BSDI)
+# define HEURISTIC2
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
+# endif
+# ifdef NEXT
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t)0xc0000000)
+# define DATAEND /* not needed */
+# endif
+# ifdef DOS4GW
+# define OS_TYPE "DOS4GW"
+ extern long __nullarea;
+ extern char _end;
+ extern char *_STACKTOP;
+ /* Depending on calling conventions, Watcom C either does or does not
+ prepend an underscore to the names of C variables.
+ Make sure startup code variables always have the same names. */
+ #pragma aux __nullarea "*";
+ #pragma aux _end "*";
+# define STACKBOTTOM ((ptr_t) _STACKTOP)
+ /* confused? me too. */
+# define DATASTART ((ptr_t) &__nullarea)
+# define DATAEND ((ptr_t) &_end)
+# endif
+# ifdef HURD
+# define OS_TYPE "HURD"
+# define STACK_GROWS_DOWN
+# define HEURISTIC2
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# define SEARCH_FOR_DATA_START
+ extern int _end[];
+# define DATAEND ((ptr_t) (_end))
+/* # define MPROTECT_VDB Not quite working yet? */
+# define DYNAMIC_LOADING
+# endif
+# ifdef DARWIN
+# define OS_TYPE "DARWIN"
+# define DARWIN_DONT_PARSE_STACK
+# define DYNAMIC_LOADING
+ /* XXX: see get_end(3); get_etext() and get_end() should not be used.
+ These aren't used when dyld support is enabled (it is by default). */
+# define DATASTART ((ptr_t) get_etext())
+# define DATAEND ((ptr_t) get_end())
+# define STACKBOTTOM ((ptr_t) 0xc0000000)
+# define USE_MMAP
+# define USE_MMAP_ANON
+# ifdef GC_DARWIN_THREADS
+# define MPROTECT_VDB
+# endif
+# include <unistd.h>
+# define GETPAGESIZE() getpagesize()
+ /* There seem to be some issues with trylock hanging on Darwin. This
+ should be looked into some more. */
+# define NO_PTHREAD_TRYLOCK
+# endif /* DARWIN */
+# endif
+
+# ifdef NS32K
+# define MACH_TYPE "NS32K"
+# define ALIGNMENT 4
+ extern char **environ;
+# define DATASTART ((ptr_t)(&environ))
+ /* hideous kludge: environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+# define STACKBOTTOM ((ptr_t) 0xfffff000) /* for Encore */
+# endif
+
+# ifdef MIPS
+# define MACH_TYPE "MIPS"
+# ifdef LINUX
+ /* This was developed for a linuxce style platform. Probably */
+ /* needs to be tweaked for workstation class machines. */
+# define OS_TYPE "LINUX"
+# define DYNAMIC_LOADING
+ extern int _end[];
+# define DATAEND (_end)
+ extern int __data_start[];
+# define DATASTART ((ptr_t)(__data_start))
+# define ALIGNMENT 4
+# if __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2 || __GLIBC__ > 2
+# define LINUX_STACKBOTTOM
+# else
+# define STACKBOTTOM 0x80000000
+# endif
+# endif /* Linux */
+# ifdef EWS4800
+# define HEURISTIC2
+# if defined(_MIPS_SZPTR) && (_MIPS_SZPTR == 64)
+ extern int _fdata[], _end[];
+# define DATASTART ((ptr_t)_fdata)
+# define DATAEND ((ptr_t)_end)
+# define CPP_WORDSZ _MIPS_SZPTR
+# define ALIGNMENT (_MIPS_SZPTR/8)
+# else
+ extern int etext[], edata[], end[];
+ extern int _DYNAMIC_LINKING[], _gp[];
+# define DATASTART ((ptr_t)((((word)etext + 0x3ffff) & ~0x3ffff) \
+ + ((word)etext & 0xffff)))
+# define DATAEND (edata)
+# define DATASTART2 (_DYNAMIC_LINKING \
+ ? (ptr_t)(((word)_gp + 0x8000 + 0x3ffff) & ~0x3ffff) \
+ : (ptr_t)edata)
+# define DATAEND2 (end)
+# define ALIGNMENT 4
+# endif
+# define OS_TYPE "EWS4800"
+# endif
+# ifdef ULTRIX
+# define HEURISTIC2
+# define DATASTART (ptr_t)0x10000000
+ /* Could probably be slightly higher since */
+ /* startup code allocates lots of stuff. */
+# define OS_TYPE "ULTRIX"
+# define ALIGNMENT 4
+# endif
+# ifdef IRIX5
+# define HEURISTIC2
+ extern int _fdata[];
+# define DATASTART ((ptr_t)(_fdata))
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x30000000
+# else
+# define HEAP_START DATASTART
+# endif
+ /* Lowest plausible heap address. */
+ /* In the MMAP case, we map there. */
+ /* In either case it is used to identify */
+ /* heap sections so they're not */
+ /* considered as roots. */
+# define OS_TYPE "IRIX5"
+/*# define MPROTECT_VDB DOB: this should work, but there is evidence */
+/* of recent breakage. */
+# ifdef _MIPS_SZPTR
+# define CPP_WORDSZ _MIPS_SZPTR
+# define ALIGNMENT (_MIPS_SZPTR/8)
+# else
+# define ALIGNMENT 4
+# endif
+# define DYNAMIC_LOADING
+# endif
+# ifdef MSWINCE
+# define OS_TYPE "MSWINCE"
+# define ALIGNMENT 4
+# define DATAEND /* not needed */
+# endif
+# if defined(NETBSD)
+# define OS_TYPE "NETBSD"
+# define ALIGNMENT 4
+# define HEURISTIC2
+# ifdef __ELF__
+ extern int etext[];
+# define DATASTART GC_data_start
+# define NEED_FIND_LIMIT
+# define DYNAMIC_LOADING
+# else
+# define DATASTART ((ptr_t) 0x10000000)
+# define STACKBOTTOM ((ptr_t) 0x7ffff000)
+# endif /* _ELF_ */
+# endif
+# if defined(NONSTOP)
+# define CPP_WORDSZ 32
+# define OS_TYPE "NONSTOP"
+# define ALIGNMENT 4
+# define DATASTART ((ptr_t) 0x08000000)
+ extern char **environ;
+# define DATAEND ((ptr_t)(environ - 0x10))
+# define STACKBOTTOM ((ptr_t) 0x4fffffff)
+# endif
+# endif
+
+# ifdef HP_PA
+# define MACH_TYPE "HP_PA"
+# ifdef __LP64__
+# define CPP_WORDSZ 64
+# define ALIGNMENT 8
+# else
+# define CPP_WORDSZ 32
+# define ALIGNMENT 4
+# endif
+# if !defined(GC_HPUX_THREADS) && !defined(GC_LINUX_THREADS)
+# ifndef LINUX /* For now. */
+# define MPROTECT_VDB
+# endif
+# else
+# ifdef PARALLEL_MARK
+# define USE_MARK_BYTES
+ /* Minimize compare-and-swap usage. */
+# endif
+# endif
+# define STACK_GROWS_UP
+# ifdef HPUX
+# define OS_TYPE "HPUX"
+ extern int __data_start[];
+# define DATASTART ((ptr_t)(__data_start))
+# if 0
+ /* The following appears to work for 7xx systems running HP/UX */
+ /* 9.xx. Furthermore, it might result in much faster */
+ /* collections than HEURISTIC2, which may involve scanning */
+ /* segments that directly precede the stack. It is not the */
+ /* default, since it may not work on older machine/OS */
+ /* combinations. (Thanks to Raymond X.T. Nijssen for uncovering */
+ /* this.) */
+# define STACKBOTTOM ((ptr_t) 0x7b033000) /* from /etc/conf/h/param.h */
+# else
+ /* Gustavo Rodriguez-Rivera suggested changing HEURISTIC2 */
+ /* to this. Note that the GC must be initialized before the */
+ /* first putenv call. */
+ extern char ** environ;
+# define STACKBOTTOM ((ptr_t)environ)
+# endif
+# define DYNAMIC_LOADING
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
+# ifndef __GNUC__
+# define PREFETCH(x) { \
+ register long addr = (long)(x); \
+ (void) _asm ("LDW", 0, 0, addr, 0); \
+ }
+# endif
+# endif /* HPUX */
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# define DYNAMIC_LOADING
+# define SEARCH_FOR_DATA_START
+ extern int _end[];
+# define DATAEND (&_end)
+# endif /* LINUX */
+# endif /* HP_PA */
+
+# ifdef ALPHA
+# define MACH_TYPE "ALPHA"
+# define ALIGNMENT 8
+# define CPP_WORDSZ 64
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+# define DATASTART GC_data_start
+# define ELFCLASS32 32
+# define ELFCLASS64 64
+# define ELF_CLASS ELFCLASS64
+# define DYNAMIC_LOADING
+# endif
+# ifdef OPENBSD
+# define OS_TYPE "OPENBSD"
+# define HEURISTIC2
+# ifdef __ELF__ /* since OpenBSD/Alpha 2.9 */
+# define DATASTART GC_data_start
+# define ELFCLASS32 32
+# define ELFCLASS64 64
+# define ELF_CLASS ELFCLASS64
+# else /* ECOFF, until OpenBSD/Alpha 2.7 */
+# define DATASTART ((ptr_t) 0x140000000)
+# endif
+# endif
+# ifdef FREEBSD
+# define OS_TYPE "FREEBSD"
+/* MPROTECT_VDB is not yet supported at all on FreeBSD/alpha. */
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+/* Handle unmapped hole alpha*-*-freebsd[45]* puts between etext and edata. */
+ extern char etext[];
+ extern char edata[];
+ extern char end[];
+# define NEED_FIND_LIMIT
+# define DATASTART ((ptr_t)(&etext))
+# define DATAEND (GC_find_limit (DATASTART, TRUE))
+# define DATASTART2 ((ptr_t)(&edata))
+# define DATAEND2 ((ptr_t)(&end))
+# endif
+# ifdef OSF1
+# define OS_TYPE "OSF1"
+# define DATASTART ((ptr_t) 0x140000000)
+ extern int _end[];
+# define DATAEND ((ptr_t) &_end)
+ extern char ** environ;
+ /* round up from the value of environ to the nearest page boundary */
+ /* Probably breaks if putenv is called before collector */
+ /* initialization. */
+# define STACKBOTTOM ((ptr_t)(((word)(environ) | (getpagesize()-1))+1))
+/* # define HEURISTIC2 */
+ /* Normally HEURISTIC2 is too conservative, since */
+ /* the text segment immediately follows the stack. */
+ /* Hence we give an upper bound. */
+ /* This is currently unused, since we disabled HEURISTIC2 */
+ extern int __start[];
+# define HEURISTIC2_LIMIT ((ptr_t)((word)(__start) & ~(getpagesize()-1)))
+# ifndef GC_OSF1_THREADS
+ /* Unresolved signal issues with threads. */
+# define MPROTECT_VDB
+# endif
+# define DYNAMIC_LOADING
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# ifdef __ELF__
+# define SEARCH_FOR_DATA_START
+# define DYNAMIC_LOADING
+# else
+# define DATASTART ((ptr_t) 0x140000000)
+# endif
+ extern int _end[];
+# define DATAEND (_end)
+# define MPROTECT_VDB
+ /* Has only been superficially tested. May not */
+ /* work on all versions. */
+# endif
+# endif
+
+# ifdef IA64
+# define MACH_TYPE "IA64"
+# ifdef HPUX
+# ifdef _ILP32
+# define CPP_WORDSZ 32
+ /* Requires 8 byte alignment for malloc */
+# define ALIGNMENT 4
+# else
+# ifndef _LP64
+ ---> unknown ABI
+# endif
+# define CPP_WORDSZ 64
+ /* Requires 16 byte alignment for malloc */
+# define ALIGNMENT 8
+# endif
+# define OS_TYPE "HPUX"
+ extern int __data_start[];
+# define DATASTART ((ptr_t)(__data_start))
+ /* Gustavo Rodriguez-Rivera suggested changing HEURISTIC2 */
+ /* to this. Note that the GC must be initialized before the */
+ /* first putenv call. */
+ extern char ** environ;
+# define STACKBOTTOM ((ptr_t)environ)
+# define HPUX_STACKBOTTOM
+# define DYNAMIC_LOADING
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
+ /* The following was empirically determined, and is probably */
+ /* not very robust. */
+ /* Note that the backing store base seems to be at a nice */
+ /* address minus one page. */
+# define BACKING_STORE_DISPLACEMENT 0x1000000
+# define BACKING_STORE_ALIGNMENT 0x1000
+ extern ptr_t GC_register_stackbottom;
+# define BACKING_STORE_BASE GC_register_stackbottom
+ /* Known to be wrong for recent HP/UX versions!!! */
+# endif
+# ifdef LINUX
+# define CPP_WORDSZ 64
+# define ALIGNMENT 8
+# define OS_TYPE "LINUX"
+ /* The following works on NUE and older kernels: */
+/* # define STACKBOTTOM ((ptr_t) 0xa000000000000000l) */
+ /* This does not work on NUE: */
+# define LINUX_STACKBOTTOM
+ /* We also need the base address of the register stack */
+ /* backing store. This is computed in */
+ /* GC_linux_register_stack_base based on the following */
+ /* constants: */
+# define BACKING_STORE_ALIGNMENT 0x100000
+# define BACKING_STORE_DISPLACEMENT 0x80000000
+ extern ptr_t GC_register_stackbottom;
+# define BACKING_STORE_BASE GC_register_stackbottom
+# define SEARCH_FOR_DATA_START
+# ifdef __GNUC__
+# define DYNAMIC_LOADING
+# else
+ /* In the Intel compiler environment, we seem to end up with */
+ /* statically linked executables and an undefined reference */
+ /* to _DYNAMIC */
+# endif
+# define MPROTECT_VDB
+ /* Requires Linux 2.3.47 or later. */
+ extern int _end[];
+# define DATAEND (_end)
+# ifdef __GNUC__
+# ifndef __INTEL_COMPILER
+# define PREFETCH(x) \
+ __asm__ (" lfetch [%0]": : "r"(x))
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ (" lfetch.excl [%0]": : "r"(x))
+# define CLEAR_DOUBLE(x) \
+ __asm__ (" stf.spill [%0]=f0": : "r"((void *)(x)))
+# else
+# include <ia64intrin.h>
+# define PREFETCH(x) \
+ __lfetch(__lfhint_none, (x))
+# define PREFETCH_FOR_WRITE(x) \
+ __lfetch(__lfhint_nta, (x))
+# define CLEAR_DOUBLE(x) \
+ __stf_spill((void *)(x), 0)
+# endif // __INTEL_COMPILER
+# endif
+# endif
+# ifdef MSWIN32
+ /* FIXME: This is a very partial guess. There is no port, yet. */
+# define OS_TYPE "MSWIN32"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. */
+# define DATAEND /* not needed */
+# if defined(_WIN64)
+# define CPP_WORDSZ 64
+# else
+# define CPP_WORDSZ 32 /* Is this possible? */
+# endif
+# define ALIGNMENT 8
+# define STRTOULL _strtoui64
+# endif
+# endif
+
+# ifdef M88K
+# define MACH_TYPE "M88K"
+# define ALIGNMENT 4
+ extern int etext[];
+# ifdef CX_UX
+# define OS_TYPE "CX_UX"
+# define DATASTART ((((word)etext + 0x3fffff) & ~0x3fffff) + 0x10000)
+# endif
+# ifdef DGUX
+# define OS_TYPE "DGUX"
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)etext)
+# endif
+# define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */
+# endif
+
+# ifdef S370
+ /* If this still works, and if anyone cares, this should probably */
+ /* be moved to the S390 category. */
+# define MACH_TYPE "S370"
+# define ALIGNMENT 4 /* Required by hardware */
+# ifdef UTS4
+# define OS_TYPE "UTS4"
+ extern int etext[];
+ extern int _etext[];
+ extern int _end[];
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
+# define DATAEND (_end)
+# define HEURISTIC2
+# endif
+# endif
+
+# ifdef S390
+# define MACH_TYPE "S390"
+# ifndef __s390x__
+# define ALIGNMENT 4
+# define CPP_WORDSZ 32
+# else
+# define ALIGNMENT 8
+# define CPP_WORDSZ 64
+# ifndef HBLKSIZE
+# define HBLKSIZE 4096
+# endif
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# define DYNAMIC_LOADING
+ extern int __data_start[];
+# define DATASTART ((ptr_t)(__data_start))
+ extern int _end[];
+# define DATAEND (_end)
+# define CACHE_LINE_SIZE 256
+# define GETPAGESIZE() 4096
+# endif
+# endif
+
+# ifdef ARM32
+# define CPP_WORDSZ 32
+# define MACH_TYPE "ARM32"
+# define ALIGNMENT 4
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+# ifdef __ELF__
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
+# else
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
+# endif
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# include <features.h>
+# if defined(__GLIBC__) && __GLIBC__ >= 2
+# define SEARCH_FOR_DATA_START
+# else
+ extern char **__environ;
+# define DATASTART ((ptr_t)(&__environ))
+ /* hideous kludge: __environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+ /* We could use _etext instead, but that */
+ /* would include .rodata, which may */
+ /* contain large read-only data tables */
+ /* that we'd rather not scan. */
+# endif
+ extern int _end[];
+# define DATAEND (_end)
+# else
+ extern int etext[];
+# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
+# endif
+# endif
+# ifdef MSWINCE
+# define OS_TYPE "MSWINCE"
+# define DATAEND /* not needed */
+# endif
+# ifdef NOSYS
+ /* __data_start is usually defined in the target linker script. */
+ extern int __data_start[];
+# define DATASTART (ptr_t)(__data_start)
+ /* __stack_base__ is set in newlib/libc/sys/arm/crt0.S */
+ extern void *__stack_base__;
+# define STACKBOTTOM ((ptr_t) (__stack_base__))
+# endif
+#endif
+
+# ifdef CRIS
+# define MACH_TYPE "CRIS"
+# define CPP_WORDSZ 32
+# define ALIGNMENT 1
+# define OS_TYPE "LINUX"
+# define DYNAMIC_LOADING
+# define LINUX_STACKBOTTOM
+# define SEARCH_FOR_DATA_START
+ extern int _end[];
+# define DATAEND (_end)
+# endif
+
+# ifdef SH
+# define MACH_TYPE "SH"
+# define ALIGNMENT 4
+# ifdef MSWINCE
+# define OS_TYPE "MSWINCE"
+# define DATAEND /* not needed */
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# define DYNAMIC_LOADING
+# define SEARCH_FOR_DATA_START
+ extern int _end[];
+# define DATAEND (_end)
+# endif
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
+# endif
+# endif
+
+# ifdef SH4
+# define MACH_TYPE "SH4"
+# define OS_TYPE "MSWINCE"
+# define ALIGNMENT 4
+# define DATAEND /* not needed */
+# endif
+
+# ifdef M32R
+# define CPP_WORDSZ 32
+# define MACH_TYPE "M32R"
+# define ALIGNMENT 4
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# undef STACK_GRAN
+# define STACK_GRAN 0x10000000
+# define DYNAMIC_LOADING
+# define SEARCH_FOR_DATA_START
+ extern int _end[];
+# define DATAEND (_end)
+# endif
+# endif
+
+# ifdef X86_64
+# define MACH_TYPE "X86_64"
+# define ALIGNMENT 8
+# define CPP_WORDSZ 64
+# ifndef HBLKSIZE
+# define HBLKSIZE 4096
+# endif
+# define CACHE_LINE_SIZE 64
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+# define LINUX_STACKBOTTOM
+# if !defined(GC_LINUX_THREADS) || !defined(REDIRECT_MALLOC)
+# define MPROTECT_VDB
+# else
+ /* We seem to get random errors in incremental mode, */
+ /* possibly because Linux threads is itself a malloc client */
+ /* and can't deal with the signals. */
+# endif
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# ifdef UNDEFINED /* includes ro data */
+ extern int _etext[];
+# define DATASTART ((ptr_t)((((word) (_etext)) + 0xfff) & ~0xfff))
+# endif
+# include <features.h>
+# define SEARCH_FOR_DATA_START
+ extern int _end[];
+# define DATAEND (_end)
+# else
+ extern int etext[];
+# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
+# endif
+# if defined(__GNUC__) && __GNUC__ >= 3
+# define PREFETCH(x) __builtin_prefetch((x), 0, 0)
+# define PREFETCH_FOR_WRITE(x) __builtin_prefetch((x), 1)
+# endif
+# endif
+# ifdef DARWIN
+# define OS_TYPE "DARWIN"
+# define DARWIN_DONT_PARSE_STACK
+# define DYNAMIC_LOADING
+ /* XXX: see get_end(3); get_etext() and get_end() should not be used.
+ These aren't used when dyld support is enabled (it is by default). */
+# define DATASTART ((ptr_t) get_etext())
+# define DATAEND ((ptr_t) get_end())
+# define STACKBOTTOM ((ptr_t) 0x7fff5fc00000)
+# define USE_MMAP
+# define USE_MMAP_ANON
+# ifdef GC_DARWIN_THREADS
+# define MPROTECT_VDB
+# endif
+# include <unistd.h>
+# define GETPAGESIZE() getpagesize()
+ /* There seem to be some issues with trylock hanging on Darwin. This
+ should be looked into some more. */
+# define NO_PTHREAD_TRYLOCK
+# endif
+# ifdef FREEBSD
+# define OS_TYPE "FREEBSD"
+# ifndef GC_FREEBSD_THREADS
+# define MPROTECT_VDB
+# endif
+# ifdef __GLIBC__
+# define SIG_SUSPEND (32+6)
+# define SIG_THR_RESTART (32+5)
+ extern int _end[];
+# define DATAEND (_end)
+# else
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# endif
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+ extern char etext[];
+ extern char * GC_FreeBSDGetDataStart();
+# define DATASTART GC_FreeBSDGetDataStart(0x1000, &etext)
+# endif
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+# define HEURISTIC2
+ extern char etext[];
+# define SEARCH_FOR_DATA_START
+# endif
+# ifdef SOLARIS
+# define OS_TYPE "SOLARIS"
+# define ELF_CLASS ELFCLASS64
+ extern int _etext[], _end[];
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)_etext)
+# define DATAEND (_end)
+/* # define STACKBOTTOM ((ptr_t)(_start)) worked through 2.7, */
+/* but reportedly breaks under 2.8. It appears that the stack */
+/* base is a property of the executable, so this should not break */
+/* old executables. */
+/* HEURISTIC2 probably works, but this appears to be preferable. */
+/* Apparently USRSTACK is defined to be USERLIMIT, but in some */
+/* installations that's undefined. We work around this with a */
+/* gross hack: */
+# include <sys/vmparam.h>
+# ifdef USERLIMIT
+ /* This should work everywhere, but doesn't. */
+# define STACKBOTTOM USRSTACK
+# else
+# define HEURISTIC2
+# endif
+/* At least in Solaris 2.5, PROC_VDB gives wrong values for dirty bits. */
+/* It appears to be fixed in 2.8 and 2.9. */
+# ifdef SOLARIS25_PROC_VDB_BUG_FIXED
+# define PROC_VDB
+# endif
+# define DYNAMIC_LOADING
+# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
+# define USE_MMAP
+ /* Otherwise we now use calloc. Mmap may result in the */
+ /* heap interleaved with thread stacks, which can result in */
+ /* excessive blacklisting. Sbrk is unusable since it */
+ /* doesn't interact correctly with the system malloc. */
+# endif
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x40000000
+# else
+# define HEAP_START DATAEND
+# endif
+# endif
+# ifdef MSWIN32
+# define OS_TYPE "MSWIN32"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. */
+# if !defined(__WATCOMC__)
+# define MPROTECT_VDB
+ /* We also avoided doing this in the past with GC_WIN32_THREADS */
+ /* Hopefully that's fixed. */
+# endif
+# if _MSC_VER >= 1300 /* .NET, i.e. > VisualStudio 6 */
+# define GWW_VDB
+# endif
+# define DATAEND /* not needed */
+# endif
+# endif
+
+#if defined(LINUX) && defined(USE_MMAP)
+ /* The kernel may do a somewhat better job merging mappings etc. */
+ /* with anonymous mappings. */
+# define USE_MMAP_ANON
+#endif
+
+#if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
+ /* Nptl allocates thread stacks with mmap, which is fine. But it */
+ /* keeps a cache of thread stacks. Thread stacks contain the */
+ /* thread control blocks. These in turn contain a pointer to */
+ /* (sizeof (void *) from the beginning of) the dtv for thread-local */
+ /* storage, which is calloc allocated. If we don't scan the cached */
+ /* thread stacks, we appear to lose the dtv. This tends to */
+ /* result in something that looks like a bogus dtv count, which */
+ /* tends to result in a memset call on a block that is way too */
+ /* large. Sometimes we're lucky and the process just dies ... */
+ /* There seems to be a similar issue with some other memory */
+ /* allocated by the dynamic loader. */
+ /* This can be avoided by either: */
+ /* - Defining USE_PROC_FOR_LIBRARIES here. */
+ /* That performs very poorly, precisely because we end up */
+ /* scanning cached stacks. */
+ /* - Have calloc look at its callers. That is currently what we do.*/
+ /* In spite of the fact that it is gross and disgusting. */
+/* # define USE_PROC_FOR_LIBRARIES */
+#endif
+
+# ifndef STACK_GROWS_UP
+# define STACK_GROWS_DOWN
+# endif
+
+# ifndef CPP_WORDSZ
+# define CPP_WORDSZ 32
+# endif
+
+# ifndef OS_TYPE
+# define OS_TYPE ""
+# endif
+
+# ifndef DATAEND
+ extern int end[];
+# define DATAEND (end)
+# endif
+
+# if defined(SVR4) && !defined(GETPAGESIZE)
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
+# endif
+
+# ifndef GETPAGESIZE
+# if defined(SOLARIS) || defined(IRIX5) || defined(LINUX) \
+ || defined(NETBSD) || defined(FREEBSD) || defined(HPUX)
+# include <unistd.h>
+# endif
+# define GETPAGESIZE() getpagesize()
+# endif
+
+# if defined(SOLARIS) || defined(DRSNX) || defined(UTS4)
+ /* OS has SVR4 generic features. */
+ /* Probably others also qualify. */
+# define SVR4
+# endif
+
+# if defined(SOLARIS) || defined(DRSNX)
+ /* OS has SOLARIS style semi-undocumented interface */
+ /* to dynamic loader. */
+# define SOLARISDL
+ /* OS has SOLARIS style signal handlers. */
+# define SUNOS5SIGS
+# endif
+
+# if defined(HPUX)
+# define SUNOS5SIGS
+# endif
+
+# if defined(FREEBSD) && \
+ (defined(__DragonFly__) || __FreeBSD__ >= 4 || (__FreeBSD_kernel__ >= 4))
+# define SUNOS5SIGS
+# endif
+
+# ifdef GC_NETBSD_THREADS
+# define SIGRTMIN 33
+# define SIGRTMAX 63
+# endif
+
+# if defined(SVR4) || defined(LINUX) || defined(IRIX5) || defined(HPUX) \
+ || defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD) \
+ || defined(DGUX) || defined(BSD) \
+ || defined(AIX) || defined(DARWIN) || defined(OSF1) \
+ || defined(HURD)
+# define UNIX_LIKE /* Basic Unix-like system calls work. */
+# endif
+
+# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
+ -> bad word size
+# endif
+
+# ifdef PCR
+# undef DYNAMIC_LOADING
+# undef STACKBOTTOM
+# undef HEURISTIC1
+# undef HEURISTIC2
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# define PCR_VDB
+# endif
+
+# ifdef SMALL_CONFIG
+ /* Presumably not worth the space it takes. */
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# endif
+
+# ifdef USE_MUNMAP
+# undef MPROTECT_VDB /* Can't deal with address space holes. */
+# endif
+
+# ifdef PARALLEL_MARK
+# undef MPROTECT_VDB /* For now. */
+# endif
+
+# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB) \
+ && !defined(GWW_VDB)
+# define DEFAULT_VDB
+# endif
+
+# ifndef PREFETCH
+# define PREFETCH(x)
+# define NO_PREFETCH
+# endif
+
+# ifndef PREFETCH_FOR_WRITE
+# define PREFETCH_FOR_WRITE(x)
+# define NO_PREFETCH_FOR_WRITE
+# endif
+
+# ifndef CACHE_LINE_SIZE
+# define CACHE_LINE_SIZE 32 /* Wild guess */
+# endif
+
+# if defined(LINUX) || defined(HURD) || defined(__GLIBC__)
+# define REGISTER_LIBRARIES_EARLY
+ /* We sometimes use dl_iterate_phdr, which may acquire an internal */
+ /* lock. This isn't safe after the world has stopped. So we must */
+ /* call GC_register_dynamic_libraries before stopping the world. */
+ /* For performance reasons, this may be beneficial on other */
+ /* platforms as well, though it should be avoided in win32. */
+# endif /* LINUX */
+
+# if defined(SEARCH_FOR_DATA_START)
+ extern ptr_t GC_data_start;
+# define DATASTART GC_data_start
+# endif
+
+# ifndef CLEAR_DOUBLE
+# define CLEAR_DOUBLE(x) \
+ ((word*)x)[0] = 0; \
+ ((word*)x)[1] = 0;
+# endif /* CLEAR_DOUBLE */
+
+# if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC) \
+ && !defined(INCLUDE_LINUX_THREAD_DESCR)
+ /* Will not work, since libc and the dynamic loader use thread */
+ /* locals, sometimes as the only reference. */
+# define INCLUDE_LINUX_THREAD_DESCR
+# endif
+
+# if defined(GC_IRIX_THREADS) && !defined(IRIX5)
+ --> inconsistent configuration
+# endif
+# if defined(GC_LINUX_THREADS) && !defined(LINUX)
+ --> inconsistent configuration
+# endif
+# if defined(GC_NETBSD_THREADS) && !defined(NETBSD)
+ --> inconsistent configuration
+# endif
+# if defined(GC_SOLARIS_THREADS) && !defined(SOLARIS)
+ --> inconsistent configuration
+# endif
+# if defined(GC_HPUX_THREADS) && !defined(HPUX)
+ --> inconsistent configuration
+# endif
+# if defined(GC_AIX_THREADS) && !defined(_AIX)
+ --> inconsistent configuration
+# endif
+# if defined(GC_GNU_THREADS) && !defined(HURD)
+ --> inconsistent configuration
+# endif
+# if defined(GC_WIN32_THREADS) && !defined(MSWIN32) && !defined(CYGWIN32)
+ --> inconsistent configuration
+# endif
+
+# if defined(PCR) || defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
+# define THREADS
+# endif
+
+# if !defined(USE_MARK_BITS) && !defined(USE_MARK_BYTES)
+# if defined(THREADS) && defined(PARALLEL_MARK)
+# define USE_MARK_BYTES
+# else
+# define USE_MARK_BITS
+# endif
+# endif
+
+# if defined(MSWINCE)
+# define NO_GETENV
+# endif
+
+# if defined(SPARC)
+# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
+ /* include assembly code to do it well. */
+# endif
+
+ /* Can we save call chain in objects for debugging? */
+ /* Set NFRAMES (# of saved frames) and NARGS (# of args for each */
+ /* frame) to reasonable values for the platform. */
+ /* Set SAVE_CALL_CHAIN if we can. SAVE_CALL_COUNT can be specified */
+ /* at build time, though we feel free to adjust it slightly. */
+ /* Define NEED_CALLINFO if we either save the call stack or */
+ /* GC_ADD_CALLER is defined. */
+ /* GC_CAN_SAVE_CALL_STACKS is set in gc.h. */
+
+#if defined(SPARC)
+# define CAN_SAVE_CALL_ARGS
+#endif
+#if (defined(I386) || defined(X86_64)) && (defined(LINUX) || defined(__GLIBC__))
+ /* SAVE_CALL_CHAIN is supported if the code is compiled to save */
+ /* frame pointers by default, i.e. no -fomit-frame-pointer flag. */
+# define CAN_SAVE_CALL_ARGS
+#endif
+
+# if defined(SAVE_CALL_COUNT) && !defined(GC_ADD_CALLER) \
+ && defined(GC_CAN_SAVE_CALL_STACKS)
+# define SAVE_CALL_CHAIN
+# endif
+# ifdef SAVE_CALL_CHAIN
+# if defined(SAVE_CALL_NARGS) && defined(CAN_SAVE_CALL_ARGS)
+# define NARGS SAVE_CALL_NARGS
+# else
+# define NARGS 0 /* Number of arguments to save for each call. */
+# endif
+# endif
+# ifdef SAVE_CALL_CHAIN
+# ifndef SAVE_CALL_COUNT
+# define NFRAMES 6 /* Number of frames to save. Even for */
+ /* alignment reasons. */
+# else
+# define NFRAMES ((SAVE_CALL_COUNT + 1) & ~1)
+# endif
+# define NEED_CALLINFO
+# endif /* SAVE_CALL_CHAIN */
+# ifdef GC_ADD_CALLER
+# define NFRAMES 1
+# define NARGS 0
+# define NEED_CALLINFO
+# endif
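+
+/* Worked example (a sketch, not a statement about any particular      */
+/* build): with -DSAVE_CALL_COUNT=5 on a platform that defines         */
+/* GC_CAN_SAVE_CALL_STACKS but not GC_ADD_CALLER, SAVE_CALL_CHAIN gets */
+/* defined and the rounding above yields NFRAMES == ((5+1) & ~1) == 6; */
+/* without SAVE_CALL_NARGS, NARGS stays 0, so only return addresses    */
+/* are recorded.                                                       */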
+
+# if defined(MAKE_BACK_GRAPH) && !defined(DBG_HDRS_ALL)
+# define DBG_HDRS_ALL
+# endif
+
+# if defined(POINTER_MASK) && !defined(POINTER_SHIFT)
+# define POINTER_SHIFT 0
+# endif
+
+# if defined(POINTER_SHIFT) && !defined(POINTER_MASK)
+# define POINTER_MASK ((GC_word)(-1))
+# endif
+
+# if !defined(FIXUP_POINTER) && defined(POINTER_MASK)
+# define FIXUP_POINTER(p) (p) = ((p) & (POINTER_MASK) << POINTER_SHIFT)
+# endif
+
+# if defined(FIXUP_POINTER)
+# define NEED_FIXUP_POINTER 1
+# else
+# define NEED_FIXUP_POINTER 0
+# define FIXUP_POINTER(p)
+# endif
+
+# if !defined(MARK_BIT_PER_GRANULE) && !defined(MARK_BIT_PER_OBJ)
+# define MARK_BIT_PER_GRANULE /* Usually faster */
+# endif
+
+/* Some static sanity tests. */
+# if defined(MARK_BIT_PER_GRANULE) && defined(MARK_BIT_PER_OBJ)
+# error Define only one of MARK_BIT_PER_GRANULE and MARK_BIT_PER_OBJ.
+# endif
+
+# if defined(STACK_GROWS_UP) && defined(STACK_GROWS_DOWN)
+# error "Only one of STACK_GROWS_UP and STACK_GROWS_DOWN should be defd."
+# endif
+# if !defined(STACK_GROWS_UP) && !defined(STACK_GROWS_DOWN)
+# error "One of STACK_GROWS_UP and STACK_GROWS_DOWN should be defd."
+# endif
+
+# if defined(REDIRECT_MALLOC) && defined(THREADS) && !defined(LINUX)
+# error "REDIRECT_MALLOC with THREADS works at most on Linux."
+# endif
+
+#ifdef GC_PRIVATE_H
+ /* This relies on some type definitions from gc_priv.h, from */
+ /* where it's normally included. */
+ /* */
+ /* How to get heap memory from the OS: */
+ /* Note that sbrk()-like allocation is preferred, since it */
+ /* usually makes it possible to merge consecutively allocated */
+ /* chunks. It also avoids unintended recursion with */
+ /* -DREDIRECT_MALLOC. */
+ /* GET_MEM() returns an HBLKSIZE-aligned chunk. */
+ /* 0 is taken to mean failure. */
+ /* In the case of USE_MMAP, the argument must also be a */
+ /* physical page size. */
+ /* GET_MEM is currently not assumed to retrieve 0 filled space, */
+ /* though we should perhaps take advantage of the case in which */
+ /* it does. */
+ struct hblk; /* See gc_priv.h. */
+# if defined(PCR)
+ char * real_malloc();
+# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
+ + GC_page_size-1)
+# elif defined(OS2)
+ void * os2_alloc(size_t bytes);
+# define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes \
+ + GC_page_size) \
+ + GC_page_size-1)
+# elif defined(NEXT) || defined(DOS4GW) || defined(NONSTOP) || \
+ (defined(AMIGA) && !defined(GC_AMIGA_FASTALLOC)) || \
+ (defined(SOLARIS) && !defined(USE_MMAP))
+# define GET_MEM(bytes) HBLKPTR((size_t) calloc(1, (size_t)bytes + GC_page_size) \
+ + GC_page_size-1)
+# elif defined(MSWIN32)
+ extern ptr_t GC_win32_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
+# elif defined(MACOS)
+# if defined(USE_TEMPORARY_MEMORY)
+ extern Ptr GC_MacTemporaryNewPtr(size_t size, Boolean clearMemory);
+# define GET_MEM(bytes) HBLKPTR( \
+ GC_MacTemporaryNewPtr(bytes + GC_page_size, true) \
+ + GC_page_size-1)
+# else
+# define GET_MEM(bytes) HBLKPTR( \
+ NewPtrClear(bytes + GC_page_size) + GC_page_size-1)
+# endif
+# elif defined(MSWINCE)
+ extern ptr_t GC_wince_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_wince_get_mem(bytes)
+# elif defined(AMIGA) && defined(GC_AMIGA_FASTALLOC)
+ extern void *GC_amiga_get_mem(size_t size);
+# define GET_MEM(bytes) HBLKPTR((size_t) \
+ GC_amiga_get_mem((size_t)bytes + GC_page_size) \
+ + GC_page_size-1)
+# else
+ extern ptr_t GC_unix_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
+# endif
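+
+  /* Illustrative call site (a sketch only; the real callers live in */
+  /* the allocator proper):                                           */
+  /*     struct hblk * h = GET_MEM(n * HBLKSIZE);                     */
+  /*     if (0 == h) ... treat as out of memory ...                   */
+  /* As noted above, the result is HBLKSIZE-aligned and 0 signals     */
+  /* failure.                                                         */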
+
+#endif /* GC_PRIVATE_H */
+
+# endif /* GCCONFIG_H */
diff --git a/tools/build/src/engine/boehm_gc/include/private/msvc_dbg.h b/tools/build/src/engine/boehm_gc/include/private/msvc_dbg.h
new file mode 100644
index 000000000..1d3030aaa
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/msvc_dbg.h
@@ -0,0 +1,69 @@
+/*
+ Copyright (c) 2004-2005 Andrei Polushin
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+*/
+#ifndef _MSVC_DBG_H
+#define _MSVC_DBG_H
+
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !MSVC_DBG_DLL
+#define MSVC_DBG_EXPORT
+#elif MSVC_DBG_BUILD
+#define MSVC_DBG_EXPORT __declspec(dllexport)
+#else
+#define MSVC_DBG_EXPORT __declspec(dllimport)
+#endif
+
+#ifndef MAX_SYM_NAME
+#define MAX_SYM_NAME 2000
+#endif
+
+typedef void* HANDLE;
+typedef struct _CONTEXT CONTEXT;
+
+MSVC_DBG_EXPORT size_t GetStackFrames(size_t skip, void* frames[], size_t maxFrames);
+MSVC_DBG_EXPORT size_t GetStackFramesFromContext(HANDLE hProcess, HANDLE hThread, CONTEXT* context, size_t skip, void* frames[], size_t maxFrames);
+
+MSVC_DBG_EXPORT size_t GetModuleNameFromAddress(void* address, char* moduleName, size_t size);
+MSVC_DBG_EXPORT size_t GetModuleNameFromStack(size_t skip, char* moduleName, size_t size);
+
+MSVC_DBG_EXPORT size_t GetSymbolNameFromAddress(void* address, char* symbolName, size_t size, size_t* offsetBytes);
+MSVC_DBG_EXPORT size_t GetSymbolNameFromStack(size_t skip, char* symbolName, size_t size, size_t* offsetBytes);
+
+MSVC_DBG_EXPORT size_t GetFileLineFromAddress(void* address, char* fileName, size_t size, size_t* lineNumber, size_t* offsetBytes);
+MSVC_DBG_EXPORT size_t GetFileLineFromStack(size_t skip, char* fileName, size_t size, size_t* lineNumber, size_t* offsetBytes);
+
+MSVC_DBG_EXPORT size_t GetDescriptionFromAddress(void* address, const char* format, char* description, size_t size);
+MSVC_DBG_EXPORT size_t GetDescriptionFromStack(void*const frames[], size_t count, const char* format, char* description[], size_t size);
+
+/* Compatibility with <execinfo.h> */
+MSVC_DBG_EXPORT int backtrace(void* addresses[], int count);
+MSVC_DBG_EXPORT char** backtrace_symbols(void*const addresses[], int count);
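+
+/* Illustrative use of the <execinfo.h>-compatible entries above (a    */
+/* sketch; as with the glibc interface, the array returned by          */
+/* backtrace_symbols is assumed to be heap-allocated and owned by the  */
+/* caller):                                                            */
+/*     void* frames[16];                                               */
+/*     int n = backtrace(frames, 16);                                  */
+/*     char** symbols = backtrace_symbols(frames, n);                  */
+/*     ... print symbols[0] .. symbols[n-1], then free(symbols) ...    */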
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif/*_MSVC_DBG_H*/
diff --git a/tools/build/src/engine/boehm_gc/include/private/pthread_stop_world.h b/tools/build/src/engine/boehm_gc/include/private/pthread_stop_world.h
new file mode 100644
index 000000000..6f9197a1f
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/pthread_stop_world.h
@@ -0,0 +1,11 @@
+#ifndef GC_PTHREAD_STOP_WORLD_H
+#define GC_PTHREAD_STOP_WORLD_H
+
+struct thread_stop_info {
+ word last_stop_count; /* GC_last_stop_count value when thread */
+ /* last successfully handled a suspend */
+ /* signal. */
+ ptr_t stack_ptr; /* Valid only when stopped. */
+};
+
+#endif
diff --git a/tools/build/src/engine/boehm_gc/include/private/pthread_support.h b/tools/build/src/engine/boehm_gc/include/private/pthread_support.h
new file mode 100644
index 000000000..77f1ad1a9
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/pthread_support.h
@@ -0,0 +1,84 @@
+#ifndef GC_PTHREAD_SUPPORT_H
+#define GC_PTHREAD_SUPPORT_H
+
+# include "private/gc_priv.h"
+
+# if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
+
+#if defined(GC_DARWIN_THREADS)
+# include "private/darwin_stop_world.h"
+#else
+# include "private/pthread_stop_world.h"
+#endif
+
+#ifdef THREAD_LOCAL_ALLOC
+# include "thread_local_alloc.h"
+#endif /* THREAD_LOCAL_ALLOC */
+
+/* We use the allocation lock to protect thread-related data structures. */
+
+/* The set of all known threads. We intercept thread creation and */
+/* joins. */
+/* Protected by allocation/GC lock. */
+/* Some of this should be declared volatile, but that's inconsistent */
+/* with some library routine declarations. */
+typedef struct GC_Thread_Rep {
+ struct GC_Thread_Rep * next; /* More recently allocated threads */
+ /* with a given pthread id come */
+ /* first. (All but the first are */
+ /* guaranteed to be dead, but we may */
+ /* not yet have registered the join.) */
+ pthread_t id;
+ /* Extra bookkeeping information the stopping code uses */
+ struct thread_stop_info stop_info;
+
+ short flags;
+# define FINISHED 1 /* Thread has exited. */
+# define DETACHED 2 /* Thread is treated as detached. */
+ /* Thread may really be detached, or */
+ /* it may have been explicitly */
+ /* registered, in which case we can */
+ /* deallocate its GC_Thread_Rep once */
+ /* it unregisters itself, since it */
+ /* may not return a GC pointer. */
+# define MAIN_THREAD 4 /* True for the original thread only. */
+ short thread_blocked; /* Protected by GC lock. */
+ /* Treated as a boolean value. If set, */
+ /* thread will acquire GC lock before */
+ /* doing any pointer manipulations, and */
+ /* has set its sp value. Thus it does */
+ /* not need to be sent a signal to stop */
+ /* it. */
+ ptr_t stack_end; /* Cold end of the stack. */
+# ifdef IA64
+ ptr_t backing_store_end;
+ ptr_t backing_store_ptr;
+# endif
+ void * status; /* The value returned from the thread. */
+ /* Used only to avoid premature */
+ /* reclamation of any data it might */
+ /* reference. */
+ /* This is unfortunately also the */
+ /* reason we need to intercept join */
+ /* and detach. */
+# ifdef THREAD_LOCAL_ALLOC
+ struct thread_local_freelists tlfs;
+# endif
+} * GC_thread;
+
+# define THREAD_TABLE_SZ 256 /* Must be power of 2 */
+extern volatile GC_thread GC_threads[THREAD_TABLE_SZ];
+
+extern GC_bool GC_thr_initialized;
+
+GC_thread GC_lookup_thread(pthread_t id);
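+ /* A sketch of the presumed lookup (not verified against the          */
+ /* implementation): hash the pthread id, treated as an integer, into  */
+ /* GC_threads (THREAD_TABLE_SZ is a power of 2, so masking works) and */
+ /* walk the per-bucket next chain, comparing ids with pthread_equal().*/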
+
+void GC_stop_init();
+
+extern GC_bool GC_in_thread_creation;
+ /* We may currently be in thread creation or destruction. */
+ /* Only set to TRUE while allocation lock is held. */
+ /* When set, it is OK to run GC from unknown thread. */
+
+#endif /* GC_PTHREADS && !GC_SOLARIS_THREADS.... etc */
+#endif /* GC_PTHREAD_SUPPORT_H */
diff --git a/tools/build/src/engine/boehm_gc/include/private/specific.h b/tools/build/src/engine/boehm_gc/include/private/specific.h
new file mode 100644
index 000000000..fc2e8f9e6
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/specific.h
@@ -0,0 +1,96 @@
+/*
+ * This is a reimplementation of a subset of the pthread_getspecific/setspecific
+ * interface. This appears to outperform the standard linuxthreads one
+ * by a significant margin.
+ * The major restriction is that each thread may only make a single
+ * pthread_setspecific call on a single key. (The current data structure
+ * doesn't really require that. The restriction should be easily removable.)
+ * We don't currently support the destruction functions, though that
+ * could be done.
+ * We also currently assume that only one pthread_setspecific call
+ * can be executed at a time, though that assumption would be easy to remove
+ * by adding a lock.
+ */
+
+#include <errno.h>
+#include "atomic_ops.h"
+
+/* Called during key creation or setspecific. */
+/* For the GC we already hold lock. */
+/* Currently allocated objects leak on thread exit. */
+/* That's hard to fix, but OK if we allocate garbage */
+/* collected memory. */
+#define MALLOC_CLEAR(n) GC_INTERNAL_MALLOC(n, NORMAL)
+#define PREFIXED(name) GC_##name
+
+#define TS_CACHE_SIZE 1024
+#define CACHE_HASH(n) (((((long)n) >> 8) ^ (long)n) & (TS_CACHE_SIZE - 1))
+#define TS_HASH_SIZE 1024
+#define HASH(n) (((((long)n) >> 8) ^ (long)n) & (TS_HASH_SIZE - 1))
+
+/* An entry describing a thread-specific value for a given thread. */
+/* All such accessible structures preserve the invariant that if either */
+ /* thread is a valid pthread id or qtid is a valid "quick thread id" */
+/* for a thread, then value holds the corresponding thread specific */
+/* value. This invariant must be preserved at ALL times, since */
+/* asynchronous reads are allowed. */
+typedef struct thread_specific_entry {
+ volatile AO_t qtid; /* quick thread id, only for cache */
+ void * value;
+ struct thread_specific_entry *next;
+ pthread_t thread;
+} tse;
+
+
+/* We represent each thread-specific datum as two tables. The first is */
+/* a cache, indexed by a "quick thread identifier". The "quick" thread */
+/* identifier is an easy to compute value, which is guaranteed to */
+/* determine the thread, though a thread may correspond to more than */
+/* one value. We typically use the address of a page in the stack. */
+/* The second is a hash table, indexed by pthread_self(). It is used */
+/* only as a backup. */
+
+/* Return the "quick thread id". Default version. Assumes page size, */
+/* or at least thread stack separation, is at least 4K. */
+/* Must be defined so that it never returns 0. (Page 0 can't really */
+/* be part of any stack, since that would make 0 a valid stack pointer.)*/
+static __inline__ unsigned long quick_thread_id() {
+ int dummy;
+ return (unsigned long)(&dummy) >> 12;
+}
+
+#define INVALID_QTID ((unsigned long)0)
+#define INVALID_THREADID ((pthread_t)0)
+
+typedef struct thread_specific_data {
+ tse * volatile cache[TS_CACHE_SIZE];
+ /* A faster index to the hash table */
+ tse * hash[TS_HASH_SIZE];
+ pthread_mutex_t lock;
+} tsd;
+
+typedef tsd * PREFIXED(key_t);
+
+extern int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *));
+
+extern int PREFIXED(setspecific) (tsd * key, void * value);
+
+extern void PREFIXED(remove_specific) (tsd * key);
+
+/* An internal version of getspecific that assumes a cache miss. */
+void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
+ tse * volatile * cache_entry);
+
+static __inline__ void * PREFIXED(getspecific) (tsd * key) {
+ long qtid = quick_thread_id();
+ unsigned hash_val = CACHE_HASH(qtid);
+ tse * volatile * entry_ptr = key -> cache + hash_val;
+ tse * entry = *entry_ptr; /* Must be loaded only once. */
+ if (EXPECT(entry -> qtid == qtid, 1)) {
+ GC_ASSERT(entry -> thread == pthread_self());
+ return entry -> value;
+ }
+ return PREFIXED(slow_getspecific) (key, qtid, entry_ptr);
+}
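+
+/* Illustrative usage sketch, not part of this interface: how a client   */
+/* (in practice the GC's own thread support code) is expected to drive   */
+/* the declarations above.  The names my_key, my_data and the example    */
+/* function are hypothetical; only the GC_key_create, GC_setspecific     */
+/* and GC_getspecific calls correspond to real declarations here.        */
+#if 0  /* example only */
+static GC_key_t my_key;     /* i.e. a tsd *, created once with           */
+                            /* GC_key_create(&my_key, 0); destructors    */
+                            /* are not supported by this implementation. */
+
+static void example_thread_body(void * my_data)
+{
+    void * p;
+
+    /* At most one setspecific call per thread per key (see the          */
+    /* restriction described at the top of this file).                   */
+    GC_setspecific(my_key, my_data);
+
+    /* Subsequent lookups normally hit the quick-thread-id cache; a      */
+    /* miss falls back to GC_slow_getspecific.                           */
+    p = GC_getspecific(my_key);
+    (void)p;
+}
+#endif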
+
+
diff --git a/tools/build/src/engine/boehm_gc/include/private/thread_local_alloc.h b/tools/build/src/engine/boehm_gc/include/private/thread_local_alloc.h
new file mode 100644
index 000000000..4c2c5362f
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/private/thread_local_alloc.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2000-2005 by Hewlett-Packard Company. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* Included indirectly from a thread-library-specific file. */
+/* This is the interface for thread-local allocation, whose */
+/* implementation is mostly thread-library-independent. */
+/* Here we describe only the interface that needs to be known */
+/* and invoked from the thread support layer; the actual */
+/* implementation also exports GC_malloc and friends, which */
+/* are declared in gc.h. */
+
+#include "private/gc_priv.h"
+
+#if defined(THREAD_LOCAL_ALLOC)
+
+#include "gc_inline.h"
+
+
+# if defined USE_HPUX_TLS
+# error USE_HPUX_TLS macro was replaced by USE_COMPILER_TLS
+# endif
+
+# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC) && \
+ !defined(USE_WIN32_COMPILER_TLS) && !defined(USE_COMPILER_TLS) && \
+ !defined(USE_CUSTOM_SPECIFIC)
+# if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
+# if defined(__GNUC__) /* Fixed for versions past 2.95? */
+# define USE_WIN32_SPECIFIC
+# else
+# define USE_WIN32_COMPILER_TLS
+# endif /* !GNU */
+# elif defined(LINUX) && !defined(ARM32) && \
+ (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >=3))
+# define USE_COMPILER_TLS
+# elif (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
+ defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) || \
+ defined(GC_NETBSD_THREADS)
+# define USE_PTHREAD_SPECIFIC
+# elif defined(GC_HPUX_THREADS)
+# ifdef __GNUC__
+# define USE_PTHREAD_SPECIFIC
+ /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
+# else
+# define USE_COMPILER_TLS
+# endif
+# else
+# define USE_CUSTOM_SPECIFIC /* Use our own. */
+# endif
+# endif
+
+# include <stdlib.h>
+
+/* One of these should be declared as the tlfs field in the */
+/* structure pointed to by a GC_thread. */
+typedef struct thread_local_freelists {
+# ifdef THREAD_LOCAL_ALLOC
+ void * ptrfree_freelists[TINY_FREELISTS];
+ void * normal_freelists[TINY_FREELISTS];
+# ifdef GC_GCJ_SUPPORT
+ void * gcj_freelists[TINY_FREELISTS];
+# define ERROR_FL (void *)(-1)
+			/* Value used for gcj_freelists[-1]; allocation is */
+ /* erroneous. */
+# endif
+ /* Free lists contain either a pointer or a small count */
+ /* reflecting the number of granules allocated at that */
+ /* size. */
+ /* 0 ==> thread-local allocation in use, free list */
+ /* empty. */
+ /* > 0, <= DIRECT_GRANULES ==> Using global allocation, */
+ /* too few objects of this size have been */
+ /* allocated by this thread. */
+ /* >= HBLKSIZE => pointer to nonempty free list. */
+ /* > DIRECT_GRANULES, < HBLKSIZE ==> transition to */
+ /* local alloc, equivalent to 0. */
+# define DIRECT_GRANULES (HBLKSIZE/GRANULE_BYTES)
+ /* Don't use local free lists for up to this much */
+ /* allocation. */
+
+# endif
+} *GC_tlfs;
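+
+/* Illustrative sketch, not part of the collector: a hypothetical helper */
+/* that classifies one free-list slot according to the encoding          */
+/* documented in the structure comment above.  Only HBLKSIZE,            */
+/* DIRECT_GRANULES and GC_word come from the GC headers.                 */
+#if 0  /* example only */
+enum fl_state {
+    FL_EMPTY_LOCAL,      /* 0: thread-local allocation in use, list empty */
+    FL_COUNTING_GLOBAL,  /* small count: still using global allocation    */
+    FL_TRANSITION,       /* about to switch to local allocation           */
+    FL_NONEMPTY          /* a real pointer to a nonempty free list        */
+};
+
+static enum fl_state classify_fl_entry(void * entry)
+{
+    GC_word v = (GC_word)entry;
+
+    if (v == 0) return FL_EMPTY_LOCAL;
+    if (v <= DIRECT_GRANULES) return FL_COUNTING_GLOBAL;
+    if (v < HBLKSIZE) return FL_TRANSITION;
+    return FL_NONEMPTY;
+}
+#endif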
+
+# if defined(USE_PTHREAD_SPECIFIC)
+# define GC_getspecific pthread_getspecific
+# define GC_setspecific pthread_setspecific
+# define GC_key_create pthread_key_create
+# define GC_remove_specific(key) /* No need for cleanup on exit. */
+ typedef pthread_key_t GC_key_t;
+# elif defined(USE_COMPILER_TLS) || defined(USE_WIN32_COMPILER_TLS)
+# define GC_getspecific(x) (x)
+# define GC_setspecific(key, v) ((key) = (v), 0)
+# define GC_key_create(key, d) 0
+# define GC_remove_specific(key) /* No need for cleanup on exit. */
+ typedef void * GC_key_t;
+# elif defined(USE_WIN32_SPECIFIC)
+# include <windows.h>
+# define GC_getspecific TlsGetValue
+# define GC_setspecific(key, v) !TlsSetValue(key, v)
+ /* We assume 0 == success, msft does the opposite. */
+# define GC_key_create(key, d) \
+ ((d) != 0? (ABORT("Destructor unsupported by TlsAlloc"),0) \
+ : (*(key) = TlsAlloc(), 0))
+# define GC_remove_specific(key) /* No need for cleanup on thread exit. */
+ /* Need TlsFree on process exit/detach ? */
+ typedef DWORD GC_key_t;
+# elif defined(USE_CUSTOM_SPECIFIC)
+# include "private/specific.h"
+# else
+# error implement me
+# endif
+
+
+/* Each thread structure must be initialized. */
+/* This call must be made from the new thread. */
+/* Caller holds allocation lock. */
+void GC_init_thread_local(GC_tlfs p);
+
+/* Called when a thread is unregistered, or exits. */
+/* We hold the allocator lock. */
+void GC_destroy_thread_local(GC_tlfs p);
+
+/* The thread support layer must arrange to mark thread-local */
+/* free lists explicitly, since the link field is often */
+/* invisible to the marker. It knows how to find all threads; */
+/* we take care of an individual thread freelist structure. */
+void GC_mark_thread_local_fls_for(GC_tlfs p);
+
+extern
+#if defined(USE_COMPILER_TLS)
+ __thread
+#elif defined(USE_WIN32_COMPILER_TLS)
+ __declspec(thread)
+#endif
+GC_key_t GC_thread_key;
+
+/* This is set up by the thread_local_alloc implementation. But the */
+/* thread support layer calls GC_remove_specific(GC_thread_key) */
+/* before a thread exits. */
+/* And the thread support layer makes sure that GC_thread_key is traced,*/
+/* if necessary. */
+
+#endif /* THREAD_LOCAL_ALLOC */
diff --git a/tools/build/src/engine/boehm_gc/include/weakpointer.h b/tools/build/src/engine/boehm_gc/include/weakpointer.h
new file mode 100644
index 000000000..84906b00a
--- /dev/null
+++ b/tools/build/src/engine/boehm_gc/include/weakpointer.h
@@ -0,0 +1,221 @@
+#ifndef _weakpointer_h_
+#define _weakpointer_h_
+
+/****************************************************************************
+
+WeakPointer and CleanUp
+
+ Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+
+ THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+ Permission is hereby granted to copy this code for any purpose,
+ provided the above notices are retained on all copies.
+
+ Last modified on Mon Jul 17 18:16:01 PDT 1995 by ellis
+
+****************************************************************************/
+
+/****************************************************************************
+
+WeakPointer
+
+A weak pointer is a pointer to a heap-allocated object that doesn't
+prevent the object from being garbage collected. Weak pointers can be
+used to track which objects haven't yet been reclaimed by the
+collector. A weak pointer is deactivated when the collector discovers
+its referent object is unreachable by normal pointers (reachability
+and deactivation are defined more precisely below). A deactivated weak
+pointer remains deactivated forever.
+
+****************************************************************************/
+
+
+template< class T > class WeakPointer {
+public:
+
+WeakPointer( T* t = 0 )
+ /* Constructs a weak pointer for *t. t may be null. It is an error
+ if t is non-null and *t is not a collected object. */
+ {impl = _WeakPointer_New( t );}
+
+T* Pointer()
+ /* wp.Pointer() returns a pointer to the referent object of wp or
+ null if wp has been deactivated (because its referent object
+ has been discovered unreachable by the collector). */
+ {return (T*) _WeakPointer_Pointer( this->impl );}
+
+int operator==( WeakPointer< T > wp2 )
+ /* Given weak pointers wp1 and wp2, if wp1 == wp2, then wp1 and
+ wp2 refer to the same object. If wp1 != wp2, then either wp1
+ and wp2 don't refer to the same object, or if they do, one or
+ both of them has been deactivated. (Note: If objects t1 and t2
+ are never made reachable by their clean-up functions, then
+       WeakPointer<T>(t1) == WeakPointer<T>(t2) if and only if t1 == t2.) */
+ {return _WeakPointer_Equal( this->impl, wp2.impl );}
+
+int Hash()
+ /* Returns a hash code suitable for use by multiplicative- and
+ division-based hash tables. If wp1 == wp2, then wp1.Hash() ==
+ wp2.Hash(). */
+ {return _WeakPointer_Hash( this->impl );}
+
+private:
+void* impl;
+};
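+
+/* Illustrative usage sketch, not part of this interface.  Widget is a   */
+/* hypothetical collected class (hence the gc_cpp.h include); whether a  */
+/* particular collection actually deactivates the weak pointer depends   */
+/* on when the collector discovers the object unreachable.               */
+#if 0   /* example only */
+#include "gc_cpp.h"
+
+class Widget: public gc { /* ... */ };
+
+void weak_pointer_example()
+{
+    Widget* w = new Widget;
+    WeakPointer< Widget > wp( w );
+
+    wp.Pointer();     /* still == w while w is reachable                 */
+
+    w = 0;            /* drop the only strong reference                  */
+    GC_gcollect();    /* after some later collection ...                 */
+    if (wp.Pointer() == 0) {
+        /* ... the weak pointer may have been deactivated. */
+    }
+}
+#endif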
+
+/*****************************************************************************
+
+CleanUp
+
+A garbage-collected object can have an associated clean-up function
+that will be invoked some time after the collector discovers the
+object is unreachable via normal pointers. Clean-up functions can be
+used to release resources such as open-file handles or window handles
+when their containing objects become unreachable. If a C++ object has
+a non-empty explicit destructor (i.e. it contains programmer-written
+code), the destructor will be automatically registered as the object's
+initial clean-up function.
+
+There is no guarantee that the collector will detect every unreachable
+object (though it will find almost all of them). Clients should not
+rely on clean-up to cause some action to occur immediately -- clean-up
+is only a mechanism for improving resource usage.
+
+Every object with a clean-up function also has a clean-up queue. When
+the collector finds the object is unreachable, it enqueues it on its
+queue. The clean-up function is applied when the object is removed
+from the queue. By default, objects are enqueued on the garbage
+collector's queue, and the collector removes all objects from its
+queue after each collection. If a client supplies another queue for
+objects, it is his responsibility to remove objects (and cause their
+functions to be called) by polling it periodically.
+
+Clean-up queues allow clean-up functions accessing global data to
+synchronize with the main program. Garbage collection can occur at any
+time, and clean-ups invoked by the collector might access data in an
+inconsistent state. A client can control this by defining an explicit
+queue for objects and polling it at safe points.
+
+The following definitions are used by the specification below:
+
+Given a pointer t to a collected object, the base object BO(t) is the
+value returned by new when it created the object. (Because of multiple
+inheritance, t and BO(t) may not be the same address.)
+
+A weak pointer wp references an object *t if BO(wp.Pointer()) ==
+BO(t).
+
+***************************************************************************/
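+
+/* Illustrative sketch of the BO(t) definition above: with multiple      */
+/* inheritance a base-class pointer may not equal the address returned   */
+/* by new, yet both name the same base object.  The classes here are     */
+/* hypothetical.                                                         */
+#if 0   /* example only */
+struct A { virtual ~A() {} int a; };
+struct B { virtual ~B() {} int b; };
+struct C: public A, public B { };
+
+void base_object_example( C* c )
+{
+    B* b = c;    /* b typically differs from c by the offset of the B    */
+                 /* sub-object, but BO(b) == BO(c), so weak pointers     */
+                 /* constructed from b and from c reference the same     */
+                 /* object.                                              */
+    (void)b;
+}
+#endif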
+
+template< class T, class Data > class CleanUp {
+public:
+
+static void Set( T* t, void c( Data* d, T* t ), Data* d = 0 )
+ /* Sets the clean-up function of object BO(t) to be <c, d>,
+ replacing any previously defined clean-up function for BO(t); c
+ and d can be null, but t cannot. Sets the clean-up queue for
+ BO(t) to be the collector's queue. When t is removed from its
+ clean-up queue, its clean-up will be applied by calling c(d,
+ t). It is an error if *t is not a collected object. */
+ {_CleanUp_Set( t, c, d );}
+
+static void Call( T* t )
+ /* Sets the new clean-up function for BO(t) to be null and, if the
+ old one is non-null, calls it immediately, even if BO(t) is
+ still reachable. Deactivates any weak pointers to BO(t). */
+ {_CleanUp_Call( t );}
+
+class Queue {public:
+ Queue()
+ /* Constructs a new queue. */
+ {this->head = _CleanUp_Queue_NewHead();}
+
+ void Set( T* t )
+ /* q.Set(t) sets the clean-up queue of BO(t) to be q. */
+ {_CleanUp_Queue_Set( this->head, t );}
+
+ int Call()
+ /* If q is non-empty, q.Call() removes the first object and
+ calls its clean-up function; does nothing if q is
+ empty. Returns true if there are more objects in the
+ queue. */
+ {return _CleanUp_Queue_Call( this->head );}
+
+ private:
+ void* head;
+ };
+};
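+
+/* Illustrative usage sketch, not part of this interface.  Session,      */
+/* close_session, open_session and at_safe_point are hypothetical; they  */
+/* follow the protocol documented above: Set() registers the clean-up,   */
+/* an explicit Queue keeps it off the collector's queue, and Call() is   */
+/* polled at a point where shared data is known to be consistent.        */
+#if 0   /* example only */
+#include <stdio.h>
+#include "gc_cpp.h"
+
+struct Session: public gc { FILE* f; };
+
+static void close_session( int* /* unused */, Session* s )
+{
+    if (s->f != 0) fclose( s->f );
+}
+
+static CleanUp< Session, int >::Queue session_queue;
+
+Session* open_session( const char* path )
+{
+    Session* s = new Session;
+    s->f = fopen( path, "r" );
+    CleanUp< Session, int >::Set( s, close_session, 0 );  /* register    */
+    session_queue.Set( s );   /* route clean-up through our own queue    */
+    return s;
+}
+
+void at_safe_point()
+{
+    /* Drain the queue: each Call() removes one unreachable Session and  */
+    /* runs its clean-up, closing the file.                              */
+    while (session_queue.Call()) {}
+}
+#endif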
+
+/**********************************************************************
+
+Reachability and Clean-up
+
+An object O is reachable if it can be reached via a non-empty path of
+normal pointers from the registers, stacks, global variables, or an
+object with a non-null clean-up function (including O itself),
+ignoring pointers from an object to itself.
+
+This definition of reachability ensures that if object B is accessible
+from object A (and not vice versa) and if both A and B have clean-up
+functions, then A will always be cleaned up before B. Note that as
+long as an object with a clean-up function is contained in a cycle of
+pointers, it will always be reachable and will never be cleaned up or
+collected.
+
+When the collector finds an unreachable object with a null clean-up
+function, it atomically deactivates all weak pointers referencing the
+object and recycles its storage. If object B is accessible from object
+A via a path of normal pointers, A will be discovered unreachable no
+later than B, and a weak pointer to A will be deactivated no later
+than a weak pointer to B.
+
+When the collector finds an unreachable object with a non-null
+clean-up function, the collector atomically deactivates all weak
+pointers referencing the object, redefines its clean-up function to be
+null, and enqueues it on its clean-up queue. The object then becomes
+reachable again and remains reachable at least until its clean-up
+function executes.
+
+The clean-up function is assured that its argument is the only
+accessible pointer to the object. Nothing prevents the function from
+redefining the object's clean-up function or making the object
+reachable again (for example, by storing the pointer in a global
+variable).
+
+If the clean-up function does not make its object reachable again and
+does not redefine its clean-up function, then the object will be
+collected by a subsequent collection (because the object remains
+unreachable and now has a null clean-up function). If the clean-up
+function does make its object reachable again and a clean-up function
+is subsequently redefined for the object, then the new clean-up
+function will be invoked the next time the collector finds the object
+unreachable.
+
+Note that a destructor for a collected object cannot safely redefine a
+clean-up function for its object, since after the destructor executes,
+the object has been destroyed into "raw memory". (In most
+implementations, destroying an object mutates its vtbl.)
+
+Finally, note that calling delete t on a collected object first
+deactivates any weak pointers to t and then invokes its clean-up
+function (destructor).
+
+**********************************************************************/
+
+extern "C" {
+ void* _WeakPointer_New( void* t );
+ void* _WeakPointer_Pointer( void* wp );
+ int _WeakPointer_Equal( void* wp1, void* wp2 );
+ int _WeakPointer_Hash( void* wp );
+ void _CleanUp_Set( void* t, void (*c)( void* d, void* t ), void* d );
+ void _CleanUp_Call( void* t );
+ void* _CleanUp_Queue_NewHead ();
+ void _CleanUp_Queue_Set( void* h, void* t );
+ int _CleanUp_Queue_Call( void* h );
+}
+
+#endif /* _weakpointer_h_ */
+
+