From f8c1da573b1b2b72501630f18fc1452e6b9e9c0c Mon Sep 17 00:00:00 2001
From: bryce <bryce@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Mon, 21 May 2001 08:35:14 +0000
Subject: 	Imported version 6.0alpha7.

	* README, README.Mac, README.OS2, README.QUICK, README.alpha,
	README.amiga, README.debugging, README.dj, README.hp, README.linux,
	README.rs6000, README.sgi, README.solaris2, README.uts,
	README.win32, SCoptions.amiga, backptr.h, barrett_diagram,
	dbg_mlc.h, gc.h, gc.man, gc_alloc.h, gc_cpp.h, gc_hdrs.h, gc_mark.h,
	gc_priv.h, gc_private.h, gc_typed.h, gcconfig.h,
	hpux_irix_threads.c, makefile.depend, nursery.c,
	solaris_threads.h, test.c, test_cpp.cc, weakpointer.h, cord/README,
	cord/SCOPTIONS.amiga, cord/SMakefile.amiga, cord/cord.h,
	cord/ec.h, cord/gc.h, cord/private/cord_pos.h, include/backptr.h,
	include/gc_copy_descr.h, include/gc_nursery.h: Remove obsolete/moved
	files.


git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@42379 138bc75d-0d04-0410-961f-82ee72b054a4
---
 boehm-gc/mallocx.c | 376 +++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 305 insertions(+), 71 deletions(-)

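A note on the GC_realloc rewrite in the hunks below: it preserves the kind of the object it is given (an atomic, pointer-free object stays atomic), may move the object, and, as the ANSI-required case noted in the code, behaves like GC_malloc when handed a null pointer. A minimal client-side sketch of that contract, using only the public gc.h entry points GC_malloc_atomic and GC_realloc (the helper name and parameter are made up for illustration and are not part of the patch):

    #include <stddef.h>
    #include "gc.h"

    /* Illustrative helper: grow a pointer-free (atomic) buffer.  GC_realloc */
    /* keeps the kind of the old object and copies its contents, but a null  */
    /* argument degenerates to a plain GC_malloc of kind NORMAL, so that     */
    /* case is handled explicitly here to keep the buffer atomic.            */
    char *grow_atomic_buffer(char *old, size_t new_len)
    {
        if (old == 0) return (char *)GC_malloc_atomic(new_len);
        return (char *)GC_realloc(old, new_len);
    }
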

diff --git a/boehm-gc/mallocx.c b/boehm-gc/mallocx.c
index c842665237e..77c750fafbc 100644
--- a/boehm-gc/mallocx.c
+++ b/boehm-gc/mallocx.c
@@ -2,6 +2,7 @@
  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
  * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
+ * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
  *
  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
@@ -21,7 +22,7 @@
  */
 
 #include <stdio.h>
-#include "gc_priv.h"
+#include "private/gc_priv.h"
 
 extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
 void GC_extend_size_map();      /* in misc.c. */
@@ -30,69 +31,179 @@ GC_bool GC_alloc_reclaim_list();	/* in malloc.c */
 /* Some externally visible but unadvertised variables to allow access to */
 /* free lists from inlined allocators without including gc_priv.h	 */
 /* or introducing dependencies on internal data structure layouts.	 */
-ptr_t * CONST GC_objfreelist_ptr = GC_objfreelist;
-ptr_t * CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
-ptr_t * CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
+ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
+ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
+ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
 # ifdef ATOMIC_UNCOLLECTABLE
-    ptr_t * CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
+    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
 # endif
 
-/* Allocate a composite object of size n bytes.  The caller guarantees  */
-/* that pointers past the first page are not relevant.  Caller holds    */
-/* allocation lock.                                                     */
-ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
-register size_t lb;
-register int k;
+
+GC_PTR GC_generic_or_special_malloc(lb,knd)
+word lb;
+int knd;
 {
-    register struct hblk * h;
-    register word n_blocks;
-    register word lw;
-    register ptr_t op;
-
-    if (lb <= HBLKSIZE)
-        return(GC_generic_malloc_inner((word)lb, k));
-    n_blocks = divHBLKSZ(ADD_SLOP(lb) + HDR_BYTES + HBLKSIZE-1);
-    if (!GC_is_initialized) GC_init_inner();
-    /* Do our share of marking work */
-    if(GC_incremental && !GC_dont_gc)
-        GC_collect_a_little_inner((int)n_blocks);
-    lw = ROUNDED_UP_WORDS(lb);
-    h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
-#   ifdef USE_MUNMAP
-      if (0 == h) {
-        GC_merge_unmapped();
-        h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
-      }
-#   endif
-    while (0 == h && GC_collect_or_expand(n_blocks, TRUE)) {
-      h = GC_allochblk(lw, k, IGNORE_OFF_PAGE);
+    switch(knd) {
+#     ifdef STUBBORN_ALLOC
+	case STUBBORN:
+	    return(GC_malloc_stubborn((size_t)lb));
+#     endif
+	case PTRFREE:
+	    return(GC_malloc_atomic((size_t)lb));
+	case NORMAL:
+	    return(GC_malloc((size_t)lb));
+	case UNCOLLECTABLE:
+	    return(GC_malloc_uncollectable((size_t)lb));
+#       ifdef ATOMIC_UNCOLLECTABLE
+	  case AUNCOLLECTABLE:
+	    return(GC_malloc_atomic_uncollectable((size_t)lb));
+#	endif /* ATOMIC_UNCOLLECTABLE */
+	default:
+	    return(GC_generic_malloc(lb,knd));
     }
-    if (h == 0) {
-        op = 0;
+}
+
+
+/* Change the size of the block pointed to by p to contain at least   */
+/* lb bytes.  The object may be (and quite likely will be) moved.     */
+/* The kind (e.g. atomic) is the same as that of the old.	      */
+/* Shrinking of large blocks is not implemented well.                 */
+# ifdef __STDC__
+    GC_PTR GC_realloc(GC_PTR p, size_t lb)
+# else
+    GC_PTR GC_realloc(p,lb)
+    GC_PTR p;
+    size_t lb;
+# endif
+{
+register struct hblk * h;
+register hdr * hhdr;
+register word sz;	 /* Current size in bytes	*/
+register word orig_sz;	 /* Original sz in bytes	*/
+int obj_kind;
+
+    if (p == 0) return(GC_malloc(lb));	/* Required by ANSI */
+    h = HBLKPTR(p);
+    hhdr = HDR(h);
+    sz = hhdr -> hb_sz;
+    obj_kind = hhdr -> hb_obj_kind;
+    sz = WORDS_TO_BYTES(sz);
+    orig_sz = sz;
+
+    if (sz > MAXOBJBYTES) {
+	/* Round it up to the next whole heap block */
+	  register word descr;
+	  
+	  sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
+	  hhdr -> hb_sz = BYTES_TO_WORDS(sz);
+	  descr = GC_obj_kinds[obj_kind].ok_descriptor;
+          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
+          hhdr -> hb_descr = descr;
+	  if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
+	  /* Extra area is already cleared by GC_alloc_large_and_clear. */
+    }
+    if (ADD_SLOP(lb) <= sz) {
+	if (lb >= (sz >> 1)) {
+#	    ifdef STUBBORN_ALLOC
+	        if (obj_kind == STUBBORN) GC_change_stubborn(p);
+#	    endif
+	    if (orig_sz > lb) {
+	      /* Clear unneeded part of object to avoid bogus pointer */
+	      /* tracing.					      */
+	      /* Safe for stubborn objects.			      */
+	        BZERO(((ptr_t)p) + lb, orig_sz - lb);
+	    }
+	    return(p);
+	} else {
+	    /* shrink */
+	      GC_PTR result =
+	      		GC_generic_or_special_malloc((word)lb, obj_kind);
+
+	      if (result == 0) return(0);
+	          /* Could also return original object.  But this 	*/
+	          /* gives the client warning of imminent disaster.	*/
+	      BCOPY(p, result, lb);
+#	      ifndef IGNORE_FREE
+	        GC_free(p);
+#	      endif
+	      return(result);
+	}
     } else {
-        op = (ptr_t) (h -> hb_body);
-        GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
+	/* grow */
+	  GC_PTR result =
+	  	GC_generic_or_special_malloc((word)lb, obj_kind);
+
+	  if (result == 0) return(0);
+	  BCOPY(p, result, sz);
+#	  ifndef IGNORE_FREE
+	    GC_free(p);
+#	  endif
+	  return(result);
     }
-    GC_words_allocd += lw;
-    return((ptr_t)op);
 }
 
+# if defined(REDIRECT_MALLOC) || defined(REDIRECT_REALLOC)
+# ifdef __STDC__
+    GC_PTR realloc(GC_PTR p, size_t lb)
+# else
+    GC_PTR realloc(p,lb)
+    GC_PTR p;
+    size_t lb;
+# endif
+  {
+#   ifdef REDIRECT_REALLOC
+      return(REDIRECT_REALLOC(p, lb));
+#   else
+      return(GC_realloc(p, lb));
+#   endif
+  }
+# endif /* REDIRECT_MALLOC || REDIRECT_REALLOC */
+
+
+/* The same thing, except caller does not hold allocation lock.	*/
+/* We avoid holding allocation lock while we clear memory.	*/
 ptr_t GC_generic_malloc_ignore_off_page(lb, k)
 register size_t lb;
 register int k;
 {
     register ptr_t result;
+    word lw;
+    word n_blocks;
+    GC_bool init;
     DCL_LOCK_STATE;
     
+    if (SMALL_OBJ(lb))
+        return(GC_generic_malloc((word)lb, k));
+    lw = ROUNDED_UP_WORDS(lb);
+    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
+    init = GC_obj_kinds[k].ok_init;
     GC_INVOKE_FINALIZERS();
     DISABLE_SIGNALS();
     LOCK();
-    result = GC_generic_malloc_inner_ignore_off_page(lb,k);
+    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
+    if (0 != result) {
+        if (GC_debugging_started) {
+	    BZERO(result, n_blocks * HBLKSIZE);
+        } else {
+#           ifdef THREADS
+	      /* Clear any memory that might be used for GC descriptors */
+	      /* before we release the lock.			      */
+	        ((word *)result)[0] = 0;
+	        ((word *)result)[1] = 0;
+	        ((word *)result)[lw-1] = 0;
+	        ((word *)result)[lw-2] = 0;
+#	    endif
+        }
+    }
+    GC_words_allocd += lw;
     UNLOCK();
     ENABLE_SIGNALS();
     if (0 == result) {
         return((*GC_oom_fn)(lb));
     } else {
+    	if (init & !GC_debugging_started) {
+	    BZERO(result, n_blocks * HBLKSIZE);
+        }
         return(result);
     }
 }
@@ -185,6 +296,24 @@ DCL_LOCK_STATE;
 }
 
 #if defined(THREADS) && !defined(SRC_M3)
+
+extern signed_word GC_mem_found;   /* Protected by GC lock.  */
+
+#ifdef PARALLEL_MARK
+volatile signed_word GC_words_allocd_tmp = 0;
+                        /* Number of words of memory allocated since    */
+                        /* we released the GC lock.  Instead of         */
+                        /* reacquiring the GC lock just to add this in, */
+                        /* we add it in the next time we reacquire      */
+                        /* the lock.  (Atomically adding it doesn't     */
+                        /* work, since we would have to atomically      */
+                        /* update it in GC_malloc, which is too         */
+                        /* expensive.)                                  */
+#endif /* PARALLEL_MARK */
+
+/* See reclaim.c: */
+extern ptr_t GC_reclaim_generic();
+
 /* Return a list of 1 or more objects of the indicated size, linked	*/
 /* through the first word in the object.  This has the advantage that	*/
 /* it acquires the allocation lock only once, and may greatly reduce	*/
@@ -200,12 +329,19 @@ register word lb;
 register int k;
 {
 ptr_t op;
-register ptr_t p;
+ptr_t p;
 ptr_t *opp;
 word lw;
-register word my_words_allocd;
+word my_words_allocd = 0;
+struct obj_kind * ok = &(GC_obj_kinds[k]);
 DCL_LOCK_STATE;
 
+#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
+#     define COUNT_ARG , &my_words_allocd
+#   else
+#     define COUNT_ARG
+#     define NEED_TO_COUNT
+#   endif
     if (!SMALL_OBJ(lb)) {
         op = GC_generic_malloc(lb, k);
         if(0 != op) obj_link(op) = 0;
@@ -215,40 +351,142 @@ DCL_LOCK_STATE;
     GC_INVOKE_FINALIZERS();
     DISABLE_SIGNALS();
     LOCK();
-    opp = &(GC_obj_kinds[k].ok_freelist[lw]);
-    if( (op = *opp) == 0 ) {
-        if (!GC_is_initialized) {
-            GC_init_inner();
-        }
-	op = GC_clear_stack(GC_allocobj(lw, k));
-	if (op == 0) {
-	    UNLOCK();
-	    ENABLE_SIGNALS();
-	    op = (*GC_oom_fn)(lb);
-	    if(0 != op) obj_link(op) = 0;
-            return(op);
-	}
+    if (!GC_is_initialized) GC_init_inner();
+    /* First see if we can reclaim a page of objects waiting to be */
+    /* reclaimed.						   */
+    {
+	struct hblk ** rlh = ok -> ok_reclaim_list;
+	struct hblk * hbp;
+	hdr * hhdr;
+
+	rlh += lw;
+    	while ((hbp = *rlh) != 0) {
+            hhdr = HDR(hbp);
+            *rlh = hhdr -> hb_next;
+#	    ifdef PARALLEL_MARK
+		{
+		  signed_word my_words_allocd_tmp = GC_words_allocd_tmp;
+
+		  GC_ASSERT(my_words_allocd_tmp >= 0);
+		  /* We only decrement it while holding the GC lock.	*/
+		  /* Thus we can't accidentally adjust it down in more	*/
+		  /* than one thread simultaneously.			*/
+		  if (my_words_allocd_tmp != 0) {
+		    (void)GC_atomic_add(
+				(volatile GC_word *)(&GC_words_allocd_tmp),
+				(GC_word)(-my_words_allocd_tmp));
+		    GC_words_allocd += my_words_allocd_tmp;
+		  }
+		}
+		GC_acquire_mark_lock();
+		++ GC_fl_builder_count;
+		UNLOCK();
+		ENABLE_SIGNALS();
+		GC_release_mark_lock();
+#	    endif
+	    op = GC_reclaim_generic(hbp, hhdr, lw,
+				    ok -> ok_init, 0 COUNT_ARG);
+            if (op != 0) {
+#	      ifdef NEED_TO_COUNT
+		/* We are neither gathering statistics, nor marking in	*/
+		/* parallel.  Thus GC_reclaim_generic doesn't count	*/
+		/* for us.						*/
+    		for (p = op; p != 0; p = obj_link(p)) {
+        	  my_words_allocd += lw;
+		}
+#	      endif
+#	      if defined(GATHERSTATS)
+	        /* We also reclaimed memory, so we need to adjust 	*/
+	        /* that count.						*/
+		/* This should be done atomically; since it is not,	*/
+		/* the results may be inaccurate.			*/
+		GC_mem_found += my_words_allocd;
+#	      endif
+#	      ifdef PARALLEL_MARK
+		(void)GC_atomic_add(
+				(volatile GC_word *)(&GC_words_allocd_tmp),
+				(GC_word)(my_words_allocd));
+		GC_acquire_mark_lock();
+		-- GC_fl_builder_count;
+		if (GC_fl_builder_count == 0) GC_notify_all_builder();
+		GC_release_mark_lock();
+		return GC_clear_stack(op);
+#	      else
+	        GC_words_allocd += my_words_allocd;
+	        goto out;
+#	      endif
+	    }
+#	    ifdef PARALLEL_MARK
+	      GC_acquire_mark_lock();
+	      -- GC_fl_builder_count;
+	      if (GC_fl_builder_count == 0) GC_notify_all_builder();
+	      GC_release_mark_lock();
+	      DISABLE_SIGNALS();
+	      LOCK();
+	      /* GC lock is needed for reclaim list access.	We	*/
+	      /* must decrement fl_builder_count before reacquiring GC	*/
+	      /* lock.  Hopefully this path is rare.			*/
+#	    endif
+    	}
     }
-    *opp = 0;
-    my_words_allocd = 0;
-    for (p = op; p != 0; p = obj_link(p)) {
-        my_words_allocd += lw;
-        if (my_words_allocd >= BODY_SZ) {
+    /* Next try to use prefix of global free list if there is one.	*/
+    /* We don't refill it, but we need to use it up before allocating	*/
+    /* a new block ourselves.						*/
+      opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+      if ( (op = *opp) != 0 ) {
+	*opp = 0;
+        my_words_allocd = 0;
+        for (p = op; p != 0; p = obj_link(p)) {
+          my_words_allocd += lw;
+          if (my_words_allocd >= BODY_SZ) {
             *opp = obj_link(p);
             obj_link(p) = 0;
             break;
+	  }
         }
+	GC_words_allocd += my_words_allocd;
+	goto out;
+      }
+    /* Next try to allocate a new block worth of objects of this size.	*/
+    {
+	struct hblk *h = GC_allochblk(lw, k, 0);
+	if (h != 0) {
+	  if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
+	  GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
+			       - BYTES_TO_WORDS(HBLKSIZE) % lw;
+#	  ifdef PARALLEL_MARK
+	    GC_acquire_mark_lock();
+	    ++ GC_fl_builder_count;
+	    UNLOCK();
+	    ENABLE_SIGNALS();
+	    GC_release_mark_lock();
+#	  endif
+
+	  op = GC_build_fl(h, lw, ok -> ok_init, 0);
+#	  ifdef PARALLEL_MARK
+	    GC_acquire_mark_lock();
+	    -- GC_fl_builder_count;
+	    if (GC_fl_builder_count == 0) GC_notify_all_builder();
+	    GC_release_mark_lock();
+	    return GC_clear_stack(op);
+#	  else
+	    goto out;
+#	  endif
+	}
     }
-    GC_words_allocd += my_words_allocd;
     
-out:
+    /* As a last attempt, try allocating a single object.  Note that	*/
+    /* this may trigger a collection or expand the heap.		*/
+      op = GC_generic_malloc_inner(lb, k);
+      if (0 != op) obj_link(op) = 0;
+    
+  out:
     UNLOCK();
     ENABLE_SIGNALS();
-    return(op);
-
+    return(GC_clear_stack(op));
 }
 
-void * GC_malloc_many(size_t lb)
+GC_PTR GC_malloc_many(size_t lb)
 {
     return(GC_generic_malloc_many(lb, NORMAL));
 }
@@ -272,11 +510,9 @@ DCL_LOCK_STATE;
 
     if( SMALL_OBJ(lb) ) {
 #       ifdef MERGE_SIZES
-#	  ifdef ADD_BYTE_AT_END
-	    if (lb != 0) lb--;
+	  if (EXTRA_BYTES != 0 && lb != 0) lb--;
 	    	  /* We don't need the extra byte, since this won't be	*/
 	    	  /* collected anyway.					*/
-#	  endif
 	  lw = GC_size_map[lb];
 #	else
 	  lw = ALIGNED_WORDS(lb);
@@ -338,11 +574,9 @@ DCL_LOCK_STATE;
 
     if( SMALL_OBJ(lb) ) {
 #       ifdef MERGE_SIZES
-#	  ifdef ADD_BYTE_AT_END
-	    if (lb != 0) lb--;
+	  if (EXTRA_BYTES != 0 && lb != 0) lb--;
 	    	  /* We don't need the extra byte, since this won't be	*/
 	    	  /* collected anyway.					*/
-#	  endif
 	  lw = GC_size_map[lb];
 #	else
 	  lw = ALIGNED_WORDS(lb);
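
A note on the GC_generic_malloc_many rewrite above: it now tries, in order, the kind's reclaim list, a prefix of the global free list, a freshly allocated heap block, and finally a single-object allocation, so that a whole list of same-sized objects comes back from a single acquisition of the allocation lock. A client-side sketch of consuming such a list through the public GC_malloc_many / GC_NEXT interface in gc.h (NODE_BYTES and alloc_node are made-up names for illustration and are not part of the patch):

    #include <stddef.h>
    #include "gc.h"

    #define NODE_BYTES 32              /* hypothetical fixed object size */

    static void *node_free_list = 0;

    /* Pop one NODE_BYTES-sized object, refilling from GC_malloc_many when   */
    /* the local list runs dry.  The returned objects are linked through     */
    /* their first word; GC_NEXT (defined in gc.h) reads that link word.     */
    void *alloc_node(void)
    {
        void *result;

        if (node_free_list == 0) {
            node_free_list = GC_malloc_many(NODE_BYTES);   /* one lock trip */
            if (node_free_list == 0) return 0;             /* out of memory */
        }
        result = node_free_list;
        node_free_list = GC_NEXT(result);
        GC_NEXT(result) = 0;      /* clear the link word before handing out */
        return result;
    }
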
-- 