path: root/thread_local_alloc.c
author     hboehm <hboehm>                          2006-04-25 04:17:59 +0000
committer  Ivan Maidanski <ivmai@mail.ru>           2011-07-26 21:06:36 +0400
commit     ee711c32e6d38068d6077cfc7be948057ad5e0fe (patch)
tree       a00fed5f4ee24b1e0e99c141d8f39b8839f69950 /thread_local_alloc.c
parent     d268a8c5e567634baf8f3ec618bb83f27266c5bc (diff)
download   bdwgc-ee711c32e6d38068d6077cfc7be948057ad5e0fe.tar.gz
2006-04-24  Hans Boehm  <Hans.Boehm@hp.com>

	* thread_local_alloc.c, include/gc_inline.h,
	include/private/thread_local_alloc.h: Make non-gcj local free
	lists for size 0 contain normal 1-granule objects.
	* test/test.c: Add test for explicit deallocation of size-zero
	objects.
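For orientation, here is a minimal sketch of the behavior the new test
exercises (illustrative only, not the code added to test.c): size 0
requests are now served from the normal 1-granule free list, so the
resulting objects are distinct and can be explicitly deallocated.

#include "gc.h"
#include <assert.h>

int main(void)
{
    void *p, *q;

    GC_INIT();
    p = GC_MALLOC(0);   /* now a real 1-granule object */
    q = GC_MALLOC(0);
    assert(p != q);     /* previously both could be the shared static size-0 object */
    GC_FREE(p);         /* explicit deallocation of size 0 objects must work */
    GC_FREE(q);
    return 0;
}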
Diffstat (limited to 'thread_local_alloc.c')
-rw-r--r--  thread_local_alloc.c  53
1 file changed, 32 insertions, 21 deletions
diff --git a/thread_local_alloc.c b/thread_local_alloc.c
index 8cb105f8..4b9629ec 100644
--- a/thread_local_alloc.c
+++ b/thread_local_alloc.c
@@ -28,6 +28,27 @@ GC_key_t GC_thread_key;
static GC_bool keys_initialized;
+/* Return a single nonempty freelist fl to the global one pointed to */
+/* by gfl. */
+
+static void return_single_freelist(void *fl, void **gfl)
+{
+    void *q, **qptr;
+
+    if (fl == ERROR_FL) return;
+    if (*gfl == 0) {
+        *gfl = fl;
+    } else {
+        GC_ASSERT(GC_size(fl) == GC_size(*gfl));
+        /* Concatenate: */
+        for (qptr = &(obj_link(fl)), q = *qptr;
+             (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
+        GC_ASSERT(0 == q);
+        *qptr = *gfl;
+        *gfl = fl;
+    }
+}
+
/* Recover the contents of the freelist array fl into the global one gfl.*/
/* We hold the allocator lock. */
static void return_freelists(void **fl, void **gfl)
@@ -37,31 +58,18 @@ static void return_freelists(void **fl, void **gfl)
    for (i = 1; i < TINY_FREELISTS; ++i) {
        if ((word)(fl[i]) >= HBLKSIZE) {
-           if (gfl[i] == 0) {
-               gfl[i] = fl[i];
-           } else {
-               GC_ASSERT(GC_size(fl[i]) == GRANULES_TO_BYTES(i));
-               GC_ASSERT(GC_size(gfl[i]) == GRANULES_TO_BYTES(i));
-               /* Concatenate: */
-               for (qptr = fl+i, q = *qptr;
-                    (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
-               GC_ASSERT(0 == q);
-               *qptr = gfl[i];
-               gfl[i] = fl[i];
-           }
+           return_single_freelist(fl[i], gfl+i);
        }
        /* Clear fl[i], since the thread structure may hang around. */
        /* Do it in a way that is likely to trap if we access it.   */
        fl[i] = (ptr_t)HBLKSIZE;
    }
+   /* The 0 granule freelist really contains 1 granule objects.    */
+   if ((word)(fl[0]) >= HBLKSIZE && fl[0] != ERROR_FL) {
+       return_single_freelist(fl[0], gfl+1);
+   }
}
-/* We statically allocate a single "size 0" object. It is linked to */
-/* itself, and is thus repeatedly reused for all size 0 allocation */
-/* requests. (Size 0 gcj allocation requests are incorrect, and */
-/* we arrange for those to fault asap.) */
-static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
-
/* Each thread structure must be initialized. */
/* This call must be made from the new thread. */
/* Caller holds allocation lock. */
@@ -86,10 +94,13 @@ void GC_init_thread_local(GC_tlfs p)
# endif
}
    /* Set up the size 0 free lists. */
-   p -> ptrfree_freelists[0] = (void *)(&size_zero_object);
-   p -> normal_freelists[0] = (void *)(&size_zero_object);
+   /* We now handle most of them like regular free lists, to ensure */
+   /* that explicit deallocation works.  However, allocation of a   */
+   /* size 0 "gcj" object is always an error.                       */
+   p -> ptrfree_freelists[0] = (void *)1;
+   p -> normal_freelists[0] = (void *)1;
# ifdef GC_GCJ_SUPPORT
-     p -> gcj_freelists[0] = (void *)(-1);
+     p -> gcj_freelists[0] = ERROR_FL;
# endif
}
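A note on the free-list header encoding this change relies on (the
constants below are assumptions for illustration, drawn from the
private headers rather than from this diff): a header word smaller
than HBLKSIZE cannot be a valid object pointer, so it encodes state
instead. Small values act as allocation counters on the slow path,
(void *)1 is the freshly initialized state, and ERROR_FL marks a
list that must never be used (size 0 "gcj" requests).

#include <stddef.h>

typedef size_t word;                  /* stands in for the collector's word type */
#define HBLKSIZE 4096                 /* illustrative; really platform-dependent */
#define ERROR_FL ((void *)(word)-1)   /* assumed to match the private header */

/* Mirror of the test applied to fl[0] in return_freelists above:  */
/* only a header that is a genuine pointer, and not the error      */
/* marker, names a list that may be returned to the global one.    */
static int is_usable_list(void *fl_head)
{
    return (word)fl_head >= HBLKSIZE && fl_head != ERROR_FL;
}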