author    hboehm <hboehm>                          2006-04-28 23:40:34 +0000
committer Ivan Maidanski <ivmai@mail.ru>           2011-07-26 21:06:37 +0400
commit    e0c23aa40014934485f5d853448bfb0d41404672 (patch)
tree      357300d7e0a4e698285950fca6dd21e3ced0892e /thread_local_alloc.c
parent    2659a10bdb9922749ef1fabfade9f5ced6e551f7 (diff)
download  bdwgc-e0c23aa40014934485f5d853448bfb0d41404672.tar.gz
2006-04-28 Hans Boehm <Hans.Boehm@hp.com>
* thread_local_alloc.c (GC_gcj_malloc): Once again punt in incremental mode. The old code contained unavoidable races in that case.
Diffstat (limited to 'thread_local_alloc.c')
-rw-r--r--  thread_local_alloc.c  26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/thread_local_alloc.c b/thread_local_alloc.c
index 4b9629ec..e44cf71f 100644
--- a/thread_local_alloc.c
+++ b/thread_local_alloc.c
@@ -197,9 +197,30 @@ void * GC_malloc_atomic(size_t bytes)
extern int GC_gcj_kind;
+/* Gcj-style allocation without locks is extremely tricky. The */
+/* fundamental issue is that we may end up marking a free list, which */
+/* has freelist links instead of "vtable" pointers. That is usually */
+/* OK, since the next object on the free list will be cleared, and */
+/* will thus be interpreted as containing a zero descriptor. That's fine */
+/* if the object has not yet been initialized. But there are */
+/* interesting potential races. */
+/* In the case of incremental collection, this seems hopeless, since */
+/* the marker may run asynchronously, and may pick up the pointer to */
+/* the next freelist entry (which it thinks is a vtable pointer), get */
+/* suspended for a while, and then see an allocated object instead */
+/* of the vtable. This may be avoidable with either a handshake with */
+/* the collector or, probably more easily, by moving the free list */
+/* links to the second word of each object. The latter isn't a */
+/* universal win, since on architectures like Itanium, nonzero offsets */
+/* are not necessarily free. And there may be cache fill order issues. */
+/* For now, we punt with incremental GC. This probably means that */
+/* incremental GC should be enabled before we fork a second thread. */
void * GC_gcj_malloc(size_t bytes,
                     void * ptr_to_struct_containing_descr)
{
+    if (GC_EXPECT(GC_incremental, 0)) {
+        return GC_core_gcj_malloc(bytes, ptr_to_struct_containing_descr);
+    } else {
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *result;
    void **tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))
@@ -226,8 +247,11 @@ void * GC_gcj_malloc(size_t bytes,
/* Thus it is impossible for a mark procedure to see the */
/* allocation of the next object, but to see this object */
/* still containing a free list pointer. Otherwise the */
- /* marker might find a random "mark descriptor". */
+ /* marker, by misinterpreting the freelist link as a vtable */
+ /* pointer, might find a random "mark descriptor" in the next */
+ /* object. */
    return result;
+    }
}
#endif /* GC_GCJ_SUPPORT */
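
The race described in the new comment reduces to a single word of memory being read under two interpretations. The following is a minimal illustrative sketch, not code from this commit or from the collector: the type names gcj_vtable, gcj_object, and freelist_entry are hypothetical, and the layout merely follows the gcj object model the comment assumes (vtable pointer in the first word, mark descriptor reached through the vtable).

/* A gcj-style object starts with a vtable pointer; the marker       */
/* fetches a mark descriptor through it to locate pointer fields.    */
struct gcj_vtable {
    unsigned long mark_descriptor;
};
struct gcj_object {
    struct gcj_vtable *vtable;    /* first word of every live object */
    /* ... object fields ... */
};

/* While the same memory sits on a thread-local free list, its first */
/* word is reused as the link to the next free entry, so the link    */
/* aliases gcj_object.vtable above.                                  */
struct freelist_entry {
    struct freelist_entry *next;
};

/* The hazard with an asynchronous (incremental) marker:             */
/*   1. marker loads obj->vtable, actually reading a freelist link;  */
/*   2. marker is suspended for a while;                             */
/*   3. mutator allocates that entry and initializes the object;     */
/*   4. marker resumes and dereferences the stale link as a vtable,  */
/*      picking up a random "mark descriptor".                       */

With stop-the-world marking, step 3 cannot interleave with the others, which is presumably why the lock-free path remains tolerable there; with incremental marking there is no such guarantee, so the commit routes allocation through the locked GC_core_gcj_malloc path instead. The alternative the comment mentions, keeping freelist links in the second word of each object, would presumably leave the first word of a free entry zero, so a racing marker would read a null vtable (a zero descriptor) rather than a link, at some cost on architectures where nonzero offsets are not free.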