-rw-r--r--  alloc.c                     44
-rw-r--r--  include/private/gc_priv.h    3
-rw-r--r--  specific.c                    7
3 files changed, 42 insertions, 12 deletions
diff --git a/alloc.c b/alloc.c
index 1d365f4d..3cfcd831 100644
--- a/alloc.c
+++ b/alloc.c
@@ -734,17 +734,41 @@ GC_INNER void GC_set_fl_marks(ptr_t q)
 /* (*pfreelist) are set.  Check skipped if it points to a special value. */
 void GC_check_fl_marks(void **pfreelist)
 {
-    ptr_t list = *pfreelist;
-    ptr_t p;
-
-    if ((word)list <= HBLKSIZE) return;
-    for (p = list; p != 0; p = obj_link(p)) {
-      if (!GC_is_marked(p)) {
-        GC_err_printf("Unmarked object %p on list %p\n", p, list);
-        ABORT("Unmarked local free list entry");
-      }
-    }
+#   ifdef AO_HAVE_load_acquire_read
+      AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
+              /* Atomic operations are used because the world is running. */
+      AO_t *prev;
+      AO_t *p;
+
+      if ((word)list <= HBLKSIZE) return;
+
+      prev = (AO_t *)pfreelist;
+      for (p = list; p != NULL;) {
+        AO_t *next;
+
+        if (!GC_is_marked(p)) {
+          GC_err_printf("Unmarked object %p on list %p\n",
+                        (void *)p, (void *)list);
+          ABORT("Unmarked local free list entry");
+        }
+        /* While traversing the free list, re-read the pointer to    */
+        /* the current node before accepting its next pointer, and   */
+        /* bail out if that pointer has changed.  That way, we never */
+        /* follow a link which might have been modified after the    */
+        /* object was returned to the client.  We might perform the  */
+        /* mark-check on a just-allocated object, but that should    */
+        /* be harmless.                                              */
+        next = (AO_t *)AO_load_acquire_read(p);
+        if (AO_load(prev) != (AO_t)p)
+          break;
+        prev = p;
+        p = next;
+      }
+#   else
+      /* FIXME: Not implemented (just skipped). */
+      (void)pfreelist;
+#   endif
 }
 #endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
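
The loop above is a lock-free validation traversal: read the next link with acquire semantics, then re-read the link slot that led to the current node and bail out if it changed. Below is a minimal stand-alone sketch of the same pattern. It is hypothetical, not bdwgc code: C11 atomics stand in for libatomic_ops, and walk_concurrent_list, node, and payload are invented names.

/* sketch.c -- hypothetical illustration, not bdwgc code.  Walks a
   singly-linked list that another thread may concurrently pop from.
   Before following a node's next pointer, re-read the link slot that
   led to the node; if it no longer points there, the node may already
   belong to a client thread, so stop rather than chase a stale link. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
  _Atomic(struct node *) next;
  int payload;
};

static void walk_concurrent_list(_Atomic(struct node *) *head)
{
  _Atomic(struct node *) *prev = head;
  struct node *p = atomic_load_explicit(head, memory_order_acquire);

  while (p != NULL) {
    struct node *next;

    printf("visiting node %d\n", p->payload);   /* the "mark check" */
    next = atomic_load_explicit(&p->next, memory_order_acquire);
    /* Re-check the link we came through; bail out if it changed. */
    if (atomic_load_explicit(prev, memory_order_relaxed) != p)
      break;
    prev = &p->next;
    p = next;
  }
}

int main(void)
{
  struct node a, b, c;
  _Atomic(struct node *) head;

  a.payload = 1; b.payload = 2; c.payload = 3;
  atomic_init(&a.next, &b);
  atomic_init(&b.next, &c);
  atomic_init(&c.next, NULL);
  atomic_init(&head, &a);
  walk_concurrent_list(&head);
  return 0;
}

One difference from GC_check_fl_marks: there a free-list link is the first word of the object itself, so the GC code can advance with prev = p; in this sketch the link is a named field, hence prev = &p->next.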
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index 8287e030..5cfb5438 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -832,7 +832,8 @@ typedef word page_hash_table[PHT_SIZE];
 # define counter_t volatile AO_t
 #else
   typedef size_t counter_t;
-# if defined(THREADS) && defined(MPROTECT_VDB)
+# if defined(THREADS) && (defined(MPROTECT_VDB) \
+       || (defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)))
 #   include "atomic_ops.h"
 # endif
 #endif /* !PARALLEL_MARK */
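
The widened condition matters because atomic_ops.h defines feature-test macros of the form AO_HAVE_<op>, and the #ifdef in GC_check_fl_marks tests one of them; without the header, non-PARALLEL_MARK assertion builds would silently compile the FIXME branch. A small probe, assuming the libatomic_ops headers are installed:

/* probe.c -- hypothetical feature probe, not part of the patch. */
#include <stdio.h>
#include "atomic_ops.h"

int main(void)
{
#ifdef AO_HAVE_load_acquire_read
    puts("AO_load_acquire_read present: strengthened check compiles in");
#else
    puts("primitive missing: GC_check_fl_marks body is skipped");
#endif
    return 0;
}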
diff --git a/specific.c b/specific.c
index e35c9b57..20c78ade 100644
--- a/specific.c
+++ b/specific.c
@@ -44,14 +44,19 @@ GC_INNER int GC_key_create_inner(tsd ** key_ptr)
   return 0;
 }
 
+/* Called with the lock held. */
 GC_INNER int GC_setspecific(tsd * key, void * value)
 {
   pthread_t self = pthread_self();
   int hash_val = HASH(self);
-  volatile tse * entry = (volatile tse *)MALLOC_CLEAR(sizeof (tse));
+  volatile tse * entry;
 
   GC_ASSERT(self != INVALID_THREADID);
+  GC_dont_gc++; /* disable GC */
+  entry = (volatile tse *)MALLOC_CLEAR(sizeof(tse));
+  GC_dont_gc--;
   if (0 == entry) return ENOMEM;
+
   pthread_mutex_lock(&(key -> lock));
   /* Could easily check for an existing entry here. */
   entry -> next = key->hash[hash_val].p;
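
The GC_dont_gc++/-- pair brackets the internal allocation so that no collection can start while the thread-specific entry is being installed. A toy reduction of the counter pattern follows; the names are hypothetical and plain calloc stands in for MALLOC_CLEAR. A counter is used rather than a boolean flag so that nested inhibited regions compose.

/* toy.c -- hypothetical reduction, not bdwgc code. */
#include <stdio.h>
#include <stdlib.h>

static int dont_gc;                 /* > 0 means collections are inhibited */

static void maybe_collect(void)
{
    if (dont_gc > 0) {
        puts("collection inhibited");
        return;
    }
    puts("collecting...");
}

static void *internal_alloc(size_t n)
{
    void *p;

    dont_gc++;                      /* matches GC_dont_gc++ in the diff */
    maybe_collect();                /* an allocation may trigger this */
    p = calloc(1, n);               /* stand-in for MALLOC_CLEAR */
    dont_gc--;                      /* matches GC_dont_gc-- */
    return p;
}

int main(void)
{
    void *p = internal_alloc(32);

    maybe_collect();                /* collections are permitted again */
    free(p);
    return 0;
}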