author     Ivan Maidanski <ivmai@mail.ru>    2022-06-08 10:24:39 +0300
committer  Ivan Maidanski <ivmai@mail.ru>    2022-06-17 00:50:45 +0300
commit     84a018dc1adcb6341d419404abd633dfb75af6a0 (patch)
tree       02e11a7c022c5bf7d085f5b5af272235b8acd5ea
parent     921f5adf30b93e3d76281c698e49af1dc2dcb67d (diff)
download   bdwgc-84a018dc1adcb6341d419404abd633dfb75af6a0.tar.gz
Fix race between calloc_explicitly_typed and push_complex_descriptor
(a cherry-pick of commit 0a0c26df5 from 'release-7_6')

Issue #449 (bdwgc).

Use an atomic store-release operation to write a simple descriptor to the object in GC_malloc_explicitly_typed[_ignore_off_page] to ensure the descriptor is seen by GC_typed_mark_proc (even if the object has not been fully constructed by the client yet). Hold the allocation lock while writing a complex descriptor to the object (in GC_calloc_explicitly_typed) to ensure that the descriptor is seen by GC_array_mark_proc as expected.

* typd_mlc.c (set_obj_descr): Define macro (using AO_store_release if available).
* typd_mlc.c (GC_malloc_explicitly_typed, GC_malloc_explicitly_typed_ignore_off_page): Use set_obj_descr() instead of direct write to *op.
* typd_mlc.c (GC_calloc_explicitly_typed): Remove volatile for lp local variable; add comment; wrap writing to op[lw-1] into LOCK/UNLOCK.

Co-authored-by: Hans Boehm <boehm@acm.org>
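The fix relies on pairing a release store on the allocator side with the read done by a concurrently running mark procedure. Below is a minimal, hypothetical sketch of that publish/observe pattern, assuming libatomic_ops provides AO_store_release and AO_load_acquire on the target; publish_descr and observe_descr are illustrative names only, not bdwgc functions.

#include <stddef.h>
#include <atomic_ops.h>

/* Writer (allocator) side: publish the descriptor in the last word of   */
/* the object with release semantics, so every earlier initialization    */
/* of the object becomes visible no later than the descriptor itself.    */
static void publish_descr(AO_t *obj, size_t nwords, AO_t descr)
{
  AO_store_release(obj + nwords - 1, descr);
}

/* Reader (mark procedure) side: an acquire load pairs with the release  */
/* store above, so observing a descriptor implies the object contents    */
/* it describes are also visible to this thread.                         */
static AO_t observe_descr(const AO_t *obj, size_t nwords)
{
  return AO_load_acquire((const volatile AO_t *)(obj + nwords - 1));
}

In the commit itself the writer side corresponds to the set_obj_descr() macro added below; the complex-descriptor path keeps using the allocation lock rather than an acquire barrier, for the reason spelled out in the TODO comment in the diff.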
-rw-r--r--    typd_mlc.c    31
1 file changed, 26 insertions(+), 5 deletions(-)
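For context, here is a hedged usage sketch of the typed-allocation interface touched by this change. The declarations and bitmap macros come from gc_typed.h; the struct, the function name, and the include path are assumptions made for illustration only.

#include <stddef.h>
#include <gc/gc_typed.h>

struct node {
  struct node *next;  /* pointer field: must be traced by the collector   */
  long value;         /* non-pointer field: may be skipped by the marker  */
};

/* Allocate one typed node. As the diff shows, the collector stores the   */
/* GC_descr in the last word of the object, which is exactly the word the */
/* fixed race is about.                                                   */
static void *alloc_node(void)
{
  GC_word bitmap[GC_BITMAP_SIZE(struct node)] = { 0 };
  GC_descr d;

  GC_set_bit(bitmap, GC_WORD_OFFSET(struct node, next));
  d = GC_make_descriptor(bitmap, GC_WORD_LEN(struct node));
  return GC_malloc_explicitly_typed(sizeof(struct node), d);
}

A real program would call GC_INIT() before the first allocation; arrays of such elements would go through GC_calloc_explicitly_typed, which is the path that now takes the allocation lock when it writes a complex descriptor.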
diff --git a/typd_mlc.c b/typd_mlc.c
index 709e578e..f19655a0 100644
--- a/typd_mlc.c
+++ b/typd_mlc.c
@@ -497,6 +497,14 @@ STATIC mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
}
}
+#ifdef AO_HAVE_store_release
+# define set_obj_descr(op, nwords, d) \
+ AO_store_release((volatile AO_t *)(op) + (nwords) - 1, (AO_t)(d))
+#else
+# define set_obj_descr(op, nwords, d) \
+ (void)(((word *)(op))[(nwords) - 1] = (word)(d))
+#endif
+
STATIC mse * GC_array_mark_proc(word * addr, mse * mark_stack_ptr,
mse * mark_stack_limit,
word env GC_ATTR_UNUSED)
@@ -644,7 +652,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_explicitly_typed(size_t lb,
return NULL;
lg = BYTES_TO_GRANULES(GC_size(op));
}
- ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ set_obj_descr(op, GRANULES_TO_WORDS(lg), d);
GC_dirty(op + GRANULES_TO_WORDS(lg) - 1);
REACHABLE_AFTER_DIRTY(d);
return((void *) op);
@@ -683,7 +691,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL
if (NULL == op) return NULL;
lg = BYTES_TO_GRANULES(GC_size(op));
}
- ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ set_obj_descr(op, GRANULES_TO_WORDS(lg), d);
GC_dirty((word *)op + GRANULES_TO_WORDS(lg) - 1);
REACHABLE_AFTER_DIRTY(d);
return op;
@@ -743,7 +751,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_calloc_explicitly_typed(size_t n,
}
if (descr_type == LEAF) {
/* Set up the descriptor inside the object itself. */
- volatile struct LeafDescriptor * lp =
+ struct LeafDescriptor * lp =
(struct LeafDescriptor *)
((word *)op
+ GRANULES_TO_WORDS(lg)
@@ -753,12 +761,25 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_calloc_explicitly_typed(size_t n,
lp -> ld_size = leaf.ld_size;
lp -> ld_nelements = leaf.ld_nelements;
lp -> ld_descriptor = leaf.ld_descriptor;
- ((volatile word *)op)[GRANULES_TO_WORDS(lg) - 1] = (word)lp;
+
+ /* Hold the allocation lock while writing the descriptor word */
+ /* to the object to ensure that the descriptor contents are */
+ /* seen by GC_array_mark_proc as expected. */
+ /* TODO: It should be possible to replace locking with the */
+ /* atomic operations (with the release barrier here) but, in */
+ /* this case, avoiding the acquire barrier in */
+ /* GC_array_mark_proc seems to be tricky as GC_mark_some might */
+ /* be invoked with the world running. */
+ LOCK();
+ ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = (word)lp;
+ UNLOCK();
} else {
# ifndef GC_NO_FINALIZATION
size_t lw = GRANULES_TO_WORDS(lg);
- ((word *)op)[lw - 1] = (word)complex_descr;
+ LOCK();
+ ((word *)op)[lw - 1] = (word)complex_descr;
+ UNLOCK();
GC_dirty(op + lw - 1);
REACHABLE_AFTER_DIRTY(complex_descr);
/* Make sure the descriptor is cleared once there is any danger */