summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Ivan Maidanski <ivmai@mail.ru> 2022-02-20 10:51:25 +0300
committer: Ivan Maidanski <ivmai@mail.ru> 2022-02-20 10:51:25 +0300
commit: aecb7206376d5aff68a41352129db957480259bf (patch)
tree: 4338542ef9992103b1d6d19945c49fe582a7fef7
parent: e7af919c070fee36d0b011069ff0484079a27103 (diff)
download: libatomic_ops-aecb7206376d5aff68a41352129db957480259bf.tar.gz
Repeat black list check on CAS fail in stack_push_explicit_aux_release
Also, execute the first read in a loop with an acquire barrier, and place black list checking as close to CAS as possible. * src/atomic_ops_stack.c [AO_USE_ALMOST_LOCK_FREE] (AO_stack_push_explicit_aux_release): Use acquire barrier to read list value (stored to next local variable); read list value and store it to x element before iterating over AO_stack_bl (and, thus, retry iterating over AO_stack_bl if CAS failed).
-rw-r--r-- src/atomic_ops_stack.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/src/atomic_ops_stack.c b/src/atomic_ops_stack.c
index 379adbf..2e8626a 100644
--- a/src/atomic_ops_stack.c
+++ b/src/atomic_ops_stack.c
@@ -112,7 +112,10 @@ AO_API AO_t *AO_stack_next_ptr(AO_t next)
/* No deletions of x can start here, since x is not */
/* currently in the list. */
retry:
- {
+ do {
+ next = AO_load_acquire(list);
+ store_before_cas(x, next);
+ {
# if AO_BL_SIZE == 2
/* Start all loads as close to concurrently as possible. */
AO_t entry1 = AO_load(&a->AO_stack_bl[0]);
@@ -131,15 +134,11 @@ AO_API AO_t *AO_stack_next_ptr(AO_t next)
x_bits = (AO_t)x;
goto retry;
}
- }
-
- /* x_bits is not currently being deleted */
- do
- {
- next = AO_load(list);
- store_before_cas(x, next);
}
- while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, next, x_bits)));
+
+ /* x_bits value is not currently being deleted. */
+ } while (AO_EXPECT_FALSE(!AO_compare_and_swap_release(list, next,
+ x_bits)));
}
/* I concluded experimentally that checking a value first before */