author     Gabriel Scherer <gabriel.scherer@gmail.com>  2023-04-26 23:04:46 +0200
committer  GitHub <noreply@github.com>                  2023-04-26 23:04:46 +0200
commit     9fd4767208663033b7695c3b507239c6197bdded (patch)
tree       5ee8ce78f99cf003e1c17a85ecfd3d7707ff4450
parent     69472048a0d1782d2b773561588339136a8cf5b6 (diff)
parent     2788503869eae32cbddfbc9952dd7b512a4816cb (diff)
Merge pull request #12192 from gadmm/simpl_atomic
Simplification with atomic_ helpers
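The change is a mechanical substitution: direct atomic_load_explicit/atomic_store_explicit calls and the older atomic_load_acq/atomic_store_rel wrappers become the shorter helpers now defined in runtime/caml/platform.h. A minimal self-contained sketch of those helpers and their intended use, assuming only C11 <stdatomic.h> (the typedefs and the main function are illustrative stand-ins, not runtime code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t uintnat;              /* stand-in for OCaml's uintnat       */
typedef _Atomic uintnat atomic_uintnat; /* stand-in for caml/camlatomic.h     */

/* The helpers introduced in runtime/caml/platform.h: thin macro wrappers
   over the _explicit forms of C11 atomics. */
#define atomic_load_acquire(p) \
  atomic_load_explicit((p), memory_order_acquire)
#define atomic_load_relaxed(p) \
  atomic_load_explicit((p), memory_order_relaxed)
#define atomic_store_release(p, v) \
  atomic_store_explicit((p), (v), memory_order_release)
#define atomic_store_relaxed(p, v) \
  atomic_store_explicit((p), (v), memory_order_relaxed)

static atomic_uintnat flag;

int main(void)
{
  atomic_store_release(&flag, 1);          /* publish with release semantics */
  uintnat v = atomic_load_acquire(&flag);  /* observe with acquire semantics */
  printf("flag = %lu\n", (unsigned long)v);
  (void)atomic_load_relaxed(&flag);        /* relaxed variants also provided */
  atomic_store_relaxed(&flag, 0);
  return 0;
}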
-rw-r--r--  otherlibs/runtime_events/runtime_events_consumer.c | 10
-rw-r--r--  otherlibs/systhreads/st_pthreads.h | 10
-rw-r--r--  otherlibs/systhreads/st_stubs.c | 4
-rw-r--r--  otherlibs/unix/symlink_win32.c | 7
-rw-r--r--  otherlibs/unix/unixsupport_unix.c | 5
-rw-r--r--  otherlibs/unix/unixsupport_win32.c | 5
-rw-r--r--  runtime/array.c | 6
-rw-r--r--  runtime/caml/atomic_refcount.h | 2
-rw-r--r--  runtime/caml/domain.h | 2
-rw-r--r--  runtime/caml/lf_skiplist.h | 3
-rw-r--r--  runtime/caml/platform.h | 31
-rw-r--r--  runtime/codefrag.c | 6
-rw-r--r--  runtime/domain.c | 66
-rw-r--r--  runtime/fail_nat.c | 4
-rw-r--r--  runtime/fiber.c | 4
-rw-r--r--  runtime/interp.c | 6
-rw-r--r--  runtime/lf_skiplist.c | 50
-rw-r--r--  runtime/major_gc.c | 62
-rw-r--r--  runtime/memory.c | 3
-rw-r--r--  runtime/minor_gc.c | 14
-rw-r--r--  runtime/obj.c | 2
-rw-r--r--  runtime/platform.c | 2
-rw-r--r--  runtime/runtime_events.c | 46
-rw-r--r--  runtime/signals.c | 2
-rw-r--r--  testsuite/tests/lf_skiplist/stubs.c | 3
25 files changed, 160 insertions(+), 195 deletions(-)
diff --git a/otherlibs/runtime_events/runtime_events_consumer.c b/otherlibs/runtime_events/runtime_events_consumer.c
index 0901a3d931..1e5f229fd1 100644
--- a/otherlibs/runtime_events/runtime_events_consumer.c
+++ b/otherlibs/runtime_events/runtime_events_consumer.c
@@ -23,6 +23,7 @@
#include "caml/misc.h"
#include "caml/mlvalues.h"
#include "caml/osdeps.h"
+#include "caml/platform.h"
#include <fcntl.h>
#include <stdatomic.h>
@@ -391,10 +392,8 @@ caml_runtime_events_read_poll(struct caml_runtime_events_cursor *cursor,
do {
uint64_t buf[RUNTIME_EVENTS_MAX_MSG_LENGTH];
uint64_t ring_mask, header, msg_length;
- ring_head = atomic_load_explicit(&runtime_events_buffer_header->ring_head,
- memory_order_acquire);
- ring_tail = atomic_load_explicit(&runtime_events_buffer_header->ring_tail,
- memory_order_acquire);
+ ring_head = atomic_load_acquire(&runtime_events_buffer_header->ring_head);
+ ring_tail = atomic_load_acquire(&runtime_events_buffer_header->ring_tail);
if (ring_head > cursor->current_positions[domain_num]) {
if (cursor->lost_events) {
@@ -427,8 +426,7 @@ caml_runtime_events_read_poll(struct caml_runtime_events_cursor *cursor,
atomic_thread_fence(memory_order_seq_cst);
- ring_head = atomic_load_explicit(&runtime_events_buffer_header->ring_head,
- memory_order_acquire);
+ ring_head = atomic_load_acquire(&runtime_events_buffer_header->ring_head);
/* Check the message we've read hasn't been overwritten by the writer */
if (ring_head > cursor->current_positions[domain_num]) {
diff --git a/otherlibs/systhreads/st_pthreads.h b/otherlibs/systhreads/st_pthreads.h
index 5d29df67d1..bd8839b6de 100644
--- a/otherlibs/systhreads/st_pthreads.h
+++ b/otherlibs/systhreads/st_pthreads.h
@@ -37,7 +37,7 @@ static atomic_uintnat tick_thread_stop[Max_domains];
static int st_initialize(void)
{
- atomic_store_rel(&Tick_thread_stop, 0);
+ atomic_store_release(&Tick_thread_stop, 0);
return 0;
}
@@ -112,14 +112,14 @@ static void st_masterlock_init(st_masterlock * m)
m->init = 1;
}
m->busy = 1;
- atomic_store_rel(&m->waiters, 0);
+ atomic_store_release(&m->waiters, 0);
return;
};
static uintnat st_masterlock_waiters(st_masterlock * m)
{
- return atomic_load_acq(&m->waiters);
+ return atomic_load_acquire(&m->waiters);
}
static void st_bt_lock_acquire(st_masterlock *m) {
@@ -295,10 +295,10 @@ static void * caml_thread_tick(void * arg)
caml_init_domain_self(*domain_id);
caml_domain_state *domain = Caml_state;
- while(! atomic_load_acq(&Tick_thread_stop)) {
+ while(! atomic_load_acquire(&Tick_thread_stop)) {
st_msleep(Thread_timeout);
- atomic_store_rel(&domain->requested_external_interrupt, 1);
+ atomic_store_release(&domain->requested_external_interrupt, 1);
caml_interrupt_self();
}
return NULL;
diff --git a/otherlibs/systhreads/st_stubs.c b/otherlibs/systhreads/st_stubs.c
index 389a343b2d..09d56120a7 100644
--- a/otherlibs/systhreads/st_stubs.c
+++ b/otherlibs/systhreads/st_stubs.c
@@ -488,9 +488,9 @@ CAMLprim value caml_thread_initialize(value unit)
CAMLprim value caml_thread_cleanup(value unit)
{
if (Tick_thread_running){
- atomic_store_rel(&Tick_thread_stop, 1);
+ atomic_store_release(&Tick_thread_stop, 1);
st_thread_join(Tick_thread_id);
- atomic_store_rel(&Tick_thread_stop, 0);
+ atomic_store_release(&Tick_thread_stop, 0);
Tick_thread_running = 0;
}
diff --git a/otherlibs/unix/symlink_win32.c b/otherlibs/unix/symlink_win32.c
index 3a1d6a1b8b..aac6545b31 100644
--- a/otherlibs/unix/symlink_win32.c
+++ b/otherlibs/unix/symlink_win32.c
@@ -26,6 +26,7 @@
#include <caml/fail.h>
#include <caml/signals.h>
#include <caml/osdeps.h>
+#include <caml/platform.h>
#include "unixsupport.h"
#ifndef SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE
@@ -78,13 +79,11 @@ CAMLprim value caml_unix_symlink(value to_dir, value osource, value odest)
caml_unix_check_path(osource, "symlink");
caml_unix_check_path(odest, "symlink");
- additional_flags = atomic_load_explicit(&additional_symlink_flags,
- memory_order_relaxed);
+ additional_flags = atomic_load_relaxed(&additional_symlink_flags);
if (additional_flags == -1) {
additional_flags = IsDeveloperModeEnabled() ?
SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE : 0;
- atomic_store_explicit(&additional_symlink_flags, additional_flags,
- memory_order_relaxed);
+ atomic_store_relaxed(&additional_symlink_flags, additional_flags);
}
flags =
diff --git a/otherlibs/unix/unixsupport_unix.c b/otherlibs/unix/unixsupport_unix.c
index 4a206072a0..449af8b5c2 100644
--- a/otherlibs/unix/unixsupport_unix.c
+++ b/otherlibs/unix/unixsupport_unix.c
@@ -13,11 +13,14 @@
/* */
/**************************************************************************/
+#define CAML_INTERNALS
+
#include <caml/mlvalues.h>
#include <caml/alloc.h>
#include <caml/callback.h>
#include <caml/memory.h>
#include <caml/fail.h>
+#include <caml/platform.h>
#include "unixsupport.h"
#include "cst2constr.h"
#include <errno.h>
@@ -293,7 +296,7 @@ void caml_unix_error(int errcode, const char *cmdname, value cmdarg)
value res;
const value * exn;
- exn = atomic_load_explicit(&caml_unix_error_exn, memory_order_acquire);
+ exn = atomic_load_acquire(&caml_unix_error_exn);
if (exn == NULL) {
exn = caml_named_value("Unix.Unix_error");
if (exn == NULL)
diff --git a/otherlibs/unix/unixsupport_win32.c b/otherlibs/unix/unixsupport_win32.c
index f9e85a4586..cb5eb35df7 100644
--- a/otherlibs/unix/unixsupport_win32.c
+++ b/otherlibs/unix/unixsupport_win32.c
@@ -13,6 +13,8 @@
/* */
/**************************************************************************/
+#define CAML_INTERNALS
+
#include <stddef.h>
#include <caml/mlvalues.h>
#include <caml/callback.h>
@@ -20,6 +22,7 @@
#include <caml/memory.h>
#include <caml/fail.h>
#include <caml/custom.h>
+#include <caml/platform.h>
#include "unixsupport.h"
#include "cst2constr.h"
#include <errno.h>
@@ -297,7 +300,7 @@ void caml_unix_error(int errcode, const char *cmdname, value cmdarg)
value res;
const value * exn;
- exn = atomic_load_explicit(&caml_unix_error_exn, memory_order_acquire);
+ exn = atomic_load_acquire(&caml_unix_error_exn);
if (exn == NULL) {
exn = caml_named_value("Unix.Unix_error");
if (exn == NULL)
diff --git a/runtime/array.c b/runtime/array.c
index 317153901e..5a850b4944 100644
--- a/runtime/array.c
+++ b/runtime/array.c
@@ -336,14 +336,12 @@ static void wo_memmove (volatile value* const dst,
if (dst < src) {
/* copy ascending */
for (i = 0; i < nvals; i++)
- atomic_store_explicit(&((atomic_value*)dst)[i], src[i],
- memory_order_release);
+ atomic_store_release(&((atomic_value*)dst)[i], src[i]);
} else {
/* copy descending */
for (i = nvals; i > 0; i--)
- atomic_store_explicit(&((atomic_value*)dst)[i-1], src[i-1],
- memory_order_release);
+ atomic_store_release(&((atomic_value*)dst)[i-1], src[i-1]);
}
}
}
diff --git a/runtime/caml/atomic_refcount.h b/runtime/caml/atomic_refcount.h
index 3e4a239d51..aba5ce7f67 100644
--- a/runtime/caml/atomic_refcount.h
+++ b/runtime/caml/atomic_refcount.h
@@ -21,7 +21,7 @@
#include "camlatomic.h"
Caml_inline void caml_atomic_refcount_init(atomic_uintnat* refc, uintnat n){
- atomic_store_rel(refc, n);
+ atomic_store_release(refc, n);
}
Caml_inline uintnat caml_atomic_refcount_decr(atomic_uintnat* refcount){
diff --git a/runtime/caml/domain.h b/runtime/caml/domain.h
index 17c011ecee..49194ae73d 100644
--- a/runtime/caml/domain.h
+++ b/runtime/caml/domain.h
@@ -92,7 +92,7 @@ CAMLextern atomic_uintnat caml_num_domains_running;
Caml_inline intnat caml_domain_alone(void)
{
- return atomic_load_acq(&caml_num_domains_running) == 1;
+ return atomic_load_acquire(&caml_num_domains_running) == 1;
}
#ifdef DEBUG
diff --git a/runtime/caml/lf_skiplist.h b/runtime/caml/lf_skiplist.h
index f35f112256..db6544c867 100644
--- a/runtime/caml/lf_skiplist.h
+++ b/runtime/caml/lf_skiplist.h
@@ -95,8 +95,7 @@ extern void caml_lf_skiplist_free_garbage(struct lf_skiplist *sk);
#define LF_SK_UNMARK(p) ((struct lf_skipcell *)(((uintptr_t)(p)) & ~1))
#define LF_SK_EXTRACT(from, mark_to, ptr_to) \
{ \
- uintptr_t tmp = \
- (uintptr_t)atomic_load_explicit(&from, memory_order_acquire); \
+ uintptr_t tmp = (uintptr_t)atomic_load_acquire(&(from)); \
mark_to = LF_SK_IS_MARKED(tmp); \
ptr_to = LF_SK_UNMARK(tmp); \
}
diff --git a/runtime/caml/platform.h b/runtime/caml/platform.h
index 77027405be..373419e3c9 100644
--- a/runtime/caml/platform.h
+++ b/runtime/caml/platform.h
@@ -49,27 +49,16 @@ Caml_inline void cpu_relax(void) {
#endif
}
-/* Loads and stores with acquire and release semantics respectively */
+/* Loads and stores with acquire, release and relaxed semantics */
-Caml_inline uintnat atomic_load_acq(atomic_uintnat* p)
-{
- return atomic_load_explicit(p, memory_order_acquire);
-}
-
-Caml_inline uintnat atomic_load_relaxed(atomic_uintnat* p)
-{
- return atomic_load_explicit(p, memory_order_relaxed);
-}
-
-Caml_inline void atomic_store_rel(atomic_uintnat* p, uintnat v)
-{
- atomic_store_explicit(p, v, memory_order_release);
-}
-
-Caml_inline void atomic_store_relaxed(atomic_uintnat* p, uintnat v)
-{
- atomic_store_explicit(p, v, memory_order_relaxed);
-}
+#define atomic_load_acquire(p) \
+ atomic_load_explicit((p), memory_order_acquire)
+#define atomic_load_relaxed(p) \
+ atomic_load_explicit((p), memory_order_relaxed)
+#define atomic_store_release(p, v) \
+ atomic_store_explicit((p), (v), memory_order_release)
+#define atomic_store_relaxed(p, v) \
+ atomic_store_explicit((p), (v), memory_order_relaxed)
/* Spin-wait loops */
@@ -94,7 +83,7 @@ CAMLextern unsigned caml_plat_spin_wait(unsigned spins,
Caml_inline uintnat atomic_load_wait_nonzero(atomic_uintnat* p) {
SPIN_WAIT {
- uintnat v = atomic_load_acq(p);
+ uintnat v = atomic_load_acquire(p);
if (v) return v;
}
}
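One plausible reason for switching from Caml_inline functions to macros here is genericity: the old wrappers accepted only atomic_uintnat*, while the macros forward straight to atomic_load_explicit/atomic_store_explicit and therefore apply to any _Atomic object, which the later hunks on _Atomic opcode_t (interp.c) and on atomic pointer caches (fiber.c, unixsupport) take advantage of. A sketch under that assumption, with stand-in type names rather than the runtime's:

#include <stdatomic.h>
#include <stdint.h>

#define atomic_load_acquire(p) \
  atomic_load_explicit((p), memory_order_acquire)
#define atomic_store_release(p, v) \
  atomic_store_explicit((p), (v), memory_order_release)

typedef int32_t opcode_t;               /* stand-in width, not the runtime's */

static _Atomic uintptr_t word_counter;  /* word-sized integer atomic         */
static _Atomic opcode_t bytecode_cache; /* narrower integer atomic           */
static const char * _Atomic name_cache; /* atomic pointer                    */

void demo(void)
{
  /* The same macros work for all three, with no per-type wrapper needed. */
  atomic_store_release(&word_counter, 1);
  atomic_store_release(&bytecode_cache, 42);
  atomic_store_release(&name_cache, "Unix.Unix_error");
  (void)atomic_load_acquire(&word_counter);
  (void)atomic_load_acquire(&bytecode_cache);
  (void)atomic_load_acquire(&name_cache);
}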
diff --git a/runtime/codefrag.c b/runtime/codefrag.c
index d1659563c8..9237995fa2 100644
--- a/runtime/codefrag.c
+++ b/runtime/codefrag.c
@@ -95,7 +95,7 @@ void caml_remove_code_fragment(struct code_fragment *cf) {
cf_cell->cf = cf;
do {
- cf_cell->next = atomic_load_explicit(&garbage_head, memory_order_acquire);
+ cf_cell->next = atomic_load_acquire(&garbage_head);
} while (!atomic_compare_exchange_strong(&garbage_head, &cf_cell->next,
cf_cell));
}
@@ -167,7 +167,7 @@ void caml_code_fragment_cleanup (void)
caml_lf_skiplist_free_garbage(&code_fragments_by_pc);
caml_lf_skiplist_free_garbage(&code_fragments_by_num);
- curr = atomic_load_explicit(&garbage_head, memory_order_acquire);
+ curr = atomic_load_acquire(&garbage_head);
while (curr != NULL) {
struct code_fragment_garbage *next = curr->next;
@@ -178,5 +178,5 @@ void caml_code_fragment_cleanup (void)
curr = next;
}
- atomic_store_explicit(&garbage_head, NULL, memory_order_release);
+ atomic_store_release(&garbage_head, NULL);
}
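The loop in caml_remove_code_fragment above is a lock-free list push: acquire-load garbage_head, point the new cell at it, and retry the compare-and-swap until it succeeds. A minimal sketch of the same pattern with illustrative types (not the runtime's code_fragment_garbage):

#include <stdatomic.h>
#include <stdlib.h>

#define atomic_load_acquire(p) \
  atomic_load_explicit((p), memory_order_acquire)

struct cell { struct cell *next; int payload; };

static struct cell * _Atomic garbage_head;

void push_garbage(int payload)
{
  struct cell *c = malloc(sizeof *c);
  if (c == NULL) abort();
  c->payload = payload;
  do {
    /* Re-read the head on every attempt; on failure the CAS also refreshes
       c->next, but the explicit acquire load keeps the intent clear. */
    c->next = atomic_load_acquire(&garbage_head);
  } while (!atomic_compare_exchange_strong(&garbage_head, &c->next, c));
}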
diff --git a/runtime/domain.c b/runtime/domain.c
index 88d7c1469c..91325c4bfc 100644
--- a/runtime/domain.c
+++ b/runtime/domain.c
@@ -294,22 +294,22 @@ CAMLexport caml_domain_state* caml_get_domain_state(void)
Caml_inline void interrupt_domain(struct interruptor* s)
{
- atomic_store_rel(s->interrupt_word, (uintnat)(-1));
+ atomic_store_release(s->interrupt_word, (uintnat)(-1));
}
int caml_incoming_interrupts_queued(void)
{
- return atomic_load_acq(&domain_self->interruptor.interrupt_pending);
+ return atomic_load_acquire(&domain_self->interruptor.interrupt_pending);
}
/* must NOT be called with s->lock held */
static void stw_handler(caml_domain_state* domain);
static uintnat handle_incoming(struct interruptor* s)
{
- uintnat handled = atomic_load_acq(&s->interrupt_pending);
+ uintnat handled = atomic_load_acquire(&s->interrupt_pending);
CAMLassert (s->running);
if (handled) {
- atomic_store_rel(&s->interrupt_pending, 0);
+ atomic_store_release(&s->interrupt_pending, 0);
stw_handler(domain_self->state);
}
@@ -330,7 +330,7 @@ void caml_handle_incoming_interrupts(void)
int caml_send_interrupt(struct interruptor* target)
{
/* signal that there is an interrupt pending */
- atomic_store_rel(&target->interrupt_pending, 1);
+ atomic_store_release(&target->interrupt_pending, 1);
/* Signal the condition variable, in case the target is
itself waiting for an interrupt to be processed elsewhere */
@@ -349,7 +349,7 @@ static void caml_wait_interrupt_serviced(struct interruptor* target)
/* Often, interrupt handlers are fast, so spin for a bit before waiting */
for (i=0; i<1000; i++) {
- if (!atomic_load_acq(&target->interrupt_pending)) {
+ if (!atomic_load_acquire(&target->interrupt_pending)) {
return;
}
cpu_relax();
@@ -357,7 +357,7 @@ static void caml_wait_interrupt_serviced(struct interruptor* target)
{
SPIN_WAIT {
- if (!atomic_load_acq(&target->interrupt_pending))
+ if (!atomic_load_acquire(&target->interrupt_pending))
return;
}
}
@@ -453,7 +453,7 @@ static void free_minor_heap(void) {
domain_state->young_end = NULL;
domain_state->young_ptr = NULL;
domain_state->young_trigger = NULL;
- atomic_store_rel(&domain_state->young_limit,
+ atomic_store_release(&domain_state->young_limit,
(uintnat) domain_state->young_start);
}
@@ -545,7 +545,7 @@ static void domain_create(uintnat initial_minor_heap_wsize) {
caml_plat_lock(&all_domains_lock);
/* Wait until any in-progress STW sections end. */
- while (atomic_load_acq(&stw_leader)) {
+ while (atomic_load_acquire(&stw_leader)) {
/* [caml_plat_wait] releases [all_domains_lock] until the current
STW section ends, and then takes the lock again. */
caml_plat_wait(&all_domains_cond);
@@ -938,7 +938,7 @@ static void* backup_thread_func(void* v)
domain_self = di;
caml_state = di->state;
- msg = atomic_load_acq (&di->backup_thread_msg);
+ msg = atomic_load_acquire (&di->backup_thread_msg);
while (msg != BT_TERMINATE) {
CAMLassert (msg <= BT_TERMINATE);
switch (msg) {
@@ -958,7 +958,7 @@ static void* backup_thread_func(void* v)
* Will be woken from caml_leave_blocking_section
*/
caml_plat_lock(&s->lock);
- msg = atomic_load_acq (&di->backup_thread_msg);
+ msg = atomic_load_acquire (&di->backup_thread_msg);
if (msg == BT_IN_BLOCKING_SECTION &&
!caml_incoming_interrupts_queued())
caml_plat_wait(&s->cond);
@@ -970,7 +970,7 @@ static void* backup_thread_func(void* v)
* or domain_terminate
*/
caml_plat_lock(&di->domain_lock);
- msg = atomic_load_acq (&di->backup_thread_msg);
+ msg = atomic_load_acquire (&di->backup_thread_msg);
if (msg == BT_ENTERING_OCAML)
caml_plat_wait(&di->domain_cond);
caml_plat_unlock(&di->domain_lock);
@@ -979,11 +979,11 @@ static void* backup_thread_func(void* v)
cpu_relax();
break;
};
- msg = atomic_load_acq (&di->backup_thread_msg);
+ msg = atomic_load_acquire (&di->backup_thread_msg);
}
/* doing terminate */
- atomic_store_rel(&di->backup_thread_msg, BT_INIT);
+ atomic_store_release(&di->backup_thread_msg, BT_INIT);
return 0;
}
@@ -999,7 +999,7 @@ static void install_backup_thread (dom_internal* di)
CAMLassert (di->backup_thread_msg == BT_INIT || /* Using fresh domain */
di->backup_thread_msg == BT_TERMINATE); /* Reusing domain */
- while (atomic_load_acq(&di->backup_thread_msg) != BT_INIT) {
+ while (atomic_load_acquire(&di->backup_thread_msg) != BT_INIT) {
/* Give a chance for backup thread on this domain to terminate */
caml_plat_unlock (&di->domain_lock);
cpu_relax ();
@@ -1012,7 +1012,7 @@ static void install_backup_thread (dom_internal* di)
pthread_sigmask(SIG_BLOCK, &mask, &old_mask);
#endif
- atomic_store_rel(&di->backup_thread_msg, BT_ENTERING_OCAML);
+ atomic_store_release(&di->backup_thread_msg, BT_ENTERING_OCAML);
err = pthread_create(&di->backup_thread, 0, backup_thread_func, (void*)di);
#ifndef _WIN32
@@ -1227,11 +1227,11 @@ void caml_global_barrier_end(barrier_status b)
uintnat sense = b & BARRIER_SENSE_BIT;
if (caml_global_barrier_is_final(b)) {
/* last domain into the barrier, flip sense */
- atomic_store_rel(&stw_request.barrier, sense ^ BARRIER_SENSE_BIT);
+ atomic_store_release(&stw_request.barrier, sense ^ BARRIER_SENSE_BIT);
} else {
/* wait until another domain flips the sense */
SPIN_WAIT {
- uintnat barrier = atomic_load_acq(&stw_request.barrier);
+ uintnat barrier = atomic_load_acquire(&stw_request.barrier);
if ((barrier & BARRIER_SENSE_BIT) != sense) break;
}
}
@@ -1259,7 +1259,7 @@ static void decrement_stw_domains_still_processing(void)
if( am_last ) {
/* release the STW lock to allow new STW sections */
caml_plat_lock(&all_domains_lock);
- atomic_store_rel(&stw_leader, 0);
+ atomic_store_release(&stw_leader, 0);
caml_plat_broadcast(&all_domains_cond);
caml_gc_log("clearing stw leader");
caml_plat_unlock(&all_domains_lock);
@@ -1272,7 +1272,7 @@ static void stw_handler(caml_domain_state* domain)
CAML_EV_BEGIN(EV_STW_API_BARRIER);
{
SPIN_WAIT {
- if (atomic_load_acq(&stw_request.domains_still_running) == 0)
+ if (atomic_load_acquire(&stw_request.domains_still_running) == 0)
break;
if (stw_request.enter_spin_callback)
@@ -1384,21 +1384,21 @@ int caml_try_run_on_all_domains_with_spin_work(
situations. Without this read, [stw_leader] would be protected by
[all_domains_lock] and could be a non-atomic variable.
*/
- if (atomic_load_acq(&stw_leader) ||
+ if (atomic_load_acquire(&stw_leader) ||
!caml_plat_try_lock(&all_domains_lock)) {
caml_handle_incoming_interrupts();
return 0;
}
/* see if there is a stw_leader already */
- if (atomic_load_acq(&stw_leader)) {
+ if (atomic_load_acquire(&stw_leader)) {
caml_plat_unlock(&all_domains_lock);
caml_handle_incoming_interrupts();
return 0;
}
/* we have the lock and can claim the stw_leader */
- atomic_store_rel(&stw_leader, (uintnat)domain_self);
+ atomic_store_release(&stw_leader, (uintnat)domain_self);
CAML_EV_BEGIN(EV_STW_LEADER);
caml_gc_log("causing STW");
@@ -1409,10 +1409,10 @@ int caml_try_run_on_all_domains_with_spin_work(
stw_request.enter_spin_data = enter_spin_data;
stw_request.callback = handler;
stw_request.data = data;
- atomic_store_rel(&stw_request.barrier, 0);
- atomic_store_rel(&stw_request.domains_still_running, 1);
+ atomic_store_release(&stw_request.barrier, 0);
+ atomic_store_release(&stw_request.domains_still_running, 1);
stw_request.num_domains = stw_domains.participating_domains;
- atomic_store_rel(&stw_request.num_domains_still_processing,
+ atomic_store_release(&stw_request.num_domains_still_processing,
stw_domains.participating_domains);
if( leader_setup ) {
@@ -1462,7 +1462,7 @@ int caml_try_run_on_all_domains_with_spin_work(
}
/* release from the enter barrier */
- atomic_store_rel(&stw_request.domains_still_running, 0);
+ atomic_store_release(&stw_request.domains_still_running, 0);
#ifdef DEBUG
domain_state->inside_stw_handler = 1;
@@ -1511,7 +1511,7 @@ void caml_reset_young_limit(caml_domain_state * dom_st)
|| dom_st->major_slice_epoch < atomic_load (&caml_major_slice_epoch)
|| atomic_load_relaxed(&dom_st->requested_external_interrupt)
|| dom_st->action_pending) {
- atomic_store_rel(&dom_st->young_limit, (uintnat)-1);
+ atomic_store_release(&dom_st->young_limit, (uintnat)-1);
CAMLassert(caml_check_gc_interrupt(dom_st));
}
}
@@ -1599,7 +1599,7 @@ void caml_poll_gc_work(void)
CAML_EV_END(EV_MAJOR);
}
- if (atomic_load_acq(&d->requested_external_interrupt)) {
+ if (atomic_load_acquire(&d->requested_external_interrupt)) {
caml_domain_external_interrupt_hook();
}
caml_reset_young_limit(d);
@@ -1621,7 +1621,7 @@ void caml_handle_gc_interrupt(void)
CAMLexport int caml_bt_is_in_blocking_section(void)
{
- uintnat status = atomic_load_acq(&domain_self->backup_thread_msg);
+ uintnat status = atomic_load_acquire(&domain_self->backup_thread_msg);
return status == BT_IN_BLOCKING_SECTION;
}
@@ -1650,7 +1650,7 @@ CAMLexport void caml_bt_enter_ocaml(void)
CAMLassert(caml_domain_alone() || self->backup_thread_running);
if (self->backup_thread_running) {
- atomic_store_rel(&self->backup_thread_msg, BT_ENTERING_OCAML);
+ atomic_store_release(&self->backup_thread_msg, BT_ENTERING_OCAML);
}
}
@@ -1668,7 +1668,7 @@ CAMLexport void caml_bt_exit_ocaml(void)
CAMLassert(caml_domain_alone() || self->backup_thread_running);
if (self->backup_thread_running) {
- atomic_store_rel(&self->backup_thread_msg, BT_IN_BLOCKING_SECTION);
+ atomic_store_release(&self->backup_thread_msg, BT_IN_BLOCKING_SECTION);
/* Wakeup backup thread if it is sleeping */
caml_plat_signal(&self->domain_cond);
}
@@ -1827,7 +1827,7 @@ static void domain_terminate (void)
/* signal the domain termination to the backup thread
NB: for a program with no additional domains, the backup thread
will not have been started */
- atomic_store_rel(&domain_self->backup_thread_msg, BT_TERMINATE);
+ atomic_store_release(&domain_self->backup_thread_msg, BT_TERMINATE);
caml_plat_signal(&domain_self->domain_cond);
caml_plat_unlock(&domain_self->domain_lock);
diff --git a/runtime/fail_nat.c b/runtime/fail_nat.c
index 2245f933b3..bb891b940f 100644
--- a/runtime/fail_nat.c
+++ b/runtime/fail_nat.c
@@ -197,7 +197,7 @@ CAMLexport value caml_raise_if_exception(value res)
static value array_bound_exn(void)
{
static atomic_uintnat exn_cache = ATOMIC_UINTNAT_INIT(0);
- const value* exn = (const value*)atomic_load_acq(&exn_cache);
+ const value* exn = (const value*)atomic_load_acquire(&exn_cache);
if (!exn) {
exn = caml_named_value("Pervasives.array_bound_error");
if (!exn) {
@@ -205,7 +205,7 @@ static value array_bound_exn(void)
"Invalid_argument(\"index out of bounds\")\n");
exit(2);
}
- atomic_store_rel(&exn_cache, (uintnat)exn);
+ atomic_store_release(&exn_cache, (uintnat)exn);
}
return *exn;
}
diff --git a/runtime/fiber.c b/runtime/fiber.c
index 52d68fbbff..d86daf9141 100644
--- a/runtime/fiber.c
+++ b/runtime/fiber.c
@@ -670,14 +670,14 @@ static const value * cache_named_exception(const value * _Atomic * cache,
const char * name)
{
const value * exn;
- exn = atomic_load_explicit(cache, memory_order_acquire);
+ exn = atomic_load_acquire(cache);
if (exn == NULL) {
exn = caml_named_value(name);
if (exn == NULL) {
fprintf(stderr, "Fatal error: exception %s\n", name);
exit(2);
}
- atomic_store_explicit(cache, exn, memory_order_release);
+ atomic_store_release(cache, exn);
}
return exn;
}
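cache_named_exception above is the usual compute-once/publish cache: acquire-load the cache, fall back to the slow lookup when it is empty, then release-store the result. A small self-contained sketch of that idiom, with a placeholder lookup in place of caml_named_value:

#include <stdatomic.h>
#include <stddef.h>

#define atomic_load_acquire(p) \
  atomic_load_explicit((p), memory_order_acquire)
#define atomic_store_release(p, v) \
  atomic_store_explicit((p), (v), memory_order_release)

/* Placeholder for the expensive lookup (returns a stable pointer). */
static const char *expensive_lookup(const char *name) { return name; }

static const char *cached_lookup(const char * _Atomic *cache,
                                 const char *name)
{
  const char *v = atomic_load_acquire(cache);
  if (v == NULL) {
    v = expensive_lookup(name);
    /* Racing threads may both reach this store, but they publish the same
       stable pointer, so the last writer wins harmlessly. */
    atomic_store_release(cache, v);
  }
  return v;
}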
diff --git a/runtime/interp.c b/runtime/interp.c
index 891096debf..92f6dd7d3e 100644
--- a/runtime/interp.c
+++ b/runtime/interp.c
@@ -1211,8 +1211,7 @@ value caml_interprete(code_t prog, asize_t prog_size)
accu = Val_int(*pc++);
/* We use relaxed atomic accesses to avoid racing with other domains
updating the cache */
- ofs = atomic_load_explicit((_Atomic opcode_t *)pc, memory_order_relaxed)
- & Field(meths,1);
+ ofs = atomic_load_relaxed((_Atomic opcode_t *)pc) & Field(meths,1);
if (*(value*)(((char*)&Field(meths,3)) + ofs) == accu) {
#ifdef CAML_TEST_CACHE
hits++;
@@ -1227,8 +1226,7 @@ value caml_interprete(code_t prog, asize_t prog_size)
if (accu < Field(meths,mi)) hi = mi-2;
else li = mi;
}
- atomic_store_explicit((_Atomic opcode_t *)pc, (li-3)*sizeof(value),
- memory_order_relaxed);
+ atomic_store_relaxed((_Atomic opcode_t *)pc, (li-3)*sizeof(value));
accu = Field (meths, li-1);
}
pc++;
diff --git a/runtime/lf_skiplist.c b/runtime/lf_skiplist.c
index 6cbe46d874..59434fee82 100644
--- a/runtime/lf_skiplist.c
+++ b/runtime/lf_skiplist.c
@@ -74,8 +74,7 @@ static int random_level(void) {
(Knuth vol 2 p. 106, line 15 of table 1), additive = 25173. */
while( 1 ) {
- uint32_t curr =
- atomic_load_explicit(&random_seed, memory_order_relaxed);
+ uint32_t curr = atomic_load_relaxed(&random_seed);
r = curr * 69069 + 25173;
@@ -97,7 +96,7 @@ static int random_level(void) {
/* Initialize a skip list */
void caml_lf_skiplist_init(struct lf_skiplist *sk) {
- atomic_store_explicit(&sk->search_level, 0, memory_order_relaxed);
+ atomic_store_relaxed(&sk->search_level, 0);
/* This concurrent skip list has two sentinel nodes, the first [head] is
less than any possible key in the data structure and the second [tail] is
@@ -125,11 +124,9 @@ void caml_lf_skiplist_init(struct lf_skiplist *sk) {
/* each level in the skip list starts of being just head pointing to tail */
for (int j = 0; j < NUM_LEVELS; j++) {
- atomic_store_explicit
- (&sk->head->forward[j], sk->tail, memory_order_release);
+ atomic_store_release(&sk->head->forward[j], sk->tail);
- atomic_store_explicit
- (&sk->tail->forward[j], NULL, memory_order_release);
+ atomic_store_release(&sk->tail->forward[j], NULL);
}
}
@@ -172,8 +169,7 @@ retry:
compare-and-swap.
*/
for (int level = NUM_LEVELS - 1; level >= 0; level--) {
- curr = LF_SK_UNMARK(
- atomic_load_explicit(&pred->forward[level], memory_order_acquire));
+ curr = LF_SK_UNMARK(atomic_load_acquire(&pred->forward[level]));
while (1) {
int is_marked;
@@ -210,10 +206,9 @@ retry:
This is why we need to a retry loop and yet another CAS. */
while (1) {
struct lf_skipcell *_Atomic current_garbage_head =
- atomic_load_explicit(&sk->garbage_head, memory_order_acquire);
+ atomic_load_acquire(&sk->garbage_head);
- atomic_store_explicit(&curr->garbage_next, current_garbage_head,
- memory_order_release);
+ atomic_store_release(&curr->garbage_next, current_garbage_head);
if (atomic_compare_exchange_strong(
&sk->garbage_head,
@@ -225,8 +220,7 @@ retry:
/* Now try to load the current node again. We need to check it too
hasn't been marked. If it has we repeat the process */
- curr = LF_SK_UNMARK(atomic_load_explicit(&pred->forward[level],
- memory_order_acquire));
+ curr = LF_SK_UNMARK(atomic_load_acquire(&pred->forward[level]));
LF_SK_EXTRACT(curr->forward[level], is_marked, succ);
}
@@ -271,11 +265,9 @@ static struct lf_skipcell *lf_skiplist_lookup(struct lf_skiplist *sk,
level then our only cost is an increased number of nodes searched. If we
did the same thing in the find function above then we'd also fail to snip
out marked nodes. If we did that for long enough we might leak memory. */
- for (int level =
- atomic_load_explicit(&sk->search_level, memory_order_relaxed);
+ for (int level = atomic_load_relaxed(&sk->search_level);
level >= 0; level--) {
- curr = LF_SK_UNMARK(
- atomic_load_explicit(&pred->forward[level], memory_order_acquire));
+ curr = LF_SK_UNMARK(atomic_load_acquire(&pred->forward[level]));
while (1) {
LF_SK_EXTRACT(curr->forward[level], marked, succ);
while (marked) {
@@ -355,8 +347,7 @@ int caml_lf_skiplist_insert(struct lf_skiplist *sk, uintnat key, uintnat data) {
if (found) {
/* Already present; update data */
- atomic_store_explicit((atomic_uintnat*)&succs[0]->data, data,
- memory_order_relaxed);
+ atomic_store_relaxed((atomic_uintnat*)&succs[0]->data, data);
return 1;
} else {
/* node does not exist. We need to generate a random top_level and
@@ -374,11 +365,10 @@ int caml_lf_skiplist_insert(struct lf_skiplist *sk, uintnat key, uintnat data) {
new_cell->top_level = top_level;
new_cell->key = key;
new_cell->data = data;
- atomic_store_explicit(&new_cell->garbage_next,NULL,memory_order_relaxed);
+ atomic_store_relaxed(&new_cell->garbage_next,NULL);
for (int level = 0; level <= top_level; level++) {
- atomic_store_explicit(&new_cell->forward[level], succs[level],
- memory_order_release);
+ atomic_store_release(&new_cell->forward[level], succs[level]);
}
/* Now we need to actually slip the node in. We start at the bottom-most
@@ -426,10 +416,8 @@ int caml_lf_skiplist_insert(struct lf_skiplist *sk, uintnat key, uintnat data) {
/* If we put the new node at a higher level than the current
[search_level] then to speed up searches we need to bump it. We don't
care too much if this fails though. */
- if (top_level >
- atomic_load_explicit(&sk->search_level, memory_order_relaxed)) {
- atomic_store_explicit(&sk->search_level, top_level,
- memory_order_relaxed);
+ if (top_level > atomic_load_relaxed(&sk->search_level)) {
+ atomic_store_relaxed(&sk->search_level, top_level);
}
return 1;
@@ -500,17 +488,15 @@ int caml_lf_skiplist_remove(struct lf_skiplist *sk, uintnat key) {
skiplist */
void caml_lf_skiplist_free_garbage(struct lf_skiplist *sk) {
- struct lf_skipcell *curr =
- atomic_load_explicit(&sk->garbage_head, memory_order_acquire);
+ struct lf_skipcell *curr = atomic_load_acquire(&sk->garbage_head);
struct lf_skipcell *head = sk->head;
while (curr != head) {
- struct lf_skipcell *next = atomic_load_explicit
- (&curr->garbage_next, memory_order_relaxed);
+ struct lf_skipcell *next = atomic_load_relaxed(&curr->garbage_next);
// acquire not useful, if executed in STW
caml_stat_free(curr);
curr = next;
}
- atomic_store_explicit(&sk->garbage_head, sk->head, memory_order_release);
+ atomic_store_release(&sk->garbage_head, sk->head);
}
diff --git a/runtime/major_gc.c b/runtime/major_gc.c
index 4adf49c8d8..7f1dac022d 100644
--- a/runtime/major_gc.c
+++ b/runtime/major_gc.c
@@ -196,8 +196,8 @@ static void ephe_next_cycle (void)
caml_plat_lock(&ephe_lock);
atomic_fetch_add(&ephe_cycle_info.ephe_cycle, +1);
- CAMLassert(atomic_load_acq(&ephe_cycle_info.num_domains_done) <=
- atomic_load_acq(&ephe_cycle_info.num_domains_todo));
+ CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_done) <=
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo));
atomic_store(&ephe_cycle_info.num_domains_done, 0);
caml_plat_unlock(&ephe_lock);
@@ -216,8 +216,8 @@ static void ephe_todo_list_emptied (void)
/* Since the todo list is empty, this domain does not need to participate in
* further ephemeron cycles. */
atomic_fetch_add(&ephe_cycle_info.num_domains_todo, -1);
- CAMLassert(atomic_load_acq(&ephe_cycle_info.num_domains_done) <=
- atomic_load_acq(&ephe_cycle_info.num_domains_todo));
+ CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_done) <=
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo));
caml_plat_unlock(&ephe_lock);
}
@@ -225,18 +225,18 @@ static void ephe_todo_list_emptied (void)
/* Record that ephemeron marking was done for the given ephemeron cycle. */
static void record_ephe_marking_done (uintnat ephe_cycle)
{
- CAMLassert (ephe_cycle <= atomic_load_acq(&ephe_cycle_info.ephe_cycle));
+ CAMLassert (ephe_cycle <= atomic_load_acquire(&ephe_cycle_info.ephe_cycle));
CAMLassert (Caml_state->marking_done);
- if (ephe_cycle < atomic_load_acq(&ephe_cycle_info.ephe_cycle))
+ if (ephe_cycle < atomic_load_acquire(&ephe_cycle_info.ephe_cycle))
return;
caml_plat_lock(&ephe_lock);
if (ephe_cycle == atomic_load(&ephe_cycle_info.ephe_cycle)) {
Caml_state->ephe_info->cycle = ephe_cycle;
atomic_fetch_add(&ephe_cycle_info.num_domains_done, +1);
- CAMLassert(atomic_load_acq(&ephe_cycle_info.num_domains_done) <=
- atomic_load_acq(&ephe_cycle_info.num_domains_todo));
+ CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_done) <=
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo));
}
caml_plat_unlock(&ephe_lock);
}
@@ -1009,10 +1009,8 @@ void caml_darken_cont(value cont)
if (Ptr_val(stk) != NULL)
caml_scan_stack(&caml_darken, darken_scanning_flags, Caml_state,
Ptr_val(stk), 0);
- atomic_store_explicit(
- Hp_atomic_val(cont),
- With_status_hd(hd, caml_global_heap_state.MARKED),
- memory_order_release);
+ atomic_store_release(Hp_atomic_val(cont),
+ With_status_hd(hd, caml_global_heap_state.MARKED));
}
}
}
@@ -1168,8 +1166,8 @@ static void cycle_all_domains_callback(caml_domain_state* domain, void* unused,
CAML_EV_BEGIN(EV_MAJOR_GC_CYCLE_DOMAINS);
CAMLassert(domain == Caml_state);
- CAMLassert(atomic_load_acq(&ephe_cycle_info.num_domains_todo) ==
- atomic_load_acq(&ephe_cycle_info.num_domains_done));
+ CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_todo) ==
+ atomic_load_acquire(&ephe_cycle_info.num_domains_done));
CAMLassert(atomic_load(&num_domains_to_mark) == 0);
CAMLassert(atomic_load(&num_domains_to_sweep) == 0);
CAMLassert(atomic_load(&num_domains_to_ephe_sweep) == 0);
@@ -1246,20 +1244,22 @@ static void cycle_all_domains_callback(caml_domain_state* domain, void* unused,
domain->swept_words = 0;
num_domains_in_stw = (uintnat)caml_global_barrier_num_domains();
- atomic_store_rel(&num_domains_to_sweep, num_domains_in_stw);
- atomic_store_rel(&num_domains_to_mark, num_domains_in_stw);
+ atomic_store_release(&num_domains_to_sweep, num_domains_in_stw);
+ atomic_store_release(&num_domains_to_mark, num_domains_in_stw);
caml_gc_phase = Phase_sweep_and_mark_main;
atomic_store(&ephe_cycle_info.num_domains_todo, num_domains_in_stw);
atomic_store(&ephe_cycle_info.ephe_cycle, 1);
atomic_store(&ephe_cycle_info.num_domains_done, 0);
- atomic_store_rel(&num_domains_to_ephe_sweep, 0);
+ atomic_store_release(&num_domains_to_ephe_sweep, 0);
/* Will be set to the correct number when switching to
[Phase_sweep_ephe] */
- atomic_store_rel(&num_domains_to_final_update_first, num_domains_in_stw);
- atomic_store_rel(&num_domains_to_final_update_last, num_domains_in_stw);
+ atomic_store_release(&num_domains_to_final_update_first,
+ num_domains_in_stw);
+ atomic_store_release(&num_domains_to_final_update_last,
+ num_domains_in_stw);
atomic_store(&domain_global_roots_started, WORK_UNSTARTED);
@@ -1366,11 +1366,11 @@ static int is_complete_phase_sweep_and_mark_main (void)
{
return
caml_gc_phase == Phase_sweep_and_mark_main &&
- atomic_load_acq (&num_domains_to_sweep) == 0 &&
- atomic_load_acq (&num_domains_to_mark) == 0 &&
+ atomic_load_acquire (&num_domains_to_sweep) == 0 &&
+ atomic_load_acquire (&num_domains_to_mark) == 0 &&
/* Marking is done */
- atomic_load_acq(&ephe_cycle_info.num_domains_todo) ==
- atomic_load_acq(&ephe_cycle_info.num_domains_done) &&
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo) ==
+ atomic_load_acquire(&ephe_cycle_info.num_domains_done) &&
/* Ephemeron marking is done */
no_orphaned_work();
/* All orphaned ephemerons have been adopted */
@@ -1380,12 +1380,12 @@ static int is_complete_phase_mark_final (void)
{
return
caml_gc_phase == Phase_mark_final &&
- atomic_load_acq (&num_domains_to_final_update_first) == 0 &&
+ atomic_load_acquire (&num_domains_to_final_update_first) == 0 &&
/* updated finalise first values */
- atomic_load_acq (&num_domains_to_mark) == 0 &&
+ atomic_load_acquire (&num_domains_to_mark) == 0 &&
/* Marking is done */
- atomic_load_acq(&ephe_cycle_info.num_domains_todo) ==
- atomic_load_acq(&ephe_cycle_info.num_domains_done) &&
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo) ==
+ atomic_load_acquire(&ephe_cycle_info.num_domains_done) &&
/* Ephemeron marking is done */
no_orphaned_work();
/* All orphaned ephemerons have been adopted */
@@ -1395,9 +1395,9 @@ static int is_complete_phase_sweep_ephe (void)
{
return
caml_gc_phase == Phase_sweep_ephe &&
- atomic_load_acq (&num_domains_to_ephe_sweep) == 0 &&
+ atomic_load_acquire (&num_domains_to_ephe_sweep) == 0 &&
/* All domains have swept their ephemerons */
- atomic_load_acq (&num_domains_to_final_update_last) == 0 &&
+ atomic_load_acquire (&num_domains_to_final_update_last) == 0 &&
/* All domains have updated finalise last values */
no_orphaned_work();
/* All orphaned structures have been adopted */
@@ -1416,7 +1416,7 @@ static void try_complete_gc_phase (caml_domain_state* domain, void* unused,
caml_gc_phase = Phase_mark_final;
} else if (is_complete_phase_mark_final()) {
caml_gc_phase = Phase_sweep_ephe;
- atomic_store_rel(&num_domains_to_ephe_sweep, participant_count);
+ atomic_store_release(&num_domains_to_ephe_sweep, participant_count);
for (int i = 0; i < participant_count; i++)
participating[i]->ephe_info->must_sweep_ephe = 1;
}
@@ -1560,7 +1560,7 @@ mark_again:
/* Ephemerons */
if (caml_gc_phase != Phase_sweep_ephe) {
/* Ephemeron Marking */
- saved_ephe_cycle = atomic_load_acq(&ephe_cycle_info.ephe_cycle);
+ saved_ephe_cycle = atomic_load_acquire(&ephe_cycle_info.ephe_cycle);
if (domain_state->ephe_info->todo != (value) NULL &&
saved_ephe_cycle > domain_state->ephe_info->cycle) {
CAML_EV_BEGIN(EV_MAJOR_EPHE_MARK);
diff --git a/runtime/memory.c b/runtime/memory.c
index 3af3a6f72b..1907d5ce84 100644
--- a/runtime/memory.c
+++ b/runtime/memory.c
@@ -152,8 +152,7 @@ CAMLexport CAMLweakdef void caml_modify (volatile value *fp, value val)
/* See Note [MM] above */
atomic_thread_fence(memory_order_acquire);
- atomic_store_explicit(&Op_atomic_val((value)fp)[0], val,
- memory_order_release);
+ atomic_store_release(&Op_atomic_val((value)fp)[0], val);
}
/* Dependent memory is all memory blocks allocated out of the heap
diff --git a/runtime/minor_gc.c b/runtime/minor_gc.c
index 55476b9026..faad61c915 100644
--- a/runtime/minor_gc.c
+++ b/runtime/minor_gc.c
@@ -172,7 +172,7 @@ static void spin_on_header(value v) {
}
Caml_inline header_t get_header_val(value v) {
- header_t hd = atomic_load_explicit(Hp_atomic_val(v), memory_order_acquire);
+ header_t hd = atomic_load_acquire(Hp_atomic_val(v));
if (!Is_update_in_progress(hd))
return hd;
@@ -210,9 +210,9 @@ static int try_update_object_header(value v, volatile value *p, value result,
header_t desired_hd = In_progress_update_val;
if( atomic_compare_exchange_strong(Hp_atomic_val(v), &hd, desired_hd) ) {
/* Success. Now we can write the forwarding pointer. */
- atomic_store_explicit(Op_atomic_val(v), result, memory_order_relaxed);
+ atomic_store_relaxed(Op_atomic_val(v), result);
/* And update header ('release' ensures after update of fwd pointer) */
- atomic_store_rel(Hp_atomic_val(v), 0);
+ atomic_store_release(Hp_atomic_val(v), 0);
/* Let the caller know we were responsible for the update */
success = 1;
} else {
@@ -675,7 +675,7 @@ void caml_do_opportunistic_major_slice
if needed.
*/
void caml_empty_minor_heap_setup(caml_domain_state* domain_unused) {
- atomic_store_explicit(&domains_finished_minor_gc, 0, memory_order_release);
+ atomic_store_release(&domains_finished_minor_gc, 0);
/* Increment the total number of minor collections done in the program */
atomic_fetch_add (&caml_minor_collections_count, 1);
}
@@ -706,10 +706,8 @@ caml_stw_empty_minor_heap_no_major_slice(caml_domain_state* domain,
CAML_EV_BEGIN(EV_MINOR_LEAVE_BARRIER);
{
SPIN_WAIT {
- if( atomic_load_explicit
- (&domains_finished_minor_gc, memory_order_acquire)
- ==
- participating_count ) {
+ if (atomic_load_acquire(&domains_finished_minor_gc) ==
+ participating_count) {
break;
}
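The minor_gc.c hunks above (get_header_val and try_update_object_header) combine into a claim/write/publish idiom: claim the header with a compare-and-swap, write the forwarding pointer with a relaxed store, publish with a release store of the final header, and have readers acquire-load the header and spin while an update is in flight. A compressed sketch of that idiom with illustrative types and encodings (not the runtime's header layout):

#include <stdatomic.h>
#include <stdint.h>

#define IN_PROGRESS ((uintptr_t)-1)

struct obj {
  _Atomic uintptr_t header;   /* 0 = forwarded, IN_PROGRESS = being updated  */
  _Atomic uintptr_t field0;   /* holds the forwarding pointer once forwarded */
};

#define atomic_load_acquire(p) \
  atomic_load_explicit((p), memory_order_acquire)
#define atomic_store_release(p, v) \
  atomic_store_explicit((p), (v), memory_order_release)
#define atomic_store_relaxed(p, v) \
  atomic_store_explicit((p), (v), memory_order_relaxed)

/* Writer: claim the header, write the forwarding pointer, publish. */
int try_forward(struct obj *o, uintptr_t expected_hd, uintptr_t fwd)
{
  uintptr_t hd = expected_hd;
  if (!atomic_compare_exchange_strong(&o->header, &hd, IN_PROGRESS))
    return 0;                             /* another thread got there first  */
  atomic_store_relaxed(&o->field0, fwd);  /* ordered by the release below    */
  atomic_store_release(&o->header, 0);    /* publish: header 0 = forwarded   */
  return 1;
}

/* Reader: acquire-load the header, spinning while an update is in flight. */
uintptr_t read_header(struct obj *o)
{
  uintptr_t hd;
  while ((hd = atomic_load_acquire(&o->header)) == IN_PROGRESS)
    ;                                     /* spin until the writer publishes */
  return hd;
}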
diff --git a/runtime/obj.c b/runtime/obj.c
index cdaa4c1766..56db69f5fa 100644
--- a/runtime/obj.c
+++ b/runtime/obj.c
@@ -44,7 +44,7 @@ static int obj_tag (value arg)
/* The acquire load ensures that reading the field of a Forward_tag
block in stdlib/camlinternalLazy.ml:force_gen has the necessary
synchronization. */
- hd = (header_t)atomic_load_acq(Hp_atomic_val(arg));
+ hd = (header_t)atomic_load_acquire(Hp_atomic_val(arg));
return Tag_hd(hd);
}
}
diff --git a/runtime/platform.c b/runtime/platform.c
index 34544b8bf5..b3bf88a7aa 100644
--- a/runtime/platform.c
+++ b/runtime/platform.c
@@ -165,7 +165,7 @@ void* caml_mem_map(uintnat size, uintnat alignment, int reserve_only)
if (mmap_blocks.head == NULL) {
/* The first call to caml_mem_map should be during caml_init_domains, called
by caml_init_gc during startup - i.e. before any domains have started. */
- CAMLassert(atomic_load_acq(&caml_num_domains_running) <= 1);
+ CAMLassert(atomic_load_acquire(&caml_num_domains_running) <= 1);
caml_lf_skiplist_init(&mmap_blocks);
}
#endif
diff --git a/runtime/runtime_events.c b/runtime/runtime_events.c
index 921b3cd538..1e5e141c0f 100644
--- a/runtime/runtime_events.c
+++ b/runtime/runtime_events.c
@@ -182,7 +182,7 @@ static void runtime_events_teardown_raw(int remove_file) {
caml_stat_free(current_ring_loc);
current_metadata = NULL;
- atomic_store_rel(&runtime_events_enabled, 0);
+ atomic_store_release(&runtime_events_enabled, 0);
}
/* Stop-the-world which calls the teardown code */
@@ -204,7 +204,7 @@ void caml_runtime_events_post_fork(void) {
new domain can have run yet. Let's be double sure. */
CAMLassert(caml_domain_alone());
- if (atomic_load_acq(&runtime_events_enabled)) {
+ if (atomic_load_acquire(&runtime_events_enabled)) {
/* In the child we need to tear down the various structures used for the
existing runtime_events from the parent. In doing so we need to make sure we
don't remove the runtime_events file itself as that may still be used by
@@ -220,7 +220,7 @@ void caml_runtime_events_post_fork(void) {
/* Return the current location for the ring buffers of this process. This is
used in the consumer to read the ring buffers of the current process */
char_os* caml_runtime_events_current_location(void) {
- if( atomic_load_acq(&runtime_events_enabled) ) {
+ if( atomic_load_acquire(&runtime_events_enabled) ) {
return current_ring_loc;
} else {
return NULL;
@@ -230,7 +230,7 @@ char_os* caml_runtime_events_current_location(void) {
/* Write a lifecycle event and then trigger a stop the world to tear down the
ring buffers */
void caml_runtime_events_destroy(void) {
- if (atomic_load_acq(&runtime_events_enabled)) {
+ if (atomic_load_acquire(&runtime_events_enabled)) {
write_to_ring(
EV_RUNTIME, (ev_message_type){.runtime=EV_LIFECYCLE}, EV_RING_STOP, 0,
NULL, 0);
@@ -242,7 +242,7 @@ void caml_runtime_events_destroy(void) {
caml_try_run_on_all_domains(&stw_teardown_runtime_events,
&remove_file, NULL);
}
- while( atomic_load_acq(&runtime_events_enabled) );
+ while( atomic_load_acquire(&runtime_events_enabled) );
}
}
@@ -251,7 +251,7 @@ void caml_runtime_events_destroy(void) {
domain running. */
static void runtime_events_create_raw(void) {
/* Don't initialise runtime_events twice */
- if (!atomic_load_acq(&runtime_events_enabled)) {
+ if (!atomic_load_acquire(&runtime_events_enabled)) {
int ret, ring_headers_length, ring_data_length;
#ifdef _WIN32
DWORD pid = GetCurrentProcessId();
@@ -386,10 +386,10 @@ static void runtime_events_create_raw(void) {
// runtime_events_enabled to 1
caml_plat_lock(&user_events_lock);
value current_user_event = user_events;
- atomic_store_rel(&runtime_events_enabled, 1);
+ atomic_store_release(&runtime_events_enabled, 1);
caml_plat_unlock(&user_events_lock);
- atomic_store_rel(&runtime_events_paused, 0);
+ atomic_store_release(&runtime_events_paused, 0);
caml_ev_lifecycle(EV_RING_START, pid);
@@ -421,7 +421,7 @@ stw_create_runtime_events(caml_domain_state *domain_state, void *data,
}
CAMLprim value caml_runtime_events_start(void) {
- while (!atomic_load_acq(&runtime_events_enabled)) {
+ while (!atomic_load_acquire(&runtime_events_enabled)) {
caml_try_run_on_all_domains(&stw_create_runtime_events, NULL, NULL);
}
@@ -429,7 +429,7 @@ CAMLprim value caml_runtime_events_start(void) {
}
CAMLprim value caml_runtime_events_pause(void) {
- if (!atomic_load_acq(&runtime_events_enabled)) return Val_unit;
+ if (!atomic_load_acquire(&runtime_events_enabled)) return Val_unit;
uintnat not_paused = 0;
@@ -441,7 +441,7 @@ CAMLprim value caml_runtime_events_pause(void) {
}
CAMLprim value caml_runtime_events_resume(void) {
- if (!atomic_load_acq(&runtime_events_enabled)) return Val_unit;
+ if (!atomic_load_acquire(&runtime_events_enabled)) return Val_unit;
uintnat paused = 1;
@@ -478,10 +478,8 @@ static void write_to_ring(ev_category category, ev_message_type type,
/* the head and tail indexes for the current domain's ring buffer (out of
the header) */
- uint64_t ring_head = atomic_load_explicit(&domain_ring_header->ring_head,
- memory_order_acquire);
- uint64_t ring_tail = atomic_load_explicit(&domain_ring_header->ring_tail,
- memory_order_acquire);
+ uint64_t ring_head = atomic_load_acquire(&domain_ring_header->ring_head);
+ uint64_t ring_tail = atomic_load_acquire(&domain_ring_header->ring_tail);
/* since rings can only be powers of two in size, we use this mask to cheaply
convert the head and tail indexes in to the physical offset in the ring
@@ -519,8 +517,8 @@ static void write_to_ring(ev_category category, ev_message_type type,
ring_head += RUNTIME_EVENTS_ITEM_LENGTH(head_header);
- atomic_store_explicit(&domain_ring_header->ring_head, ring_head,
- memory_order_release); // advance the ring head
+ // advance the ring head
+ atomic_store_release(&domain_ring_header->ring_head, ring_head);
}
if (padding_required > 0) {
@@ -532,8 +530,7 @@ static void write_to_ring(ev_category category, ev_message_type type,
ring_tail += ring_distance_to_end;
- atomic_store_explicit(&domain_ring_header->ring_tail, ring_tail,
- memory_order_release);
+ atomic_store_release(&domain_ring_header->ring_tail, ring_tail);
ring_tail_offset = 0;
}
@@ -553,17 +550,16 @@ static void write_to_ring(ev_category category, ev_message_type type,
memcpy(&ring_ptr[ring_tail_offset], content + word_offset,
event_length * sizeof(uint64_t));
}
- atomic_store_explicit(&domain_ring_header->ring_tail,
- ring_tail + length_with_header_ts,
- memory_order_release);
+ atomic_store_release(&domain_ring_header->ring_tail,
+ ring_tail + length_with_header_ts);
}
/* Functions for putting runtime data on to the runtime_events */
static inline int ring_is_active(void) {
return
- atomic_load_explicit(&runtime_events_enabled, memory_order_relaxed)
- && !atomic_load_explicit(&runtime_events_paused, memory_order_relaxed);
+ atomic_load_relaxed(&runtime_events_enabled)
+ && !atomic_load_relaxed(&runtime_events_paused);
}
void caml_ev_begin(ev_runtime_phase phase) {
@@ -680,7 +676,7 @@ CAMLprim value caml_runtime_events_user_register(value event_name,
// critical section: when we update the user_events list we need to make sure
// it is not updated while we construct the pointer to the next element
- if (atomic_load_acq(&runtime_events_enabled)) {
+ if (atomic_load_acquire(&runtime_events_enabled)) {
// Ring buffer is already available, we register the name
events_register_write_buffer(index, event_name);
}
diff --git a/runtime/signals.c b/runtime/signals.c
index cf9774d199..5a21024ad4 100644
--- a/runtime/signals.c
+++ b/runtime/signals.c
@@ -285,7 +285,7 @@ CAMLno_tsan /* When called from [caml_record_signal], these memory
void caml_set_action_pending(caml_domain_state * dom_st)
{
dom_st->action_pending = 1;
- atomic_store_rel(&dom_st->young_limit, (uintnat)-1);
+ atomic_store_release(&dom_st->young_limit, (uintnat)-1);
}
CAMLexport int caml_check_pending_actions(void)
diff --git a/testsuite/tests/lf_skiplist/stubs.c b/testsuite/tests/lf_skiplist/stubs.c
index 991483e408..75296cd92b 100644
--- a/testsuite/tests/lf_skiplist/stubs.c
+++ b/testsuite/tests/lf_skiplist/stubs.c
@@ -68,8 +68,7 @@ static uintnat count_marks(struct lf_skiplist *sk) {
while (p) {
for (int k = p->top_level; k >= 0; k--) {
- succ =
- (uintptr_t)atomic_load_explicit(&p->forward[k],memory_order_relaxed);
+ succ = (uintptr_t)atomic_load_relaxed(&p->forward[k]);
if (LF_SK_IS_MARKED(succ)) r++ ;
}
p = LF_SK_UNMARK(succ);