author     Guillaume Munch-Maccagnoni <Guillaume.Munch-Maccagnoni@inria.fr>  2023-04-18 16:06:36 +0200
committer  Guillaume Munch-Maccagnoni <Guillaume.Munch-Maccagnoni@inria.fr>  2023-04-18 17:42:16 +0200
commit     b42a488f0b2d08483e54c7dd1d983398542184d7 (patch)
tree       e07f39f1498f998bba4ebe21010eeacdb789b73c
parent     4d5d29b6f6a005eed05a8b1d7c1fb8c710a760b8 (diff)
Clarification acq -> acquire, rel -> release
-rw-r--r--  otherlibs/systhreads/st_pthreads.h  | 10
-rw-r--r--  otherlibs/systhreads/st_stubs.c     |  4
-rw-r--r--  runtime/caml/atomic_refcount.h      |  2
-rw-r--r--  runtime/caml/domain.h               |  2
-rw-r--r--  runtime/caml/platform.h             |  6
-rw-r--r--  runtime/domain.c                    | 66
-rw-r--r--  runtime/fail_nat.c                  |  4
-rw-r--r--  runtime/major_gc.c                  | 56
-rw-r--r--  runtime/minor_gc.c                  |  2
-rw-r--r--  runtime/obj.c                       |  2
-rw-r--r--  runtime/platform.c                  |  2
-rw-r--r--  runtime/runtime_events.c            | 24
-rw-r--r--  runtime/signals.c                   |  2
13 files changed, 92 insertions, 90 deletions
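
The patch is a pure rename in the OCaml runtime: the abbreviated wrappers atomic_load_acq and atomic_store_rel become the spelled-out atomic_load_acquire and atomic_store_release, with no behavioural change. As background, here is a minimal standalone sketch (plain C11 atomics, not runtime code) of the acquire/release message-passing pattern these wrappers name: the consumer that observes the release-stored flag is guaranteed to also observe the data written before it.

    /* Minimal standalone sketch of the acquire/release pattern the renamed
       macros wrap; plain C11 atomics, not OCaml runtime code. */
    #include <stdatomic.h>

    static int payload;              /* ordinary (non-atomic) data */
    static atomic_int ready = 0;     /* publication flag           */

    void producer(void)
    {
      payload = 42;                                            /* 1. write data */
      atomic_store_explicit(&ready, 1, memory_order_release);  /* 2. publish    */
    }

    int consumer(void)
    {
      if (atomic_load_explicit(&ready, memory_order_acquire))  /* observe flag */
        return payload;   /* guaranteed to read 42 once the flag reads 1 */
      return -1;
    }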
diff --git a/otherlibs/systhreads/st_pthreads.h b/otherlibs/systhreads/st_pthreads.h
index 5d29df67d1..bd8839b6de 100644
--- a/otherlibs/systhreads/st_pthreads.h
+++ b/otherlibs/systhreads/st_pthreads.h
@@ -37,7 +37,7 @@ static atomic_uintnat tick_thread_stop[Max_domains];
static int st_initialize(void)
{
- atomic_store_rel(&Tick_thread_stop, 0);
+ atomic_store_release(&Tick_thread_stop, 0);
return 0;
}
@@ -112,14 +112,14 @@ static void st_masterlock_init(st_masterlock * m)
m->init = 1;
}
m->busy = 1;
- atomic_store_rel(&m->waiters, 0);
+ atomic_store_release(&m->waiters, 0);
return;
};
static uintnat st_masterlock_waiters(st_masterlock * m)
{
- return atomic_load_acq(&m->waiters);
+ return atomic_load_acquire(&m->waiters);
}
static void st_bt_lock_acquire(st_masterlock *m) {
@@ -295,10 +295,10 @@ static void * caml_thread_tick(void * arg)
caml_init_domain_self(*domain_id);
caml_domain_state *domain = Caml_state;
- while(! atomic_load_acq(&Tick_thread_stop)) {
+ while(! atomic_load_acquire(&Tick_thread_stop)) {
st_msleep(Thread_timeout);
- atomic_store_rel(&domain->requested_external_interrupt, 1);
+ atomic_store_release(&domain->requested_external_interrupt, 1);
caml_interrupt_self();
}
return NULL;
diff --git a/otherlibs/systhreads/st_stubs.c b/otherlibs/systhreads/st_stubs.c
index 389a343b2d..09d56120a7 100644
--- a/otherlibs/systhreads/st_stubs.c
+++ b/otherlibs/systhreads/st_stubs.c
@@ -488,9 +488,9 @@ CAMLprim value caml_thread_initialize(value unit)
CAMLprim value caml_thread_cleanup(value unit)
{
if (Tick_thread_running){
- atomic_store_rel(&Tick_thread_stop, 1);
+ atomic_store_release(&Tick_thread_stop, 1);
st_thread_join(Tick_thread_id);
- atomic_store_rel(&Tick_thread_stop, 0);
+ atomic_store_release(&Tick_thread_stop, 0);
Tick_thread_running = 0;
}
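
The systhreads hunks above all touch the Tick_thread_stop flag: caml_thread_cleanup release-stores 1 to ask the tick thread to exit, and the loop in caml_thread_tick acquire-loads the flag on every iteration. Below is a hedged sketch of that stop-flag handshake, written with plain C11 atomics and pthreads; the names and the 50 ms sleep are illustrative stand-ins, not the runtime's own.

    #include <stdatomic.h>
    #include <pthread.h>
    #include <unistd.h>

    static atomic_uint stop = 0;

    static void *ticker(void *arg)
    {
      (void)arg;
      while (!atomic_load_explicit(&stop, memory_order_acquire)) {
        usleep(50 * 1000);          /* cf. st_msleep(Thread_timeout) */
        /* ... request an interrupt on the target domain ... */
      }
      return NULL;
    }

    pthread_t start_tick_thread(void)
    {
      pthread_t id;
      pthread_create(&id, NULL, ticker, NULL);
      return id;
    }

    void stop_tick_thread(pthread_t id)
    {
      atomic_store_explicit(&stop, 1, memory_order_release);  /* ask it to exit  */
      pthread_join(id, NULL);
      atomic_store_explicit(&stop, 0, memory_order_release);  /* ready for reuse */
    }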
diff --git a/runtime/caml/atomic_refcount.h b/runtime/caml/atomic_refcount.h
index 3e4a239d51..aba5ce7f67 100644
--- a/runtime/caml/atomic_refcount.h
+++ b/runtime/caml/atomic_refcount.h
@@ -21,7 +21,7 @@
#include "camlatomic.h"
Caml_inline void caml_atomic_refcount_init(atomic_uintnat* refc, uintnat n){
- atomic_store_rel(refc, n);
+ atomic_store_release(refc, n);
}
Caml_inline uintnat caml_atomic_refcount_decr(atomic_uintnat* refcount){
diff --git a/runtime/caml/domain.h b/runtime/caml/domain.h
index 17c011ecee..49194ae73d 100644
--- a/runtime/caml/domain.h
+++ b/runtime/caml/domain.h
@@ -92,7 +92,7 @@ CAMLextern atomic_uintnat caml_num_domains_running;
Caml_inline intnat caml_domain_alone(void)
{
- return atomic_load_acq(&caml_num_domains_running) == 1;
+ return atomic_load_acquire(&caml_num_domains_running) == 1;
}
#ifdef DEBUG
diff --git a/runtime/caml/platform.h b/runtime/caml/platform.h
index 4d8431b3d9..373419e3c9 100644
--- a/runtime/caml/platform.h
+++ b/runtime/caml/platform.h
@@ -51,11 +51,11 @@ Caml_inline void cpu_relax(void) {
/* Loads and stores with acquire, release and relaxed semantics */
-#define atomic_load_acq(p) \
+#define atomic_load_acquire(p) \
atomic_load_explicit((p), memory_order_acquire)
#define atomic_load_relaxed(p) \
atomic_load_explicit((p), memory_order_relaxed)
-#define atomic_store_rel(p, v) \
+#define atomic_store_release(p, v) \
atomic_store_explicit((p), (v), memory_order_release)
#define atomic_store_relaxed(p, v) \
atomic_store_explicit((p), (v), memory_order_relaxed)
@@ -83,7 +83,7 @@ CAMLextern unsigned caml_plat_spin_wait(unsigned spins,
Caml_inline uintnat atomic_load_wait_nonzero(atomic_uintnat* p) {
SPIN_WAIT {
- uintnat v = atomic_load_acq(p);
+ uintnat v = atomic_load_acquire(p);
if (v) return v;
}
}
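
After this hunk, platform.h defines the wrapper set below (reconstructed from the changed and context lines above). The rename leaves the expansion untouched, so every call-site edit in the rest of the patch is a pure textual substitution: atomic_store_release(&x, 1) still expands to atomic_store_explicit(&x, 1, memory_order_release).

    /* The wrapper set in runtime/caml/platform.h after this patch
       (reconstructed from the hunk above). */
    #define atomic_load_acquire(p) \
      atomic_load_explicit((p), memory_order_acquire)
    #define atomic_load_relaxed(p) \
      atomic_load_explicit((p), memory_order_relaxed)
    #define atomic_store_release(p, v) \
      atomic_store_explicit((p), (v), memory_order_release)
    #define atomic_store_relaxed(p, v) \
      atomic_store_explicit((p), (v), memory_order_relaxed)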
diff --git a/runtime/domain.c b/runtime/domain.c
index 88d7c1469c..91325c4bfc 100644
--- a/runtime/domain.c
+++ b/runtime/domain.c
@@ -294,22 +294,22 @@ CAMLexport caml_domain_state* caml_get_domain_state(void)
Caml_inline void interrupt_domain(struct interruptor* s)
{
- atomic_store_rel(s->interrupt_word, (uintnat)(-1));
+ atomic_store_release(s->interrupt_word, (uintnat)(-1));
}
int caml_incoming_interrupts_queued(void)
{
- return atomic_load_acq(&domain_self->interruptor.interrupt_pending);
+ return atomic_load_acquire(&domain_self->interruptor.interrupt_pending);
}
/* must NOT be called with s->lock held */
static void stw_handler(caml_domain_state* domain);
static uintnat handle_incoming(struct interruptor* s)
{
- uintnat handled = atomic_load_acq(&s->interrupt_pending);
+ uintnat handled = atomic_load_acquire(&s->interrupt_pending);
CAMLassert (s->running);
if (handled) {
- atomic_store_rel(&s->interrupt_pending, 0);
+ atomic_store_release(&s->interrupt_pending, 0);
stw_handler(domain_self->state);
}
@@ -330,7 +330,7 @@ void caml_handle_incoming_interrupts(void)
int caml_send_interrupt(struct interruptor* target)
{
/* signal that there is an interrupt pending */
- atomic_store_rel(&target->interrupt_pending, 1);
+ atomic_store_release(&target->interrupt_pending, 1);
/* Signal the condition variable, in case the target is
itself waiting for an interrupt to be processed elsewhere */
@@ -349,7 +349,7 @@ static void caml_wait_interrupt_serviced(struct interruptor* target)
/* Often, interrupt handlers are fast, so spin for a bit before waiting */
for (i=0; i<1000; i++) {
- if (!atomic_load_acq(&target->interrupt_pending)) {
+ if (!atomic_load_acquire(&target->interrupt_pending)) {
return;
}
cpu_relax();
@@ -357,7 +357,7 @@ static void caml_wait_interrupt_serviced(struct interruptor* target)
{
SPIN_WAIT {
- if (!atomic_load_acq(&target->interrupt_pending))
+ if (!atomic_load_acquire(&target->interrupt_pending))
return;
}
}
@@ -453,7 +453,7 @@ static void free_minor_heap(void) {
domain_state->young_end = NULL;
domain_state->young_ptr = NULL;
domain_state->young_trigger = NULL;
- atomic_store_rel(&domain_state->young_limit,
+ atomic_store_release(&domain_state->young_limit,
(uintnat) domain_state->young_start);
}
@@ -545,7 +545,7 @@ static void domain_create(uintnat initial_minor_heap_wsize) {
caml_plat_lock(&all_domains_lock);
/* Wait until any in-progress STW sections end. */
- while (atomic_load_acq(&stw_leader)) {
+ while (atomic_load_acquire(&stw_leader)) {
/* [caml_plat_wait] releases [all_domains_lock] until the current
STW section ends, and then takes the lock again. */
caml_plat_wait(&all_domains_cond);
@@ -938,7 +938,7 @@ static void* backup_thread_func(void* v)
domain_self = di;
caml_state = di->state;
- msg = atomic_load_acq (&di->backup_thread_msg);
+ msg = atomic_load_acquire (&di->backup_thread_msg);
while (msg != BT_TERMINATE) {
CAMLassert (msg <= BT_TERMINATE);
switch (msg) {
@@ -958,7 +958,7 @@ static void* backup_thread_func(void* v)
* Will be woken from caml_leave_blocking_section
*/
caml_plat_lock(&s->lock);
- msg = atomic_load_acq (&di->backup_thread_msg);
+ msg = atomic_load_acquire (&di->backup_thread_msg);
if (msg == BT_IN_BLOCKING_SECTION &&
!caml_incoming_interrupts_queued())
caml_plat_wait(&s->cond);
@@ -970,7 +970,7 @@ static void* backup_thread_func(void* v)
* or domain_terminate
*/
caml_plat_lock(&di->domain_lock);
- msg = atomic_load_acq (&di->backup_thread_msg);
+ msg = atomic_load_acquire (&di->backup_thread_msg);
if (msg == BT_ENTERING_OCAML)
caml_plat_wait(&di->domain_cond);
caml_plat_unlock(&di->domain_lock);
@@ -979,11 +979,11 @@ static void* backup_thread_func(void* v)
cpu_relax();
break;
};
- msg = atomic_load_acq (&di->backup_thread_msg);
+ msg = atomic_load_acquire (&di->backup_thread_msg);
}
/* doing terminate */
- atomic_store_rel(&di->backup_thread_msg, BT_INIT);
+ atomic_store_release(&di->backup_thread_msg, BT_INIT);
return 0;
}
@@ -999,7 +999,7 @@ static void install_backup_thread (dom_internal* di)
CAMLassert (di->backup_thread_msg == BT_INIT || /* Using fresh domain */
di->backup_thread_msg == BT_TERMINATE); /* Reusing domain */
- while (atomic_load_acq(&di->backup_thread_msg) != BT_INIT) {
+ while (atomic_load_acquire(&di->backup_thread_msg) != BT_INIT) {
/* Give a chance for backup thread on this domain to terminate */
caml_plat_unlock (&di->domain_lock);
cpu_relax ();
@@ -1012,7 +1012,7 @@ static void install_backup_thread (dom_internal* di)
pthread_sigmask(SIG_BLOCK, &mask, &old_mask);
#endif
- atomic_store_rel(&di->backup_thread_msg, BT_ENTERING_OCAML);
+ atomic_store_release(&di->backup_thread_msg, BT_ENTERING_OCAML);
err = pthread_create(&di->backup_thread, 0, backup_thread_func, (void*)di);
#ifndef _WIN32
@@ -1227,11 +1227,11 @@ void caml_global_barrier_end(barrier_status b)
uintnat sense = b & BARRIER_SENSE_BIT;
if (caml_global_barrier_is_final(b)) {
/* last domain into the barrier, flip sense */
- atomic_store_rel(&stw_request.barrier, sense ^ BARRIER_SENSE_BIT);
+ atomic_store_release(&stw_request.barrier, sense ^ BARRIER_SENSE_BIT);
} else {
/* wait until another domain flips the sense */
SPIN_WAIT {
- uintnat barrier = atomic_load_acq(&stw_request.barrier);
+ uintnat barrier = atomic_load_acquire(&stw_request.barrier);
if ((barrier & BARRIER_SENSE_BIT) != sense) break;
}
}
@@ -1259,7 +1259,7 @@ static void decrement_stw_domains_still_processing(void)
if( am_last ) {
/* release the STW lock to allow new STW sections */
caml_plat_lock(&all_domains_lock);
- atomic_store_rel(&stw_leader, 0);
+ atomic_store_release(&stw_leader, 0);
caml_plat_broadcast(&all_domains_cond);
caml_gc_log("clearing stw leader");
caml_plat_unlock(&all_domains_lock);
@@ -1272,7 +1272,7 @@ static void stw_handler(caml_domain_state* domain)
CAML_EV_BEGIN(EV_STW_API_BARRIER);
{
SPIN_WAIT {
- if (atomic_load_acq(&stw_request.domains_still_running) == 0)
+ if (atomic_load_acquire(&stw_request.domains_still_running) == 0)
break;
if (stw_request.enter_spin_callback)
@@ -1384,21 +1384,21 @@ int caml_try_run_on_all_domains_with_spin_work(
situations. Without this read, [stw_leader] would be protected by
[all_domains_lock] and could be a non-atomic variable.
*/
- if (atomic_load_acq(&stw_leader) ||
+ if (atomic_load_acquire(&stw_leader) ||
!caml_plat_try_lock(&all_domains_lock)) {
caml_handle_incoming_interrupts();
return 0;
}
/* see if there is a stw_leader already */
- if (atomic_load_acq(&stw_leader)) {
+ if (atomic_load_acquire(&stw_leader)) {
caml_plat_unlock(&all_domains_lock);
caml_handle_incoming_interrupts();
return 0;
}
/* we have the lock and can claim the stw_leader */
- atomic_store_rel(&stw_leader, (uintnat)domain_self);
+ atomic_store_release(&stw_leader, (uintnat)domain_self);
CAML_EV_BEGIN(EV_STW_LEADER);
caml_gc_log("causing STW");
@@ -1409,10 +1409,10 @@ int caml_try_run_on_all_domains_with_spin_work(
stw_request.enter_spin_data = enter_spin_data;
stw_request.callback = handler;
stw_request.data = data;
- atomic_store_rel(&stw_request.barrier, 0);
- atomic_store_rel(&stw_request.domains_still_running, 1);
+ atomic_store_release(&stw_request.barrier, 0);
+ atomic_store_release(&stw_request.domains_still_running, 1);
stw_request.num_domains = stw_domains.participating_domains;
- atomic_store_rel(&stw_request.num_domains_still_processing,
+ atomic_store_release(&stw_request.num_domains_still_processing,
stw_domains.participating_domains);
if( leader_setup ) {
@@ -1462,7 +1462,7 @@ int caml_try_run_on_all_domains_with_spin_work(
}
/* release from the enter barrier */
- atomic_store_rel(&stw_request.domains_still_running, 0);
+ atomic_store_release(&stw_request.domains_still_running, 0);
#ifdef DEBUG
domain_state->inside_stw_handler = 1;
@@ -1511,7 +1511,7 @@ void caml_reset_young_limit(caml_domain_state * dom_st)
|| dom_st->major_slice_epoch < atomic_load (&caml_major_slice_epoch)
|| atomic_load_relaxed(&dom_st->requested_external_interrupt)
|| dom_st->action_pending) {
- atomic_store_rel(&dom_st->young_limit, (uintnat)-1);
+ atomic_store_release(&dom_st->young_limit, (uintnat)-1);
CAMLassert(caml_check_gc_interrupt(dom_st));
}
}
@@ -1599,7 +1599,7 @@ void caml_poll_gc_work(void)
CAML_EV_END(EV_MAJOR);
}
- if (atomic_load_acq(&d->requested_external_interrupt)) {
+ if (atomic_load_acquire(&d->requested_external_interrupt)) {
caml_domain_external_interrupt_hook();
}
caml_reset_young_limit(d);
@@ -1621,7 +1621,7 @@ void caml_handle_gc_interrupt(void)
CAMLexport int caml_bt_is_in_blocking_section(void)
{
- uintnat status = atomic_load_acq(&domain_self->backup_thread_msg);
+ uintnat status = atomic_load_acquire(&domain_self->backup_thread_msg);
return status == BT_IN_BLOCKING_SECTION;
}
@@ -1650,7 +1650,7 @@ CAMLexport void caml_bt_enter_ocaml(void)
CAMLassert(caml_domain_alone() || self->backup_thread_running);
if (self->backup_thread_running) {
- atomic_store_rel(&self->backup_thread_msg, BT_ENTERING_OCAML);
+ atomic_store_release(&self->backup_thread_msg, BT_ENTERING_OCAML);
}
}
@@ -1668,7 +1668,7 @@ CAMLexport void caml_bt_exit_ocaml(void)
CAMLassert(caml_domain_alone() || self->backup_thread_running);
if (self->backup_thread_running) {
- atomic_store_rel(&self->backup_thread_msg, BT_IN_BLOCKING_SECTION);
+ atomic_store_release(&self->backup_thread_msg, BT_IN_BLOCKING_SECTION);
/* Wakeup backup thread if it is sleeping */
caml_plat_signal(&self->domain_cond);
}
@@ -1827,7 +1827,7 @@ static void domain_terminate (void)
/* signal the domain termination to the backup thread
NB: for a program with no additional domains, the backup thread
will not have been started */
- atomic_store_rel(&domain_self->backup_thread_msg, BT_TERMINATE);
+ atomic_store_release(&domain_self->backup_thread_msg, BT_TERMINATE);
caml_plat_signal(&domain_self->domain_cond);
caml_plat_unlock(&domain_self->domain_lock);
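
The caml_try_run_on_all_domains_with_spin_work hunk above shows the stw_leader claim sequence: an opportunistic acquire load before touching the lock, a re-check under the lock, a release store to claim the slot, and another release store in decrement_stw_domains_still_processing to clear it. A simplified sketch of that shape follows, with pthread primitives standing in for the runtime's caml_plat_* API and the post-claim setup omitted.

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdint.h>

    static atomic_uintptr_t stw_leader = 0;
    static pthread_mutex_t all_domains_lock = PTHREAD_MUTEX_INITIALIZER;

    int try_claim_leader(uintptr_t self)
    {
      /* opportunistic check before taking the lock */
      if (atomic_load_explicit(&stw_leader, memory_order_acquire) ||
          pthread_mutex_trylock(&all_domains_lock) != 0)
        return 0;

      /* re-check under the lock: another domain may have claimed it meanwhile */
      if (atomic_load_explicit(&stw_leader, memory_order_acquire)) {
        pthread_mutex_unlock(&all_domains_lock);
        return 0;
      }

      atomic_store_explicit(&stw_leader, self, memory_order_release);
      pthread_mutex_unlock(&all_domains_lock);
      return 1;
    }

    void clear_leader(void)
    {
      pthread_mutex_lock(&all_domains_lock);
      atomic_store_explicit(&stw_leader, 0, memory_order_release);
      pthread_mutex_unlock(&all_domains_lock);
    }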
diff --git a/runtime/fail_nat.c b/runtime/fail_nat.c
index 2245f933b3..bb891b940f 100644
--- a/runtime/fail_nat.c
+++ b/runtime/fail_nat.c
@@ -197,7 +197,7 @@ CAMLexport value caml_raise_if_exception(value res)
static value array_bound_exn(void)
{
static atomic_uintnat exn_cache = ATOMIC_UINTNAT_INIT(0);
- const value* exn = (const value*)atomic_load_acq(&exn_cache);
+ const value* exn = (const value*)atomic_load_acquire(&exn_cache);
if (!exn) {
exn = caml_named_value("Pervasives.array_bound_error");
if (!exn) {
@@ -205,7 +205,7 @@ static value array_bound_exn(void)
"Invalid_argument(\"index out of bounds\")\n");
exit(2);
}
- atomic_store_rel(&exn_cache, (uintnat)exn);
+ atomic_store_release(&exn_cache, (uintnat)exn);
}
return *exn;
}
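
array_bound_exn above caches a caml_named_value lookup in a static atomic: an acquire load reads the cache, and a release store publishes the pointer once the lookup has succeeded. Racing initialisers all compute the same pointer, so a plain store (rather than a compare-and-swap) suffices. A self-contained sketch of the same lazy-publication pattern, with a dummy lookup standing in for caml_named_value and error handling omitted:

    #include <stdatomic.h>
    #include <stdint.h>

    static const int bound_error_token = 0;        /* dummy registered value */

    static const void *lookup_named_value(const char *name)
    {
      (void)name;
      return &bound_error_token;   /* stand-in for caml_named_value's lookup */
    }

    const void *cached_named_value(void)
    {
      static atomic_uintptr_t cache = 0;

      const void *v = (const void *)atomic_load_explicit(&cache,
                                                         memory_order_acquire);
      if (!v) {
        v = lookup_named_value("Pervasives.array_bound_error");
        /* idempotent result, so publish with a plain release store */
        atomic_store_explicit(&cache, (uintptr_t)v, memory_order_release);
      }
      return v;
    }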
diff --git a/runtime/major_gc.c b/runtime/major_gc.c
index 4adf49c8d8..f1245e5afc 100644
--- a/runtime/major_gc.c
+++ b/runtime/major_gc.c
@@ -196,8 +196,8 @@ static void ephe_next_cycle (void)
caml_plat_lock(&ephe_lock);
atomic_fetch_add(&ephe_cycle_info.ephe_cycle, +1);
- CAMLassert(atomic_load_acq(&ephe_cycle_info.num_domains_done) <=
- atomic_load_acq(&ephe_cycle_info.num_domains_todo));
+ CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_done) <=
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo));
atomic_store(&ephe_cycle_info.num_domains_done, 0);
caml_plat_unlock(&ephe_lock);
@@ -216,8 +216,8 @@ static void ephe_todo_list_emptied (void)
/* Since the todo list is empty, this domain does not need to participate in
* further ephemeron cycles. */
atomic_fetch_add(&ephe_cycle_info.num_domains_todo, -1);
- CAMLassert(atomic_load_acq(&ephe_cycle_info.num_domains_done) <=
- atomic_load_acq(&ephe_cycle_info.num_domains_todo));
+ CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_done) <=
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo));
caml_plat_unlock(&ephe_lock);
}
@@ -225,18 +225,18 @@ static void ephe_todo_list_emptied (void)
/* Record that ephemeron marking was done for the given ephemeron cycle. */
static void record_ephe_marking_done (uintnat ephe_cycle)
{
- CAMLassert (ephe_cycle <= atomic_load_acq(&ephe_cycle_info.ephe_cycle));
+ CAMLassert (ephe_cycle <= atomic_load_acquire(&ephe_cycle_info.ephe_cycle));
CAMLassert (Caml_state->marking_done);
- if (ephe_cycle < atomic_load_acq(&ephe_cycle_info.ephe_cycle))
+ if (ephe_cycle < atomic_load_acquire(&ephe_cycle_info.ephe_cycle))
return;
caml_plat_lock(&ephe_lock);
if (ephe_cycle == atomic_load(&ephe_cycle_info.ephe_cycle)) {
Caml_state->ephe_info->cycle = ephe_cycle;
atomic_fetch_add(&ephe_cycle_info.num_domains_done, +1);
- CAMLassert(atomic_load_acq(&ephe_cycle_info.num_domains_done) <=
- atomic_load_acq(&ephe_cycle_info.num_domains_todo));
+ CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_done) <=
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo));
}
caml_plat_unlock(&ephe_lock);
}
@@ -1168,8 +1168,8 @@ static void cycle_all_domains_callback(caml_domain_state* domain, void* unused,
CAML_EV_BEGIN(EV_MAJOR_GC_CYCLE_DOMAINS);
CAMLassert(domain == Caml_state);
- CAMLassert(atomic_load_acq(&ephe_cycle_info.num_domains_todo) ==
- atomic_load_acq(&ephe_cycle_info.num_domains_done));
+ CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_todo) ==
+ atomic_load_acquire(&ephe_cycle_info.num_domains_done));
CAMLassert(atomic_load(&num_domains_to_mark) == 0);
CAMLassert(atomic_load(&num_domains_to_sweep) == 0);
CAMLassert(atomic_load(&num_domains_to_ephe_sweep) == 0);
@@ -1246,20 +1246,22 @@ static void cycle_all_domains_callback(caml_domain_state* domain, void* unused,
domain->swept_words = 0;
num_domains_in_stw = (uintnat)caml_global_barrier_num_domains();
- atomic_store_rel(&num_domains_to_sweep, num_domains_in_stw);
- atomic_store_rel(&num_domains_to_mark, num_domains_in_stw);
+ atomic_store_release(&num_domains_to_sweep, num_domains_in_stw);
+ atomic_store_release(&num_domains_to_mark, num_domains_in_stw);
caml_gc_phase = Phase_sweep_and_mark_main;
atomic_store(&ephe_cycle_info.num_domains_todo, num_domains_in_stw);
atomic_store(&ephe_cycle_info.ephe_cycle, 1);
atomic_store(&ephe_cycle_info.num_domains_done, 0);
- atomic_store_rel(&num_domains_to_ephe_sweep, 0);
+ atomic_store_release(&num_domains_to_ephe_sweep, 0);
/* Will be set to the correct number when switching to
[Phase_sweep_ephe] */
- atomic_store_rel(&num_domains_to_final_update_first, num_domains_in_stw);
- atomic_store_rel(&num_domains_to_final_update_last, num_domains_in_stw);
+ atomic_store_release(&num_domains_to_final_update_first,
+ num_domains_in_stw);
+ atomic_store_release(&num_domains_to_final_update_last,
+ num_domains_in_stw);
atomic_store(&domain_global_roots_started, WORK_UNSTARTED);
@@ -1366,11 +1368,11 @@ static int is_complete_phase_sweep_and_mark_main (void)
{
return
caml_gc_phase == Phase_sweep_and_mark_main &&
- atomic_load_acq (&num_domains_to_sweep) == 0 &&
- atomic_load_acq (&num_domains_to_mark) == 0 &&
+ atomic_load_acquire (&num_domains_to_sweep) == 0 &&
+ atomic_load_acquire (&num_domains_to_mark) == 0 &&
/* Marking is done */
- atomic_load_acq(&ephe_cycle_info.num_domains_todo) ==
- atomic_load_acq(&ephe_cycle_info.num_domains_done) &&
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo) ==
+ atomic_load_acquire(&ephe_cycle_info.num_domains_done) &&
/* Ephemeron marking is done */
no_orphaned_work();
/* All orphaned ephemerons have been adopted */
@@ -1380,12 +1382,12 @@ static int is_complete_phase_mark_final (void)
{
return
caml_gc_phase == Phase_mark_final &&
- atomic_load_acq (&num_domains_to_final_update_first) == 0 &&
+ atomic_load_acquire (&num_domains_to_final_update_first) == 0 &&
/* updated finalise first values */
- atomic_load_acq (&num_domains_to_mark) == 0 &&
+ atomic_load_acquire (&num_domains_to_mark) == 0 &&
/* Marking is done */
- atomic_load_acq(&ephe_cycle_info.num_domains_todo) ==
- atomic_load_acq(&ephe_cycle_info.num_domains_done) &&
+ atomic_load_acquire(&ephe_cycle_info.num_domains_todo) ==
+ atomic_load_acquire(&ephe_cycle_info.num_domains_done) &&
/* Ephemeron marking is done */
no_orphaned_work();
/* All orphaned ephemerons have been adopted */
@@ -1395,9 +1397,9 @@ static int is_complete_phase_sweep_ephe (void)
{
return
caml_gc_phase == Phase_sweep_ephe &&
- atomic_load_acq (&num_domains_to_ephe_sweep) == 0 &&
+ atomic_load_acquire (&num_domains_to_ephe_sweep) == 0 &&
/* All domains have swept their ephemerons */
- atomic_load_acq (&num_domains_to_final_update_last) == 0 &&
+ atomic_load_acquire (&num_domains_to_final_update_last) == 0 &&
/* All domains have updated finalise last values */
no_orphaned_work();
/* All orphaned structures have been adopted */
@@ -1416,7 +1418,7 @@ static void try_complete_gc_phase (caml_domain_state* domain, void* unused,
caml_gc_phase = Phase_mark_final;
} else if (is_complete_phase_mark_final()) {
caml_gc_phase = Phase_sweep_ephe;
- atomic_store_rel(&num_domains_to_ephe_sweep, participant_count);
+ atomic_store_release(&num_domains_to_ephe_sweep, participant_count);
for (int i = 0; i < participant_count; i++)
participating[i]->ephe_info->must_sweep_ephe = 1;
}
@@ -1560,7 +1562,7 @@ mark_again:
/* Ephemerons */
if (caml_gc_phase != Phase_sweep_ephe) {
/* Ephemeron Marking */
- saved_ephe_cycle = atomic_load_acq(&ephe_cycle_info.ephe_cycle);
+ saved_ephe_cycle = atomic_load_acquire(&ephe_cycle_info.ephe_cycle);
if (domain_state->ephe_info->todo != (value) NULL &&
saved_ephe_cycle > domain_state->ephe_info->cycle) {
CAML_EV_BEGIN(EV_MAJOR_EPHE_MARK);
diff --git a/runtime/minor_gc.c b/runtime/minor_gc.c
index 55476b9026..6092dcd80b 100644
--- a/runtime/minor_gc.c
+++ b/runtime/minor_gc.c
@@ -212,7 +212,7 @@ static int try_update_object_header(value v, volatile value *p, value result,
/* Success. Now we can write the forwarding pointer. */
atomic_store_explicit(Op_atomic_val(v), result, memory_order_relaxed);
/* And update header ('release' ensures after update of fwd pointer) */
- atomic_store_rel(Hp_atomic_val(v), 0);
+ atomic_store_release(Hp_atomic_val(v), 0);
/* Let the caller know we were responsible for the update */
success = 1;
} else {
diff --git a/runtime/obj.c b/runtime/obj.c
index cdaa4c1766..56db69f5fa 100644
--- a/runtime/obj.c
+++ b/runtime/obj.c
@@ -44,7 +44,7 @@ static int obj_tag (value arg)
/* The acquire load ensures that reading the field of a Forward_tag
block in stdlib/camlinternalLazy.ml:force_gen has the necessary
synchronization. */
- hd = (header_t)atomic_load_acq(Hp_atomic_val(arg));
+ hd = (header_t)atomic_load_acquire(Hp_atomic_val(arg));
return Tag_hd(hd);
}
}
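
The minor_gc.c and obj.c hunks above are a matched pair: try_update_object_header writes the forwarding pointer with a relaxed store and then release-stores the header, while obj_tag acquire-loads the header, so any reader that observes the updated header also observes the forwarding pointer. A condensed sketch of that pairing, with simplified stand-ins for the runtime's Hp_atomic_val/Op_atomic_val accessors and header layout:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct {
      atomic_uintptr_t header;   /* stands in for Hp_atomic_val(v) */
      atomic_uintptr_t field0;   /* stands in for Op_atomic_val(v) */
    } block;

    void publish_forward(block *b, uintptr_t forwarded)
    {
      /* write the forwarding pointer first... */
      atomic_store_explicit(&b->field0, forwarded, memory_order_relaxed);
      /* ...then release-store the header: readers that see the new header
         are guaranteed to also see the forwarding pointer */
      atomic_store_explicit(&b->header, 0, memory_order_release);
    }

    uintptr_t read_tag(block *b)
    {
      /* acquire load pairs with the release store above (cf. obj_tag) */
      uintptr_t hd = atomic_load_explicit(&b->header, memory_order_acquire);
      return hd & 0xff;   /* simplified Tag_hd */
    }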
diff --git a/runtime/platform.c b/runtime/platform.c
index 34544b8bf5..b3bf88a7aa 100644
--- a/runtime/platform.c
+++ b/runtime/platform.c
@@ -165,7 +165,7 @@ void* caml_mem_map(uintnat size, uintnat alignment, int reserve_only)
if (mmap_blocks.head == NULL) {
/* The first call to caml_mem_map should be during caml_init_domains, called
by caml_init_gc during startup - i.e. before any domains have started. */
- CAMLassert(atomic_load_acq(&caml_num_domains_running) <= 1);
+ CAMLassert(atomic_load_acquire(&caml_num_domains_running) <= 1);
caml_lf_skiplist_init(&mmap_blocks);
}
#endif
diff --git a/runtime/runtime_events.c b/runtime/runtime_events.c
index 921b3cd538..f4a91d6b39 100644
--- a/runtime/runtime_events.c
+++ b/runtime/runtime_events.c
@@ -182,7 +182,7 @@ static void runtime_events_teardown_raw(int remove_file) {
caml_stat_free(current_ring_loc);
current_metadata = NULL;
- atomic_store_rel(&runtime_events_enabled, 0);
+ atomic_store_release(&runtime_events_enabled, 0);
}
/* Stop-the-world which calls the teardown code */
@@ -204,7 +204,7 @@ void caml_runtime_events_post_fork(void) {
new domain can have run yet. Let's be double sure. */
CAMLassert(caml_domain_alone());
- if (atomic_load_acq(&runtime_events_enabled)) {
+ if (atomic_load_acquire(&runtime_events_enabled)) {
/* In the child we need to tear down the various structures used for the
existing runtime_events from the parent. In doing so we need to make sure we
don't remove the runtime_events file itself as that may still be used by
@@ -220,7 +220,7 @@ void caml_runtime_events_post_fork(void) {
/* Return the current location for the ring buffers of this process. This is
used in the consumer to read the ring buffers of the current process */
char_os* caml_runtime_events_current_location(void) {
- if( atomic_load_acq(&runtime_events_enabled) ) {
+ if( atomic_load_acquire(&runtime_events_enabled) ) {
return current_ring_loc;
} else {
return NULL;
@@ -230,7 +230,7 @@ char_os* caml_runtime_events_current_location(void) {
/* Write a lifecycle event and then trigger a stop the world to tear down the
ring buffers */
void caml_runtime_events_destroy(void) {
- if (atomic_load_acq(&runtime_events_enabled)) {
+ if (atomic_load_acquire(&runtime_events_enabled)) {
write_to_ring(
EV_RUNTIME, (ev_message_type){.runtime=EV_LIFECYCLE}, EV_RING_STOP, 0,
NULL, 0);
@@ -242,7 +242,7 @@ void caml_runtime_events_destroy(void) {
caml_try_run_on_all_domains(&stw_teardown_runtime_events,
&remove_file, NULL);
}
- while( atomic_load_acq(&runtime_events_enabled) );
+ while( atomic_load_acquire(&runtime_events_enabled) );
}
}
@@ -251,7 +251,7 @@ void caml_runtime_events_destroy(void) {
domain running. */
static void runtime_events_create_raw(void) {
/* Don't initialise runtime_events twice */
- if (!atomic_load_acq(&runtime_events_enabled)) {
+ if (!atomic_load_acquire(&runtime_events_enabled)) {
int ret, ring_headers_length, ring_data_length;
#ifdef _WIN32
DWORD pid = GetCurrentProcessId();
@@ -386,10 +386,10 @@ static void runtime_events_create_raw(void) {
// runtime_events_enabled to 1
caml_plat_lock(&user_events_lock);
value current_user_event = user_events;
- atomic_store_rel(&runtime_events_enabled, 1);
+ atomic_store_release(&runtime_events_enabled, 1);
caml_plat_unlock(&user_events_lock);
- atomic_store_rel(&runtime_events_paused, 0);
+ atomic_store_release(&runtime_events_paused, 0);
caml_ev_lifecycle(EV_RING_START, pid);
@@ -421,7 +421,7 @@ stw_create_runtime_events(caml_domain_state *domain_state, void *data,
}
CAMLprim value caml_runtime_events_start(void) {
- while (!atomic_load_acq(&runtime_events_enabled)) {
+ while (!atomic_load_acquire(&runtime_events_enabled)) {
caml_try_run_on_all_domains(&stw_create_runtime_events, NULL, NULL);
}
@@ -429,7 +429,7 @@ CAMLprim value caml_runtime_events_start(void) {
}
CAMLprim value caml_runtime_events_pause(void) {
- if (!atomic_load_acq(&runtime_events_enabled)) return Val_unit;
+ if (!atomic_load_acquire(&runtime_events_enabled)) return Val_unit;
uintnat not_paused = 0;
@@ -441,7 +441,7 @@ CAMLprim value caml_runtime_events_pause(void) {
}
CAMLprim value caml_runtime_events_resume(void) {
- if (!atomic_load_acq(&runtime_events_enabled)) return Val_unit;
+ if (!atomic_load_acquire(&runtime_events_enabled)) return Val_unit;
uintnat paused = 1;
@@ -680,7 +680,7 @@ CAMLprim value caml_runtime_events_user_register(value event_name,
// critical section: when we update the user_events list we need to make sure
// it is not updated while we construct the pointer to the next element
- if (atomic_load_acq(&runtime_events_enabled)) {
+ if (atomic_load_acquire(&runtime_events_enabled)) {
// Ring buffer is already available, we register the name
events_register_write_buffer(index, event_name);
}
diff --git a/runtime/signals.c b/runtime/signals.c
index cf9774d199..5a21024ad4 100644
--- a/runtime/signals.c
+++ b/runtime/signals.c
@@ -285,7 +285,7 @@ CAMLno_tsan /* When called from [caml_record_signal], these memory
void caml_set_action_pending(caml_domain_state * dom_st)
{
dom_st->action_pending = 1;
- atomic_store_rel(&dom_st->young_limit, (uintnat)-1);
+ atomic_store_release(&dom_st->young_limit, (uintnat)-1);
}
CAMLexport int caml_check_pending_actions(void)