path: root/core
author     Jack Rosenthal <jrosenth@chromium.org>   2020-09-23 15:21:18 -0600
committer  Commit Bot <commit-bot@chromium.org>     2020-09-29 16:30:59 +0000
commit     d4be394be0b91a71b2f16ca476114b7470bc630f (patch)
tree       e08dd7e428be1d27b65c4bae3ba8a47e40c8b1a5 /core
parent     9513a63a626021b071199d02b7c73112eb746833 (diff)
download   chrome-ec-d4be394be0b91a71b2f16ca476114b7470bc630f.tar.gz
tree: rename atomic_* functions to deprecated_atomic_*
We will move to an API compatible with Zephyr's API. See the bug for
complete rationale and plan.

BUG=b:169151160
BRANCH=none
TEST=buildall

Signed-off-by: Jack Rosenthal <jrosenth@chromium.org>
Change-Id: Id611f663446abf00b24298a669f2ae47fef7f632
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/ec/+/2427507
Tested-by: Dawid Niedźwiecki <dn@semihalf.com>
Reviewed-by: Tom Hughes <tomhughes@chromium.org>
Reviewed-by: Jett Rink <jettrink@chromium.org>
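For context, Zephyr's atomic API differs from the helpers being renamed here mainly in that the operations return the previous value and act on an atomic_t rather than a uint32_t. A rough sketch of the target signatures follows (the typedefs below are an assumption for illustration, not part of this commit):

/*
 * Sketch only: approximate shape of the Zephyr-compatible API the tree is
 * migrating toward.  Exact types here are an assumption, not from this CL.
 */
typedef long atomic_t;
typedef long atomic_val_t;

atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);  /* returns old value */
atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
atomic_val_t atomic_clear(atomic_t *target);                   /* reads and zeroes */

The deprecated_ prefix keeps the old names distinct from the incoming ones so callers can be converted in the follow-up patches the commit message refers to.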
Diffstat (limited to 'core')
-rw-r--r--  core/cortex-m/atomic.h       19
-rw-r--r--  core/cortex-m/task.c         28
-rw-r--r--  core/cortex-m0/atomic.h      19
-rw-r--r--  core/cortex-m0/task.c        21
-rw-r--r--  core/host/atomic.h           19
-rw-r--r--  core/host/task.c              8
-rw-r--r--  core/minute-ia/atomic.h      26
-rw-r--r--  core/minute-ia/task.c        23
-rw-r--r--  core/nds32/atomic.h          19
-rw-r--r--  core/nds32/task.c            17
-rw-r--r--  core/riscv-rv32i/atomic.h    25
-rw-r--r--  core/riscv-rv32i/task.c      21
12 files changed, 154 insertions, 91 deletions
diff --git a/core/cortex-m/atomic.h b/core/cortex-m/atomic.h
index 2b2b6153ba..4d56ce3e4e 100644
--- a/core/cortex-m/atomic.h
+++ b/core/cortex-m/atomic.h
@@ -30,27 +30,36 @@
: "r" (a), "r" (v) : "cc"); \
} while (0)
-static inline void atomic_clear(uint32_t volatile *addr, uint32_t bits)
+/*
+ * The atomic_* functions are marked as deprecated as part of the transition
+ * to Zephyr-compatible atomic functions. These prefixes will be removed in
+ * the following patches. Please see b:169151160 for more details.
+ */
+
+static inline void deprecated_atomic_clear(uint32_t volatile *addr,
+ uint32_t bits)
{
ATOMIC_OP(bic, addr, bits);
}
-static inline void atomic_or(uint32_t volatile *addr, uint32_t bits)
+static inline void deprecated_atomic_or(uint32_t volatile *addr, uint32_t bits)
{
ATOMIC_OP(orr, addr, bits);
}
-static inline void atomic_add(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_add(uint32_t volatile *addr,
+ uint32_t value)
{
ATOMIC_OP(add, addr, value);
}
-static inline void atomic_sub(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_sub(uint32_t volatile *addr,
+ uint32_t value)
{
ATOMIC_OP(sub, addr, value);
}
-static inline uint32_t atomic_read_clear(uint32_t volatile *addr)
+static inline uint32_t deprecated_atomic_read_clear(uint32_t volatile *addr)
{
uint32_t ret, tmp;
diff --git a/core/cortex-m/task.c b/core/cortex-m/task.c
index d64f9400b6..058b892a46 100644
--- a/core/cortex-m/task.c
+++ b/core/cortex-m/task.c
@@ -413,7 +413,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = atomic_read_clear(&tsk->events))) {
+ while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched);
resched = TASK_ID_IDLE;
@@ -421,7 +421,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- atomic_clear(&tsk->events, TASK_EVENT_TIMER);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -432,12 +432,12 @@ uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- atomic_or(&receiver->events, event);
+ deprecated_atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- atomic_or(&tasks_ready, 1 << tskid);
+ deprecated_atomic_or(&tasks_ready, 1 << tskid);
#ifndef CONFIG_TASK_PROFILING
if (start_called)
need_resched_or_profiling = 1;
@@ -480,7 +480,8 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- atomic_or(&current_task->events, events & ~event_mask);
+ deprecated_atomic_or(&current_task->events,
+ events & ~event_mask);
return events & event_mask;
}
@@ -495,12 +496,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- atomic_or(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- atomic_clear(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_clear(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0);
@@ -578,7 +579,8 @@ static void deferred_task_reset(void)
while (deferred_reset_task_ids) {
task_id_t reset_id = __fls(deferred_reset_task_ids);
- atomic_clear(&deferred_reset_task_ids, 1 << reset_id);
+ deprecated_atomic_clear(&deferred_reset_task_ids,
+ 1 << reset_id);
do_task_reset(reset_id);
}
}
@@ -673,7 +675,7 @@ void task_enable_resets(void)
return;
/* People are waiting for us to reset; schedule a reset. */
- atomic_or(&deferred_reset_task_ids, 1 << id);
+ deprecated_atomic_or(&deferred_reset_task_ids, 1 << id);
/*
* This will always trigger a deferred call after our new ID was
* written. If the hook call is currently executing, it will run
@@ -758,7 +760,7 @@ int task_reset_cleanup(void)
* itself back to the list of tasks to notify,
* and we will notify it again.
*/
- atomic_clear(state, 1 << notify_id);
+ deprecated_atomic_clear(state, 1 << notify_id);
/*
* Skip any invalid ids set by tasks that
* requested a non-blocking reset.
@@ -876,7 +878,7 @@ void mutex_lock(struct mutex *mtx)
id = 1 << task_get_current();
- atomic_or(&mtx->waiters, id);
+ deprecated_atomic_or(&mtx->waiters, id);
do {
/* Try to get the lock (set 1 into the lock field) */
@@ -895,7 +897,7 @@ void mutex_lock(struct mutex *mtx)
task_wait_event_mask(TASK_EVENT_MUTEX, 0);
} while (value);
- atomic_clear(&mtx->waiters, id);
+ deprecated_atomic_clear(&mtx->waiters, id);
}
void mutex_unlock(struct mutex *mtx)
@@ -921,7 +923,7 @@ void mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
diff --git a/core/cortex-m0/atomic.h b/core/cortex-m0/atomic.h
index 417c86a6c9..414e6a56c1 100644
--- a/core/cortex-m0/atomic.h
+++ b/core/cortex-m0/atomic.h
@@ -27,27 +27,36 @@
: "b" (a), "r" (v) : "cc"); \
} while (0)
-static inline void atomic_clear(uint32_t volatile *addr, uint32_t bits)
+/*
+ * The atomic_* functions are marked as deprecated as part of the transition
+ * to Zephyr-compatible atomic functions. These prefixes will be removed in
+ * the following patches. Please see b:169151160 for more details.
+ */
+
+static inline void deprecated_atomic_clear(uint32_t volatile *addr,
+ uint32_t bits)
{
ATOMIC_OP(bic, addr, bits);
}
-static inline void atomic_or(uint32_t volatile *addr, uint32_t bits)
+static inline void deprecated_atomic_or(uint32_t volatile *addr, uint32_t bits)
{
ATOMIC_OP(orr, addr, bits);
}
-static inline void atomic_add(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_add(uint32_t volatile *addr,
+ uint32_t value)
{
ATOMIC_OP(add, addr, value);
}
-static inline void atomic_sub(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_sub(uint32_t volatile *addr,
+ uint32_t value)
{
ATOMIC_OP(sub, addr, value);
}
-static inline uint32_t atomic_read_clear(uint32_t volatile *addr)
+static inline uint32_t deprecated_atomic_read_clear(uint32_t volatile *addr)
{
uint32_t ret;
diff --git a/core/cortex-m0/task.c b/core/cortex-m0/task.c
index 5fa884fc3e..657ed88a73 100644
--- a/core/cortex-m0/task.c
+++ b/core/cortex-m0/task.c
@@ -334,7 +334,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = atomic_read_clear(&tsk->events))) {
+ while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
/*
* We need to ensure that the execution priority is actually
* decreased after the "cpsie i" in the atomic operation above
@@ -349,7 +349,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- atomic_clear(&tsk->events, TASK_EVENT_TIMER);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -360,12 +360,12 @@ uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- atomic_or(&receiver->events, event);
+ deprecated_atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- atomic_or(&tasks_ready, 1 << tskid);
+ deprecated_atomic_or(&tasks_ready, 1 << tskid);
if (start_called) {
/*
* Trigger the scheduler when there's
@@ -420,7 +420,8 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- atomic_or(&current_task->events, events & ~event_mask);
+ deprecated_atomic_or(&current_task->events,
+ events & ~event_mask);
return events & event_mask;
}
@@ -435,12 +436,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- atomic_or(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- atomic_clear(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_clear(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0);
@@ -499,7 +500,7 @@ void mutex_lock(struct mutex *mtx)
uint32_t id = 1 << task_get_current();
ASSERT(id != TASK_ID_INVALID);
- atomic_or(&mtx->waiters, id);
+ deprecated_atomic_or(&mtx->waiters, id);
while (1) {
/* Try to get the lock (set 2 into the lock field) */
@@ -513,7 +514,7 @@ void mutex_lock(struct mutex *mtx)
mtx->lock = 2;
__asm__ __volatile__("cpsie i");
- atomic_clear(&mtx->waiters, id);
+ deprecated_atomic_clear(&mtx->waiters, id);
}
void mutex_unlock(struct mutex *mtx)
@@ -539,7 +540,7 @@ void mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
diff --git a/core/host/atomic.h b/core/host/atomic.h
index b6e60e40b5..4150a20216 100644
--- a/core/host/atomic.h
+++ b/core/host/atomic.h
@@ -10,27 +10,36 @@
#include "common.h"
-static inline void atomic_clear(uint32_t volatile *addr, uint32_t bits)
+/*
+ * The atomic_* functions are marked as deprecated as part of the transition
+ * to Zephyr-compatible atomic functions. These prefixes will be removed in
+ * the following patches. Please see b:169151160 for more details.
+ */
+
+static inline void deprecated_atomic_clear(uint32_t volatile *addr,
+ uint32_t bits)
{
__sync_and_and_fetch(addr, ~bits);
}
-static inline void atomic_or(uint32_t volatile *addr, uint32_t bits)
+static inline void deprecated_atomic_or(uint32_t volatile *addr, uint32_t bits)
{
__sync_or_and_fetch(addr, bits);
}
-static inline void atomic_add(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_add(uint32_t volatile *addr,
+ uint32_t value)
{
__sync_add_and_fetch(addr, value);
}
-static inline void atomic_sub(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_sub(uint32_t volatile *addr,
+ uint32_t value)
{
__sync_sub_and_fetch(addr, value);
}
-static inline uint32_t atomic_read_clear(uint32_t volatile *addr)
+static inline uint32_t deprecated_atomic_read_clear(uint32_t volatile *addr)
{
return __sync_fetch_and_and(addr, 0);
}
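The host build above implements every helper with the GCC/Clang __sync builtins. As a minimal standalone sketch (a hypothetical test program, not part of the tree), the read-and-clear behavior that deprecated_atomic_read_clear() depends on looks like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical demo: __sync_fetch_and_and(addr, 0) returns the previous
 * value and leaves zero behind, which is the read-and-clear semantics the
 * host implementation relies on. */
static volatile uint32_t events = 0x05;

int main(void)
{
	uint32_t old = __sync_fetch_and_and(&events, 0);
	printf("old=0x%02x now=0x%02x\n", (unsigned)old, (unsigned)events);
	/* prints: old=0x05 now=0x00 */
	return 0;
}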
diff --git a/core/host/task.c b/core/host/task.c
index 7bd44c61b9..ae5e525303 100644
--- a/core/host/task.c
+++ b/core/host/task.c
@@ -200,7 +200,7 @@ pthread_t task_get_thread(task_id_t tskid)
uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
{
- atomic_or(&tasks[tskid].event, event);
+ deprecated_atomic_or(&tasks[tskid].event, event);
if (wait)
return task_wait_event(-1);
return 0;
@@ -224,7 +224,7 @@ uint32_t task_wait_event(int timeout_us)
pthread_cond_wait(&tasks[tid].resume, &run_lock);
/* Resume */
- ret = atomic_read_clear(&tasks[tid].event);
+ ret = deprecated_atomic_read_clear(&tasks[tid].event);
pthread_mutex_unlock(&interrupt_lock);
return ret;
}
@@ -252,8 +252,8 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- atomic_or(&tasks[task_get_current()].event,
- events & ~event_mask);
+ deprecated_atomic_or(&tasks[task_get_current()].event,
+ events & ~event_mask);
return events & event_mask;
}
diff --git a/core/minute-ia/atomic.h b/core/minute-ia/atomic.h
index a944127fa1..89ed35340c 100644
--- a/core/minute-ia/atomic.h
+++ b/core/minute-ia/atomic.h
@@ -32,42 +32,52 @@ static inline int bool_compare_and_swap_u32(uint32_t *var, uint32_t old_value,
return (_old_value == old_value);
}
-static inline void atomic_or_u8(uint8_t *addr, uint8_t bits)
+/*
+ * The atomic_* functions are marked as deprecated as part of the transition
+ * to Zephyr-compatible atomic functions. These prefixes will be removed in
+ * the following patches. Please see b:169151160 for more details.
+ */
+
+static inline void deprecated_atomic_or_u8(uint8_t *addr, uint8_t bits)
{
ATOMIC_OP(or, addr, bits);
}
-static inline void atomic_and_u8(uint8_t *addr, uint8_t bits)
+static inline void deprecated_atomic_and_u8(uint8_t *addr, uint8_t bits)
{
ATOMIC_OP(and, addr, bits);
}
-static inline void atomic_clear(uint32_t volatile *addr, uint32_t bits)
+static inline void deprecated_atomic_clear(uint32_t volatile *addr,
+ uint32_t bits)
{
ATOMIC_OP(andl, addr, ~bits);
}
-static inline void atomic_or(uint32_t volatile *addr, uint32_t bits)
+static inline void deprecated_atomic_or(uint32_t volatile *addr, uint32_t bits)
{
ATOMIC_OP(orl, addr, bits);
}
-static inline void atomic_add(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_add(uint32_t volatile *addr,
+ uint32_t value)
{
ATOMIC_OP(addl, addr, value);
}
-static inline void atomic_and(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_and(uint32_t volatile *addr,
+ uint32_t value)
{
ATOMIC_OP(andl, addr, value);
}
-static inline void atomic_sub(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_sub(uint32_t volatile *addr,
+ uint32_t value)
{
ATOMIC_OP(subl, addr, value);
}
-static inline uint32_t atomic_read_clear(uint32_t volatile *addr)
+static inline uint32_t deprecated_atomic_read_clear(uint32_t volatile *addr)
{
int ret = 0;
diff --git a/core/minute-ia/task.c b/core/minute-ia/task.c
index 7e7c836f7e..f0296596b1 100644
--- a/core/minute-ia/task.c
+++ b/core/minute-ia/task.c
@@ -287,7 +287,7 @@ void __keep task_start_irq_handler(void *data)
irq_dist[irq]++;
else
/* Track total number of service calls */
- atomic_add(&svc_calls, 1);
+ deprecated_atomic_add(&svc_calls, 1);
/* Only the outer ISR should keep track of the ISR start time */
if (__in_isr == 1) {
@@ -318,7 +318,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = atomic_read_clear(&tsk->events))) {
+ while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched);
resched = TASK_ID_IDLE;
@@ -326,7 +326,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- atomic_clear(&tsk->events, TASK_EVENT_TIMER);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -345,12 +345,12 @@ uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- atomic_or(&receiver->events, event);
+ deprecated_atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- atomic_or(&tasks_ready, 1 << tskid);
+ deprecated_atomic_or(&tasks_ready, 1 << tskid);
} else {
if (wait)
return __wait_evt(-1, tskid);
@@ -389,7 +389,8 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- atomic_or(&current_task->events, events & ~event_mask);
+ deprecated_atomic_or(&current_task->events,
+ events & ~event_mask);
return events & event_mask;
}
@@ -405,12 +406,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- atomic_or(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- atomic_clear(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_clear(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0);
@@ -454,7 +455,7 @@ void mutex_lock(struct mutex *mtx)
uint32_t id = 1 << task_get_current();
ASSERT(id != TASK_ID_INVALID);
- atomic_or(&mtx->waiters, id);
+ deprecated_atomic_or(&mtx->waiters, id);
do {
old_val = 0;
@@ -470,7 +471,7 @@ void mutex_lock(struct mutex *mtx)
}
} while (old_val);
- atomic_clear(&mtx->waiters, id);
+ deprecated_atomic_clear(&mtx->waiters, id);
}
void mutex_unlock(struct mutex *mtx)
@@ -497,7 +498,7 @@ void mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
diff --git a/core/nds32/atomic.h b/core/nds32/atomic.h
index 792093d598..e10a35aa54 100644
--- a/core/nds32/atomic.h
+++ b/core/nds32/atomic.h
@@ -12,7 +12,14 @@
#include "cpu.h"
#include "task.h"
-static inline void atomic_clear(uint32_t volatile *addr, uint32_t bits)
+/*
+ * The atomic_* functions are marked as deprecated as part of the transition
+ * to Zephyr-compatible atomic functions. These prefixes will be removed in
+ * the following patches. Please see b:169151160 for more details.
+ */
+
+static inline void deprecated_atomic_clear(uint32_t volatile *addr,
+ uint32_t bits)
{
uint32_t int_mask = read_clear_int_mask();
@@ -20,7 +27,7 @@ static inline void atomic_clear(uint32_t volatile *addr, uint32_t bits)
set_int_mask(int_mask);
}
-static inline void atomic_or(uint32_t volatile *addr, uint32_t bits)
+static inline void deprecated_atomic_or(uint32_t volatile *addr, uint32_t bits)
{
uint32_t int_mask = read_clear_int_mask();
@@ -28,7 +35,8 @@ static inline void atomic_or(uint32_t volatile *addr, uint32_t bits)
set_int_mask(int_mask);
}
-static inline void atomic_add(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_add(uint32_t volatile *addr,
+ uint32_t value)
{
uint32_t int_mask = read_clear_int_mask();
@@ -36,7 +44,8 @@ static inline void atomic_add(uint32_t volatile *addr, uint32_t value)
set_int_mask(int_mask);
}
-static inline void atomic_sub(uint32_t volatile *addr, uint32_t value)
+static inline void deprecated_atomic_sub(uint32_t volatile *addr,
+ uint32_t value)
{
uint32_t int_mask = read_clear_int_mask();
@@ -44,7 +53,7 @@ static inline void atomic_sub(uint32_t volatile *addr, uint32_t value)
set_int_mask(int_mask);
}
-static inline uint32_t atomic_read_clear(uint32_t volatile *addr)
+static inline uint32_t deprecated_atomic_read_clear(uint32_t volatile *addr)
{
uint32_t val;
uint32_t int_mask = read_clear_int_mask();
diff --git a/core/nds32/task.c b/core/nds32/task.c
index 21bd8d5edd..c6ae32912e 100644
--- a/core/nds32/task.c
+++ b/core/nds32/task.c
@@ -403,7 +403,7 @@ static uint32_t __ram_code __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = atomic_read_clear(&tsk->events))) {
+ while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched, 0);
resched = TASK_ID_IDLE;
@@ -411,7 +411,7 @@ static uint32_t __ram_code __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- atomic_clear(&tsk->events, TASK_EVENT_TIMER);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -422,12 +422,12 @@ uint32_t __ram_code task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- atomic_or(&receiver->events, event);
+ deprecated_atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- atomic_or(&tasks_ready, 1 << tskid);
+ deprecated_atomic_or(&tasks_ready, 1 << tskid);
if (start_called)
need_resched = 1;
} else {
@@ -468,7 +468,8 @@ uint32_t __ram_code task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- atomic_or(&current_task->events, events & ~event_mask);
+ deprecated_atomic_or(&current_task->events,
+ events & ~event_mask);
return events & event_mask;
}
@@ -520,12 +521,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- atomic_or(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- atomic_clear(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_clear(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0, 0);
@@ -643,7 +644,7 @@ void __ram_code mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
diff --git a/core/riscv-rv32i/atomic.h b/core/riscv-rv32i/atomic.h
index d9f25cd38d..a9e4df0344 100644
--- a/core/riscv-rv32i/atomic.h
+++ b/core/riscv-rv32i/atomic.h
@@ -22,37 +22,48 @@
tmp; \
})
-static inline void atomic_clear(volatile uint32_t *addr, uint32_t bits)
+/*
+ * The atomic_* functions are marked as deprecated as part of the transition
+ * to Zephyr-compatible atomic functions. These prefixes will be removed in
+ * the following patches. Please see b:169151160 for more details.
+ */
+
+static inline void deprecated_atomic_clear(volatile uint32_t *addr,
+ uint32_t bits)
{
ATOMIC_OP(and, ~bits, addr);
}
-static inline void atomic_or(volatile uint32_t *addr, uint32_t bits)
+static inline void deprecated_atomic_or(volatile uint32_t *addr, uint32_t bits)
{
ATOMIC_OP(or, bits, addr);
}
-static inline void atomic_add(volatile uint32_t *addr, uint32_t value)
+static inline void deprecated_atomic_add(volatile uint32_t *addr,
+ uint32_t value)
{
ATOMIC_OP(add, value, addr);
}
-static inline void atomic_sub(volatile uint32_t *addr, uint32_t value)
+static inline void deprecated_atomic_sub(volatile uint32_t *addr,
+ uint32_t value)
{
ATOMIC_OP(add, -value, addr);
}
-static inline uint32_t atomic_read_clear(volatile uint32_t *addr)
+static inline uint32_t deprecated_atomic_read_clear(volatile uint32_t *addr)
{
return ATOMIC_OP(and, 0, addr);
}
-static inline uint32_t atomic_inc(volatile uint32_t *addr, uint32_t value)
+static inline uint32_t deprecated_atomic_inc(volatile uint32_t *addr,
+ uint32_t value)
{
return ATOMIC_OP(add, value, addr);
}
-static inline uint32_t atomic_dec(volatile uint32_t *addr, uint32_t value)
+static inline uint32_t deprecated_atomic_dec(volatile uint32_t *addr,
+ uint32_t value)
{
return ATOMIC_OP(add, -value, addr);
}
diff --git a/core/riscv-rv32i/task.c b/core/riscv-rv32i/task.c
index 3ad78994bd..862f9e3b81 100644
--- a/core/riscv-rv32i/task.c
+++ b/core/riscv-rv32i/task.c
@@ -397,7 +397,7 @@ static uint32_t __ram_code __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = atomic_read_clear(&tsk->events))) {
+ while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched, 0);
resched = TASK_ID_IDLE;
@@ -405,7 +405,7 @@ static uint32_t __ram_code __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- atomic_clear(&tsk->events, TASK_EVENT_TIMER);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -417,12 +417,12 @@ uint32_t __ram_code task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- atomic_or(&receiver->events, event);
+ deprecated_atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- atomic_or(&tasks_ready, 1 << tskid);
+ deprecated_atomic_or(&tasks_ready, 1 << tskid);
if (start_called)
need_resched = 1;
} else {
@@ -463,7 +463,8 @@ uint32_t __ram_code task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- atomic_or(&current_task->events, events & ~event_mask);
+ deprecated_atomic_or(&current_task->events,
+ events & ~event_mask);
return events & event_mask;
}
@@ -493,12 +494,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- atomic_or(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- atomic_clear(&tasks_enabled, BIT(tskid));
+ deprecated_atomic_clear(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0, 0);
@@ -557,7 +558,7 @@ void __ram_code mutex_lock(struct mutex *mtx)
uint32_t id = 1 << task_get_current();
ASSERT(id != TASK_ID_INVALID);
- atomic_or(&mtx->waiters, id);
+ deprecated_atomic_or(&mtx->waiters, id);
while (1) {
asm volatile (
@@ -574,7 +575,7 @@ void __ram_code mutex_lock(struct mutex *mtx)
task_wait_event_mask(TASK_EVENT_MUTEX, 0);
}
- atomic_clear(&mtx->waiters, id);
+ deprecated_atomic_clear(&mtx->waiters, id);
}
void __ram_code mutex_unlock(struct mutex *mtx)
@@ -598,7 +599,7 @@ void __ram_code mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
+ deprecated_atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)