diff options
Diffstat (limited to 'core/cortex-m')
-rw-r--r--  core/cortex-m/atomic.h | 19
-rw-r--r--  core/cortex-m/task.c   | 28
2 files changed, 29 insertions, 18 deletions
diff --git a/core/cortex-m/atomic.h b/core/cortex-m/atomic.h index 2b2b6153ba..4d56ce3e4e 100644 --- a/core/cortex-m/atomic.h +++ b/core/cortex-m/atomic.h @@ -30,27 +30,36 @@ : "r" (a), "r" (v) : "cc"); \ } while (0) -static inline void atomic_clear(uint32_t volatile *addr, uint32_t bits) +/* + * The atomic_* functions are marked as deprecated as a part of the process of + * transaction to Zephyr compatible atomic functions. These prefixes will be + * removed in the following patches. Please see b:169151160 for more details. + */ + +static inline void deprecated_atomic_clear(uint32_t volatile *addr, + uint32_t bits) { ATOMIC_OP(bic, addr, bits); } -static inline void atomic_or(uint32_t volatile *addr, uint32_t bits) +static inline void deprecated_atomic_or(uint32_t volatile *addr, uint32_t bits) { ATOMIC_OP(orr, addr, bits); } -static inline void atomic_add(uint32_t volatile *addr, uint32_t value) +static inline void deprecated_atomic_add(uint32_t volatile *addr, + uint32_t value) { ATOMIC_OP(add, addr, value); } -static inline void atomic_sub(uint32_t volatile *addr, uint32_t value) +static inline void deprecated_atomic_sub(uint32_t volatile *addr, + uint32_t value) { ATOMIC_OP(sub, addr, value); } -static inline uint32_t atomic_read_clear(uint32_t volatile *addr) +static inline uint32_t deprecated_atomic_read_clear(uint32_t volatile *addr) { uint32_t ret, tmp; diff --git a/core/cortex-m/task.c b/core/cortex-m/task.c index d64f9400b6..058b892a46 100644 --- a/core/cortex-m/task.c +++ b/core/cortex-m/task.c @@ -413,7 +413,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched) ret = timer_arm(deadline, me); ASSERT(ret == EC_SUCCESS); } - while (!(evt = atomic_read_clear(&tsk->events))) { + while (!(evt = deprecated_atomic_read_clear(&tsk->events))) { /* Remove ourself and get the next task in the scheduler */ __schedule(1, resched); resched = TASK_ID_IDLE; @@ -421,7 +421,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched) if (timeout_us > 
0) { timer_cancel(me); /* Ensure timer event is clear, we no longer care about it */ - atomic_clear(&tsk->events, TASK_EVENT_TIMER); + deprecated_atomic_clear(&tsk->events, TASK_EVENT_TIMER); } return evt; } @@ -432,12 +432,12 @@ uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait) ASSERT(receiver); /* Set the event bit in the receiver message bitmap */ - atomic_or(&receiver->events, event); + deprecated_atomic_or(&receiver->events, event); /* Re-schedule if priorities have changed */ if (in_interrupt_context()) { /* The receiver might run again */ - atomic_or(&tasks_ready, 1 << tskid); + deprecated_atomic_or(&tasks_ready, 1 << tskid); #ifndef CONFIG_TASK_PROFILING if (start_called) need_resched_or_profiling = 1; @@ -480,7 +480,8 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us) /* Re-post any other events collected */ if (events & ~event_mask) - atomic_or(&current_task->events, events & ~event_mask); + deprecated_atomic_or(&current_task->events, + events & ~event_mask); return events & event_mask; } @@ -495,12 +496,12 @@ void task_enable_all_tasks(void) void task_enable_task(task_id_t tskid) { - atomic_or(&tasks_enabled, BIT(tskid)); + deprecated_atomic_or(&tasks_enabled, BIT(tskid)); } void task_disable_task(task_id_t tskid) { - atomic_clear(&tasks_enabled, BIT(tskid)); + deprecated_atomic_clear(&tasks_enabled, BIT(tskid)); if (!in_interrupt_context() && tskid == task_get_current()) __schedule(0, 0); @@ -578,7 +579,8 @@ static void deferred_task_reset(void) while (deferred_reset_task_ids) { task_id_t reset_id = __fls(deferred_reset_task_ids); - atomic_clear(&deferred_reset_task_ids, 1 << reset_id); + deprecated_atomic_clear(&deferred_reset_task_ids, + 1 << reset_id); do_task_reset(reset_id); } } @@ -673,7 +675,7 @@ void task_enable_resets(void) return; /* People are waiting for us to reset; schedule a reset. 
*/ - atomic_or(&deferred_reset_task_ids, 1 << id); + deprecated_atomic_or(&deferred_reset_task_ids, 1 << id); /* * This will always trigger a deferred call after our new ID was * written. If the hook call is currently executing, it will run @@ -758,7 +760,7 @@ int task_reset_cleanup(void) * itself back to the list of tasks to notify, * and we will notify it again. */ - atomic_clear(state, 1 << notify_id); + deprecated_atomic_clear(state, 1 << notify_id); /* * Skip any invalid ids set by tasks that * requested a non-blocking reset. @@ -876,7 +878,7 @@ void mutex_lock(struct mutex *mtx) id = 1 << task_get_current(); - atomic_or(&mtx->waiters, id); + deprecated_atomic_or(&mtx->waiters, id); do { /* Try to get the lock (set 1 into the lock field) */ @@ -895,7 +897,7 @@ void mutex_lock(struct mutex *mtx) task_wait_event_mask(TASK_EVENT_MUTEX, 0); } while (value); - atomic_clear(&mtx->waiters, id); + deprecated_atomic_clear(&mtx->waiters, id); } void mutex_unlock(struct mutex *mtx) @@ -921,7 +923,7 @@ void mutex_unlock(struct mutex *mtx) } /* Ensure no event is remaining from mutex wake-up */ - atomic_clear(&tsk->events, TASK_EVENT_MUTEX); + deprecated_atomic_clear(&tsk->events, TASK_EVENT_MUTEX); } void task_print_list(void) |