diff options
Diffstat (limited to 'core/cortex-m/task.c')
-rw-r--r-- | core/cortex-m/task.c | 28 |
1 files changed, 13 insertions, 15 deletions
diff --git a/core/cortex-m/task.c b/core/cortex-m/task.c
index 510d18fca0..9c3068bbc0 100644
--- a/core/cortex-m/task.c
+++ b/core/cortex-m/task.c
@@ -413,7 +413,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
 		ret = timer_arm(deadline, me);
 		ASSERT(ret == EC_SUCCESS);
 	}
-	while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
+	while (!(evt = atomic_read_clear(&tsk->events))) {
 		/* Remove ourself and get the next task in the scheduler */
 		__schedule(1, resched);
 		resched = TASK_ID_IDLE;
@@ -421,7 +421,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
 	if (timeout_us > 0) {
 		timer_cancel(me);
 		/* Ensure timer event is clear, we no longer care about it */
-		deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
+		atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
 	}
 	return evt;
 }
@@ -432,12 +432,12 @@ uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
 	ASSERT(receiver);

 	/* Set the event bit in the receiver message bitmap */
-	deprecated_atomic_or(&receiver->events, event);
+	atomic_or(&receiver->events, event);

 	/* Re-schedule if priorities have changed */
 	if (in_interrupt_context()) {
 		/* The receiver might run again */
-		deprecated_atomic_or(&tasks_ready, 1 << tskid);
+		atomic_or(&tasks_ready, 1 << tskid);
 #ifndef CONFIG_TASK_PROFILING
 		if (start_called)
 			need_resched_or_profiling = 1;
@@ -480,8 +480,7 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)

 	/* Re-post any other events collected */
 	if (events & ~event_mask)
-		deprecated_atomic_or(&current_task->events,
-				     events & ~event_mask);
+		atomic_or(&current_task->events, events & ~event_mask);

 	return events & event_mask;
 }
@@ -496,12 +495,12 @@ void task_enable_all_tasks(void)

 void task_enable_task(task_id_t tskid)
 {
-	deprecated_atomic_or(&tasks_enabled, BIT(tskid));
+	atomic_or(&tasks_enabled, BIT(tskid));
 }

 void task_disable_task(task_id_t tskid)
 {
-	deprecated_atomic_clear_bits(&tasks_enabled, BIT(tskid));
+	atomic_clear_bits(&tasks_enabled, BIT(tskid));

 	if (!in_interrupt_context() && tskid == task_get_current())
 		__schedule(0, 0);
@@ -579,8 +578,7 @@ static void deferred_task_reset(void)
 	while (deferred_reset_task_ids) {
 		task_id_t reset_id = __fls(deferred_reset_task_ids);

-		deprecated_atomic_clear_bits(&deferred_reset_task_ids,
-					     1 << reset_id);
+		atomic_clear_bits(&deferred_reset_task_ids, 1 << reset_id);
 		do_task_reset(reset_id);
 	}
 }
@@ -675,7 +673,7 @@ void task_enable_resets(void)
 		return;

 	/* People are waiting for us to reset; schedule a reset. */
-	deprecated_atomic_or(&deferred_reset_task_ids, 1 << id);
+	atomic_or(&deferred_reset_task_ids, 1 << id);
 	/*
 	 * This will always trigger a deferred call after our new ID was
 	 * written. If the hook call is currently executing, it will run
@@ -760,7 +758,7 @@ int task_reset_cleanup(void)
 				 * itself back to the list of tasks to notify,
 				 * and we will notify it again.
 				 */
-				deprecated_atomic_clear_bits(state, 1 << notify_id);
+				atomic_clear_bits(state, 1 << notify_id);
 				/*
 				 * Skip any invalid ids set by tasks that
 				 * requested a non-blocking reset.
@@ -878,7 +876,7 @@ void mutex_lock(struct mutex *mtx)

 	id = 1 << task_get_current();

-	deprecated_atomic_or(&mtx->waiters, id);
+	atomic_or(&mtx->waiters, id);

 	do {
 		/* Try to get the lock (set 1 into the lock field) */
@@ -897,7 +895,7 @@ void mutex_lock(struct mutex *mtx)
 		task_wait_event_mask(TASK_EVENT_MUTEX, 0);
 	} while (value);

-	deprecated_atomic_clear_bits(&mtx->waiters, id);
+	atomic_clear_bits(&mtx->waiters, id);
 }

 void mutex_unlock(struct mutex *mtx)
@@ -923,7 +921,7 @@ void mutex_unlock(struct mutex *mtx)
 	}

 	/* Ensure no event is remaining from mutex wake-up */
-	deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
+	atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
 }

 void task_print_list(void)