author     Dawid Niedzwiecki <dn@semihalf.com>   2020-10-07 12:13:52 +0200
committer  Commit Bot <commit-bot@chromium.org>  2020-10-27 09:35:49 +0000
commit     a05f7b9f469e7c171f4a737968ab5cbd11ba1253 (patch)
tree       ab128a89ce9206ca967ad104e307d6c0b4c33a52 /core
parent     3cba51e9e807e7015d81c2891c47ea4c59587a1c (diff)
download   chrome-ec-a05f7b9f469e7c171f4a737968ab5cbd11ba1253.tar.gz
tree: Use new atomic_* implementation
This is done as part of porting to Zephyr. Since the implementation of
the atomic functions is now provided for all architectures, use
atomic_* instead of deprecated_atomic_*.

In some places, dropping "volatile" from the function arguments caused
a compilation error ("discards 'volatile' qualifier"), so a few pointer
casts had to be added. This shouldn't cause any issues, because we are
sure about the generated asm (the store operation will still be
performed).

BUG=b:169151160
BRANCH=none
TEST=buildall

Signed-off-by: Dawid Niedzwiecki <dn@semihalf.com>
Change-Id: I98f590c323c3af52035e62825e8acfa358e0805a
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/ec/+/2478949
Tested-by: Jett Rink <jettrink@chromium.org>
Reviewed-by: Jett Rink <jettrink@chromium.org>
Reviewed-by: Tom Hughes <tomhughes@chromium.org>
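For illustration only (not part of this commit), below is a minimal C sketch of the kind of cast the message refers to. The atomic_clear_bits() signature, the task_ctx struct, and the TASK_EVENT_TIMER value are assumptions made for the example, and the __atomic builtin merely stands in for the real per-architecture implementation.

    #include <stdint.h>

    /* Assumed: the new-style helper takes a plain (non-volatile) pointer. */
    static inline void atomic_clear_bits(uint32_t *addr, uint32_t bits)
    {
            /* Atomic read-modify-write: the store to *addr always happens. */
            __atomic_fetch_and(addr, ~bits, __ATOMIC_SEQ_CST);
    }

    struct task_ctx {
            volatile uint32_t events;  /* event bitmap, also written from ISRs */
    };

    #define TASK_EVENT_TIMER 0x80000000u

    void clear_timer_event(struct task_ctx *tsk)
    {
            /*
             * Passing &tsk->events directly would warn that the argument
             * "discards 'volatile' qualifier", so the qualifier is cast
             * away; this is considered safe here because the atomic
             * helper itself performs the store.
             */
            atomic_clear_bits((uint32_t *)&tsk->events, TASK_EVENT_TIMER);
    }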
Diffstat (limited to 'core')
-rw-r--r--  core/cortex-m/task.c       28
-rw-r--r--  core/cortex-m0/task.c      21
-rw-r--r--  core/host/task.c            8
-rw-r--r--  core/minute-ia/task.c      23
-rw-r--r--  core/nds32/task.c          17
-rw-r--r--  core/riscv-rv32i/task.c    21
6 files changed, 56 insertions, 62 deletions
diff --git a/core/cortex-m/task.c b/core/cortex-m/task.c
index 510d18fca0..9c3068bbc0 100644
--- a/core/cortex-m/task.c
+++ b/core/cortex-m/task.c
@@ -413,7 +413,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
+ while (!(evt = atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched);
resched = TASK_ID_IDLE;
@@ -421,7 +421,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -432,12 +432,12 @@ uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- deprecated_atomic_or(&receiver->events, event);
+ atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- deprecated_atomic_or(&tasks_ready, 1 << tskid);
+ atomic_or(&tasks_ready, 1 << tskid);
#ifndef CONFIG_TASK_PROFILING
if (start_called)
need_resched_or_profiling = 1;
@@ -480,8 +480,7 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- deprecated_atomic_or(&current_task->events,
- events & ~event_mask);
+ atomic_or(&current_task->events, events & ~event_mask);
return events & event_mask;
}
@@ -496,12 +495,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- deprecated_atomic_or(&tasks_enabled, BIT(tskid));
+ atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- deprecated_atomic_clear_bits(&tasks_enabled, BIT(tskid));
+ atomic_clear_bits(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0);
@@ -579,8 +578,7 @@ static void deferred_task_reset(void)
while (deferred_reset_task_ids) {
task_id_t reset_id = __fls(deferred_reset_task_ids);
- deprecated_atomic_clear_bits(&deferred_reset_task_ids,
- 1 << reset_id);
+ atomic_clear_bits(&deferred_reset_task_ids, 1 << reset_id);
do_task_reset(reset_id);
}
}
@@ -675,7 +673,7 @@ void task_enable_resets(void)
return;
/* People are waiting for us to reset; schedule a reset. */
- deprecated_atomic_or(&deferred_reset_task_ids, 1 << id);
+ atomic_or(&deferred_reset_task_ids, 1 << id);
/*
* This will always trigger a deferred call after our new ID was
* written. If the hook call is currently executing, it will run
@@ -760,7 +758,7 @@ int task_reset_cleanup(void)
* itself back to the list of tasks to notify,
* and we will notify it again.
*/
- deprecated_atomic_clear_bits(state, 1 << notify_id);
+ atomic_clear_bits(state, 1 << notify_id);
/*
* Skip any invalid ids set by tasks that
* requested a non-blocking reset.
@@ -878,7 +876,7 @@ void mutex_lock(struct mutex *mtx)
id = 1 << task_get_current();
- deprecated_atomic_or(&mtx->waiters, id);
+ atomic_or(&mtx->waiters, id);
do {
/* Try to get the lock (set 1 into the lock field) */
@@ -897,7 +895,7 @@ void mutex_lock(struct mutex *mtx)
task_wait_event_mask(TASK_EVENT_MUTEX, 0);
} while (value);
- deprecated_atomic_clear_bits(&mtx->waiters, id);
+ atomic_clear_bits(&mtx->waiters, id);
}
void mutex_unlock(struct mutex *mtx)
@@ -923,7 +921,7 @@ void mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
diff --git a/core/cortex-m0/task.c b/core/cortex-m0/task.c
index 3e54630ab2..d8ed4a6376 100644
--- a/core/cortex-m0/task.c
+++ b/core/cortex-m0/task.c
@@ -334,7 +334,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
+ while (!(evt = atomic_read_clear(&tsk->events))) {
/*
* We need to ensure that the execution priority is actually
* decreased after the "cpsie i" in the atomic operation above
@@ -349,7 +349,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -360,12 +360,12 @@ uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- deprecated_atomic_or(&receiver->events, event);
+ atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- deprecated_atomic_or(&tasks_ready, 1 << tskid);
+ atomic_or(&tasks_ready, 1 << tskid);
if (start_called) {
/*
* Trigger the scheduler when there's
@@ -420,8 +420,7 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- deprecated_atomic_or(&current_task->events,
- events & ~event_mask);
+ atomic_or(&current_task->events, events & ~event_mask);
return events & event_mask;
}
@@ -436,12 +435,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- deprecated_atomic_or(&tasks_enabled, BIT(tskid));
+ atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- deprecated_atomic_clear_bits(&tasks_enabled, BIT(tskid));
+ atomic_clear_bits(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0);
@@ -500,7 +499,7 @@ void mutex_lock(struct mutex *mtx)
uint32_t id = 1 << task_get_current();
ASSERT(id != TASK_ID_INVALID);
- deprecated_atomic_or(&mtx->waiters, id);
+ atomic_or(&mtx->waiters, id);
while (1) {
/* Try to get the lock (set 2 into the lock field) */
@@ -514,7 +513,7 @@ void mutex_lock(struct mutex *mtx)
mtx->lock = 2;
__asm__ __volatile__("cpsie i");
- deprecated_atomic_clear_bits(&mtx->waiters, id);
+ atomic_clear_bits(&mtx->waiters, id);
}
void mutex_unlock(struct mutex *mtx)
@@ -540,7 +539,7 @@ void mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
diff --git a/core/host/task.c b/core/host/task.c
index ae5e525303..e73f8c57fe 100644
--- a/core/host/task.c
+++ b/core/host/task.c
@@ -200,7 +200,7 @@ pthread_t task_get_thread(task_id_t tskid)
uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
{
- deprecated_atomic_or(&tasks[tskid].event, event);
+ atomic_or(&tasks[tskid].event, event);
if (wait)
return task_wait_event(-1);
return 0;
@@ -224,7 +224,7 @@ uint32_t task_wait_event(int timeout_us)
pthread_cond_wait(&tasks[tid].resume, &run_lock);
/* Resume */
- ret = deprecated_atomic_read_clear(&tasks[tid].event);
+ ret = atomic_read_clear(&tasks[tid].event);
pthread_mutex_unlock(&interrupt_lock);
return ret;
}
@@ -252,8 +252,8 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- deprecated_atomic_or(&tasks[task_get_current()].event,
- events & ~event_mask);
+ atomic_or(&tasks[task_get_current()].event,
+ events & ~event_mask);
return events & event_mask;
}
diff --git a/core/minute-ia/task.c b/core/minute-ia/task.c
index 431bd62e54..8a54772ada 100644
--- a/core/minute-ia/task.c
+++ b/core/minute-ia/task.c
@@ -287,7 +287,7 @@ void __keep task_start_irq_handler(void *data)
irq_dist[irq]++;
else
/* Track total number of service calls */
- deprecated_atomic_add(&svc_calls, 1);
+ atomic_add(&svc_calls, 1);
/* Only the outer ISR should keep track of the ISR start time */
if (__in_isr == 1) {
@@ -318,7 +318,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
+ while (!(evt = atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched);
resched = TASK_ID_IDLE;
@@ -326,7 +326,7 @@ static uint32_t __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -345,12 +345,12 @@ uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- deprecated_atomic_or(&receiver->events, event);
+ atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- deprecated_atomic_or(&tasks_ready, 1 << tskid);
+ atomic_or(&tasks_ready, 1 << tskid);
} else {
if (wait)
return __wait_evt(-1, tskid);
@@ -389,8 +389,7 @@ uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- deprecated_atomic_or(&current_task->events,
- events & ~event_mask);
+ atomic_or(&current_task->events, events & ~event_mask);
return events & event_mask;
}
@@ -406,12 +405,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- deprecated_atomic_or(&tasks_enabled, BIT(tskid));
+ atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- deprecated_atomic_clear_bits(&tasks_enabled, BIT(tskid));
+ atomic_clear_bits(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0);
@@ -455,7 +454,7 @@ void mutex_lock(struct mutex *mtx)
uint32_t id = 1 << task_get_current();
ASSERT(id != TASK_ID_INVALID);
- deprecated_atomic_or(&mtx->waiters, id);
+ atomic_or(&mtx->waiters, id);
do {
old_val = 0;
@@ -471,7 +470,7 @@ void mutex_lock(struct mutex *mtx)
}
} while (old_val);
- deprecated_atomic_clear_bits(&mtx->waiters, id);
+ atomic_clear_bits(&mtx->waiters, id);
}
void mutex_unlock(struct mutex *mtx)
@@ -498,7 +497,7 @@ void mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
diff --git a/core/nds32/task.c b/core/nds32/task.c
index 209395e86a..2fc2302c67 100644
--- a/core/nds32/task.c
+++ b/core/nds32/task.c
@@ -403,7 +403,7 @@ static uint32_t __ram_code __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
+ while (!(evt = atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched, 0);
resched = TASK_ID_IDLE;
@@ -411,7 +411,7 @@ static uint32_t __ram_code __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -422,12 +422,12 @@ uint32_t __ram_code task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- deprecated_atomic_or(&receiver->events, event);
+ atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- deprecated_atomic_or(&tasks_ready, 1 << tskid);
+ atomic_or(&tasks_ready, 1 << tskid);
if (start_called)
need_resched = 1;
} else {
@@ -468,8 +468,7 @@ uint32_t __ram_code task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- deprecated_atomic_or(&current_task->events,
- events & ~event_mask);
+ atomic_or(&current_task->events, events & ~event_mask);
return events & event_mask;
}
@@ -521,12 +520,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- deprecated_atomic_or(&tasks_enabled, BIT(tskid));
+ atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- deprecated_atomic_clear_bits(&tasks_enabled, BIT(tskid));
+ atomic_clear_bits(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0, 0);
@@ -653,7 +652,7 @@ void __ram_code mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
diff --git a/core/riscv-rv32i/task.c b/core/riscv-rv32i/task.c
index ecd8d72382..b5f78b12d4 100644
--- a/core/riscv-rv32i/task.c
+++ b/core/riscv-rv32i/task.c
@@ -397,7 +397,7 @@ static uint32_t __ram_code __wait_evt(int timeout_us, task_id_t resched)
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
- while (!(evt = deprecated_atomic_read_clear(&tsk->events))) {
+ while (!(evt = atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched, 0);
resched = TASK_ID_IDLE;
@@ -405,7 +405,7 @@ static uint32_t __ram_code __wait_evt(int timeout_us, task_id_t resched)
if (timeout_us > 0) {
timer_cancel(me);
/* Ensure timer event is clear, we no longer care about it */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
}
return evt;
}
@@ -417,12 +417,12 @@ uint32_t __ram_code task_set_event(task_id_t tskid, uint32_t event, int wait)
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
- deprecated_atomic_or(&receiver->events, event);
+ atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
- deprecated_atomic_or(&tasks_ready, 1 << tskid);
+ atomic_or(&tasks_ready, 1 << tskid);
if (start_called)
need_resched = 1;
} else {
@@ -463,8 +463,7 @@ uint32_t __ram_code task_wait_event_mask(uint32_t event_mask, int timeout_us)
/* Re-post any other events collected */
if (events & ~event_mask)
- deprecated_atomic_or(&current_task->events,
- events & ~event_mask);
+ atomic_or(&current_task->events, events & ~event_mask);
return events & event_mask;
}
@@ -494,12 +493,12 @@ void task_enable_all_tasks(void)
void task_enable_task(task_id_t tskid)
{
- deprecated_atomic_or(&tasks_enabled, BIT(tskid));
+ atomic_or(&tasks_enabled, BIT(tskid));
}
void task_disable_task(task_id_t tskid)
{
- deprecated_atomic_clear_bits(&tasks_enabled, BIT(tskid));
+ atomic_clear_bits(&tasks_enabled, BIT(tskid));
if (!in_interrupt_context() && tskid == task_get_current())
__schedule(0, 0, 0);
@@ -558,7 +557,7 @@ void __ram_code mutex_lock(struct mutex *mtx)
uint32_t id = 1 << task_get_current();
ASSERT(id != TASK_ID_INVALID);
- deprecated_atomic_or(&mtx->waiters, id);
+ atomic_or(&mtx->waiters, id);
while (1) {
asm volatile (
@@ -575,7 +574,7 @@ void __ram_code mutex_lock(struct mutex *mtx)
task_wait_event_mask(TASK_EVENT_MUTEX, 0);
}
- deprecated_atomic_clear_bits(&mtx->waiters, id);
+ atomic_clear_bits(&mtx->waiters, id);
}
void __ram_code mutex_unlock(struct mutex *mtx)
@@ -599,7 +598,7 @@ void __ram_code mutex_unlock(struct mutex *mtx)
}
/* Ensure no event is remaining from mutex wake-up */
- deprecated_atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
+ atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)