Diffstat (limited to 'core/cortex-m/task.c')
-rw-r--r--  core/cortex-m/task.c  366
1 file changed, 24 insertions(+), 342 deletions(-)
diff --git a/core/cortex-m/task.c b/core/cortex-m/task.c
index 1def5e0aa5..2ec3960570 100644
--- a/core/cortex-m/task.c
+++ b/core/cortex-m/task.c
@@ -125,30 +125,9 @@ static const struct {
/* Contexts for all tasks */
static task_ tasks[TASK_ID_COUNT];
-/* Reset constants and state for all tasks */
-#define TASK_RESET_SUPPORTED BIT(31)
-#define TASK_RESET_LOCK BIT(30)
-#define TASK_RESET_STATE_MASK (TASK_RESET_SUPPORTED | TASK_RESET_LOCK)
-#define TASK_RESET_WAITERS_MASK ~TASK_RESET_STATE_MASK
-#define TASK_RESET_UNSUPPORTED 0
-#define TASK_RESET_STATE_LOCKED (TASK_RESET_SUPPORTED | TASK_RESET_LOCK)
-#define TASK_RESET_STATE_UNLOCKED TASK_RESET_SUPPORTED
-
-#ifdef CONFIG_TASK_RESET_LIST
-#define ENABLE_RESET(n) \
- [TASK_ID_##n] = TASK_RESET_SUPPORTED,
-static uint32_t task_reset_state[TASK_ID_COUNT] = {
-#ifdef CONFIG_TASK_RESET_LIST
- CONFIG_TASK_RESET_LIST
-#endif
-};
-#undef ENABLE_RESET
-#endif /* CONFIG_TASK_RESET_LIST */
-
/* Sanity checks about static task invariants */
BUILD_ASSERT(TASK_ID_COUNT <= sizeof(unsigned) * 8);
BUILD_ASSERT(TASK_ID_COUNT < (1 << (sizeof(task_id_t) * 8)));
-BUILD_ASSERT(BIT(TASK_ID_COUNT) < TASK_RESET_LOCK);
/* Stacks for all tasks */
#define TASK(n, r, d, s) + s
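The "#define TASK(n, r, d, s) + s" line above is an X-macro accumulator: each
task-list entry expands to "+ <stack size>", so expanding the list after this
define yields the total stack allocation as one compile-time constant. A
minimal sketch of the pattern, using a hypothetical two-entry list rather than
the EC's real CONFIG_TASK_LIST:

    /* Hypothetical task list in the EC's X-macro style. */
    #define EXAMPLE_TASK_LIST \
            TASK(HOOKS,   hook_task,    NULL, 512) \
            TASK(CONSOLE, console_task, NULL, 640)

    /* Each entry expands to "+ s", giving 0 + 512 + 640 = 1152. */
    #define TASK(n, r, d, s) + s
    static char task_stacks[0 EXAMPLE_TASK_LIST];
    #undef TASK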
@@ -497,305 +476,6 @@ void task_trigger_irq(int irq)
CPU_NVIC_SWTRIG = irq;
}
-static uint32_t init_task_context(task_id_t id)
-{
- uint32_t *sp;
- /* Stack size in words */
- uint32_t ssize = tasks_init[id].stack_size / 4;
-
- /*
- * Update stack used by first frame: 8 words for the normal
- * stack frame, plus 8 for R4-R11. Even if using FPU, the first
- * frame does not store FP regs.
- */
- sp = tasks[id].stack + ssize - 16;
- tasks[id].sp = (uint32_t)sp;
-
- /* Initial context on stack (see __switchto()) */
- sp[8] = tasks_init[id].r0; /* r0 */
- sp[13] = (uint32_t)task_exit_trap; /* lr */
- sp[14] = tasks_init[id].pc; /* pc */
- sp[15] = 0x01000000; /* psr */
-
- /* Fill unused stack; also used to detect stack overflow. */
- for (sp = tasks[id].stack; sp < (uint32_t *)tasks[id].sp; sp++)
- *sp = STACK_UNUSED_VALUE;
-
- return ssize;
-}
-
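For reference, the 16-word frame built by init_task_context() mirrors what
__switchto() and the Cortex-M exception-return sequence expect: eight
software-saved registers below the eight-word hardware exception frame.
Reconstructed from the indices used above:

    /*
     * Layout of the initial frame at tasks[id].sp:
     *
     *   sp[0..7]    r4-r11    software-saved, restored by __switchto()
     *   sp[8]       r0        task argument (tasks_init[id].r0)
     *   sp[9..12]   r1-r3,r12 left uninitialized here
     *   sp[13]      lr        task_exit_trap, entered if the task returns
     *   sp[14]      pc        task entry point (tasks_init[id].pc)
     *   sp[15]      xPSR      0x01000000: Thumb bit, mandatory on Cortex-M
     */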
-#ifdef CONFIG_TASK_RESET_LIST
-
-/*
- * Re-initializes a task stack to its initial state, and marks it ready.
- * The task reset lock must be held prior to calling this function.
- */
-static void do_task_reset(task_id_t id)
-{
- interrupt_disable();
- init_task_context(id);
- tasks_ready |= 1 << id;
- /* TODO: Clear all pending events? */
- interrupt_enable();
-}
-
-/* We can't pass a parameter to a deferred call. Use this instead. */
-/* Mask of task IDs waiting to be reset. */
-static uint32_t deferred_reset_task_ids;
-
-/* Deferred routine that resets every task whose ID bit is set above. */
-static void deferred_task_reset(void)
-{
- while (deferred_reset_task_ids) {
- task_id_t reset_id = __fls(deferred_reset_task_ids);
-
- atomic_clear(&deferred_reset_task_ids, 1 << reset_id);
- do_task_reset(reset_id);
- }
-}
-DECLARE_DEFERRED(deferred_task_reset);
-
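The deferred_reset_task_ids mask is a standard EC workaround: DECLARE_DEFERRED()
routines take no arguments, so pending work is published through an atomically
updated bitmask that the deferred routine drains. A generic sketch of the same
pattern (request() and process() are hypothetical names):

    static uint32_t pending_ids;            /* one bit per pending item */

    void process(int id);                   /* hypothetical handler */

    static void drain_pending(void)
    {
            while (pending_ids) {
                    int id = __fls(pending_ids);    /* highest set bit */

                    atomic_clear(&pending_ids, 1 << id);
                    process(id);
            }
    }
    DECLARE_DEFERRED(drain_pending);

    void request(int id)
    {
            /* Publish the ID first, then kick the deferred call; if the
             * routine is mid-run, it will simply be scheduled again. */
            atomic_or(&pending_ids, 1 << id);
            hook_call_deferred(&drain_pending_data, 0);
    }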
-/*
- * Helper for updating task_reset state atomically. Checks the current state,
- * and if it matches if_value, updates the state to new_value, and returns
- * TRUE.
- */
-static int update_reset_state(uint32_t *state,
- uint32_t if_value,
- uint32_t to_value)
-{
- int update;
-
- interrupt_disable();
- update = *state == if_value;
- if (update)
- *state = to_value;
- interrupt_enable();
-
- return update;
-}
-
-/*
- * Helper that acquires the reset lock iff it is not currently held.
- * Returns TRUE if the lock was acquired.
- */
-static inline int try_acquire_reset_lock(uint32_t *state)
-{
- return update_reset_state(state,
- /* if the lock is not held */
- TASK_RESET_STATE_UNLOCKED,
- /* acquire it */
- TASK_RESET_STATE_LOCKED);
-}
-
-/*
- * Helper that releases the reset lock iff it is currently held, and there
- * are no pending resets. Returns TRUE if the lock was released.
- */
-static inline int try_release_reset_lock(uint32_t *state)
-{
- return update_reset_state(state,
- /* if the lock is held, with no waiters */
- TASK_RESET_STATE_LOCKED,
- /* release it */
- TASK_RESET_STATE_UNLOCKED);
-}
-
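Since the EC is single-core, masking interrupts makes update_reset_state() an
effective compare-and-set, and the two wrappers reduce the whole protocol to
transitions of one state word, using the constants defined at the top of the
file:

    /*
     *   UNLOCKED --- try_acquire_reset_lock() ---> LOCKED
     *   LOCKED   --- try_release_reset_lock() ---> UNLOCKED
     *
     * Release only succeeds while the state is exactly
     * TASK_RESET_STATE_LOCKED: any waiter bit ORed in by task_reset()
     * makes the compare fail, forcing the owner down the notify path.
     */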
-/*
- * Helper to cause the current task to sleep indefinitely; useful if the
- * calling task just needs to block until it is reset.
- */
-static inline void sleep_forever(void)
-{
- while (1)
- usleep(-1);
-}
-
-void task_enable_resets(void)
-{
- task_id_t id = task_get_current();
- uint32_t *state = &task_reset_state[id];
-
- if (*state == TASK_RESET_UNSUPPORTED) {
- cprints(CC_TASK,
- "%s called from non-resettable task, id: %d",
- __func__, id);
- return;
- }
-
- /*
- * A correctly written resettable task will only call this function
- * if resets are currently disabled; this implies that this task
- * holds the reset lock.
- */
-
- if (*state == TASK_RESET_STATE_UNLOCKED) {
- cprints(CC_TASK,
- "%s called, but resets already enabled, id: %d",
- __func__, id);
- return;
- }
-
- /*
- * Attempt to release the lock. If we cannot, it means there are tasks
- * waiting for a reset.
- */
- if (try_release_reset_lock(state))
- return;
-
- /* People are waiting for us to reset; schedule a reset. */
- atomic_or(&deferred_reset_task_ids, 1 << id);
- /*
- * This will always trigger a deferred call after our new ID was
- * written. If the hook call is currently executing, it will run
- * again.
- */
- hook_call_deferred(&deferred_task_reset_data, 0);
- /* Wait to be reset. */
- sleep_forever();
-}
-
-void task_disable_resets(void)
-{
- task_id_t id = task_get_current();
- uint32_t *state = &task_reset_state[id];
-
- if (*state == TASK_RESET_UNSUPPORTED) {
- cprints(CC_TASK,
- "%s called from non-resettable task, id %d",
- __func__, id);
- return;
- }
-
- /*
- * A correctly written resettable task will only call this function
- * if resets are currently enabled; this implies that this task does
- * not hold the reset lock.
- */
-
- if (try_acquire_reset_lock(state))
- return;
-
- /*
- * If we can't acquire the lock, we are about to be reset by another
- * task.
- */
- sleep_forever();
-}
-
-int task_reset_cleanup(void)
-{
- task_id_t id = task_get_current();
- uint32_t *state = &task_reset_state[id];
-
- /*
- * If the task has never started before, state will be
- * TASK_RESET_STATE_UNLOCKED.
- *
- * If the task was reset, the TASK_RESET_LOCK bit will be set, and
- * there may additionally be bits representing tasks we must notify
- * that we have reset.
- */
-
- /*
- * Only this task can unset the lock bit so we can read this safely,
- * even though other tasks may be modifying the state to add themselves
- * as waiters.
- */
- int cleanup_req = *state & TASK_RESET_LOCK;
-
- /*
- * Attempt to release the lock. We can only do this when there are no
- * tasks waiting to be notified that we have been reset, so we loop
- * until no tasks are waiting.
- *
- * Other tasks may still be trying to reset us at this point; if they
- * do, they will add themselves to the list of tasks we must notify. We
- * will simply notify them (multiple times if necessary) until we are
- * free to unlock.
- */
- if (cleanup_req) {
- while (!try_release_reset_lock(state)) {
- /* Find the first waiter to notify. */
- task_id_t notify_id = __fls(
- *state & TASK_RESET_WAITERS_MASK);
- /*
- * Remove the task from waiters first, so that
- * when it wakes after being notified, it is in
- * a consistent state (it should not be waiting
- * to be notified and running).
- * After being notified, the task may try to
- * reset us again; if it does, it will just add
- * itself back to the list of tasks to notify,
- * and we will notify it again.
- */
- atomic_clear(state, 1 << notify_id);
- /*
- * Skip any invalid ids set by tasks that
- * requested a non-blocking reset.
- */
- if (notify_id < TASK_ID_COUNT)
- task_set_event(notify_id,
- TASK_EVENT_RESET_DONE,
- 0);
- }
- }
-
- return cleanup_req;
-}
-
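Taken together, the three calls above imply a task-body shape like the
following sketch; everything except the task_* calls is a hypothetical
placeholder:

    void my_task(void *unused)
    {
            /* Runs on first start and again after every reset. */
            if (task_reset_cleanup())
                    recover_state();        /* hypothetical recovery step */

            while (1) {
                    task_disable_resets();  /* hold the reset lock */
                    update_shared_state();  /* hypothetical; must not be torn */
                    task_enable_resets();   /* release, or park for reset */

                    task_wait_event(-1);
            }
    }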
-int task_reset(task_id_t id, int wait)
-{
- task_id_t current = task_get_current();
- uint32_t *state = &task_reset_state[id];
- uint32_t waiter_id;
- int resets_disabled;
-
- if (id == current)
- return EC_ERROR_INVAL;
-
- /*
- * This value is only set at compile time, and will never be modified.
- */
- if (*state == TASK_RESET_UNSUPPORTED)
- return EC_ERROR_INVAL;
-
- /*
- * If we are not blocking for reset, we use an invalid task id to notify
- * the task that _someone_ wanted it to reset, but didn't want to be
- * notified when the reset is complete.
- */
- waiter_id = 1 << (wait ? current : TASK_ID_COUNT);
-
- /*
- * Try and take the lock. If we can't have it, just notify the task we
- * tried; it will reset itself when it next tries to release the lock.
- */
- interrupt_disable();
- resets_disabled = *state & TASK_RESET_LOCK;
- if (resets_disabled)
- *state |= waiter_id;
- else
- *state |= TASK_RESET_LOCK;
- interrupt_enable();
-
- if (!resets_disabled) {
- /* We got the lock, do the reset immediately. */
- do_task_reset(id);
- } else if (wait) {
- /*
- * We couldn't get the lock, and have been asked to block for
- * reset. We have asked the task to reset itself; it will notify
- * us when it has.
- */
- task_wait_event_mask(TASK_EVENT_RESET_DONE, -1);
- }
-
- return EC_SUCCESS;
-}
-
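From the caller's side, the wait flag picks between the two behaviors;
TASK_ID_EXAMPLE here is a hypothetical target:

    /* Blocking: returns once the target's context has been reinitialized
     * (waits on TASK_EVENT_RESET_DONE if the lock was held). */
    task_reset(TASK_ID_EXAMPLE, 1);

    /* Non-blocking: if resets are disabled, the target resets itself at
     * its next task_enable_resets(); no completion event arrives. */
    task_reset(TASK_ID_EXAMPLE, 0);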
-#endif /* CONFIG_TASK_RESET_LIST */
-
/*
* Initialize IRQs in the NVIC and set their priorities as defined by the
* DECLARE_IRQ statements.
@@ -969,8 +649,31 @@ void task_pre_init(void)
/* Fill the task memory with initial values */
for (i = 0; i < TASK_ID_COUNT; i++) {
+ uint32_t *sp;
+ /* Stack size in words */
+ uint32_t ssize = tasks_init[i].stack_size / 4;
+
tasks[i].stack = stack_next;
- stack_next += init_task_context(i);
+
+ /*
+ * Update stack used by first frame: 8 words for the normal
+ * stack frame, plus 8 for R4-R11. Even if using FPU, the first
+ * frame does not store FP regs.
+ */
+ sp = stack_next + ssize - 16;
+ tasks[i].sp = (uint32_t)sp;
+
+ /* Initial context on stack (see __switchto()) */
+ sp[8] = tasks_init[i].r0; /* r0 */
+ sp[13] = (uint32_t)task_exit_trap; /* lr */
+ sp[14] = tasks_init[i].pc; /* pc */
+ sp[15] = 0x01000000; /* psr */
+
+ /* Fill unused stack; also used to detect stack overflow. */
+ for (sp = stack_next; sp < (uint32_t *)tasks[i].sp; sp++)
+ *sp = STACK_UNUSED_VALUE;
+
+ stack_next += ssize;
}
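A side benefit of the STACK_UNUSED_VALUE fill, unchanged by this diff: any word
still holding the pattern has never been written, so a stack high-water mark
can be read back cheaply. A sketch under that assumption (not the EC's actual
reporting code):

    /* Bytes of stack never touched by task t (ssize in words). */
    static uint32_t stack_unused(const task_ *t, uint32_t ssize)
    {
            uint32_t n = 0;

            while (n < ssize && t->stack[n] == STACK_UNUSED_VALUE)
                    n++;
            return n * 4;    /* words -> bytes */
    }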
/*
@@ -1008,24 +711,3 @@ int task_start(void)
return __task_start(&need_resched_or_profiling);
}
-
-#ifdef CONFIG_CMD_TASK_RESET
-static int command_task_reset(int argc, char **argv)
-{
- task_id_t id;
- char *e;
-
- if (argc == 2) {
- id = strtoi(argv[1], &e, 10);
- if (*e)
- return EC_ERROR_PARAM1;
- ccprintf("Resetting task %d\n", id);
- return task_reset(id, 1);
- }
-
- return EC_ERROR_PARAM_COUNT;
-}
-DECLARE_CONSOLE_COMMAND(taskreset, command_task_reset,
- "task_id",
- "Reset a task");
-#endif /* CONFIG_CMD_TASK_RESET */
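For the record, the removed console command was invoked as below (session is
illustrative; the reply string comes from the ccprintf above):

    > taskreset 5
    Resetting task 5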