path: root/core
author    Dino Li <Dino.Li@ite.com.tw>    2019-06-10 16:26:36 +0800
committer Commit Bot <commit-bot@chromium.org>    2019-06-11 17:26:38 +0000
commit    59d060ebfe68082f4ea87214ffcda976c55176af (patch)
tree      e237e25e9979220a716b72b65d43fb559504bc60 /core
parent    3a668749460466ff002b5dd2cbf00529f97e5974 (diff)
download  chrome-ec-59d060ebfe68082f4ea87214ffcda976c55176af.tar.gz
core:RISC-V / chip:IT83202
The IT83202 is an embedded controller with a RISC-V core. It supports
up to 256KB of RAM and up to 1MB of internal flash.

BUG=none
BRANCH=none
TEST=EC boots and console commands (e.g. taskinfo, version, sysjump...)
     work on the it83202 EVB.

Change-Id: I424c0d2878beb941c816363b5c7a3f57fda9fd13
Signed-off-by: Dino Li <Dino.Li@ite.com.tw>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/ec/+/1588300
Reviewed-by: Jett Rink <jettrink@chromium.org>
Commit-Queue: Jett Rink <jettrink@chromium.org>
Diffstat (limited to 'core')
-rw-r--r--  core/riscv-rv32i/__builtin.c     16
-rw-r--r--  core/riscv-rv32i/atomic.h        55
-rw-r--r--  core/riscv-rv32i/build.mk        21
-rw-r--r--  core/riscv-rv32i/config_core.h   16
-rw-r--r--  core/riscv-rv32i/cpu.c           15
-rw-r--r--  core/riscv-rv32i/cpu.h           51
-rw-r--r--  core/riscv-rv32i/ec.lds.S       268
-rw-r--r--  core/riscv-rv32i/init.S         394
-rw-r--r--  core/riscv-rv32i/irq_chip.h      59
-rw-r--r--  core/riscv-rv32i/irq_handler.h   25
-rw-r--r--  core/riscv-rv32i/panic.c        182
-rw-r--r--  core/riscv-rv32i/switch.S       157
-rw-r--r--  core/riscv-rv32i/task.c         716
13 files changed, 1975 insertions, 0 deletions
diff --git a/core/riscv-rv32i/__builtin.c b/core/riscv-rv32i/__builtin.c
new file mode 100644
index 0000000000..4bf495a011
--- /dev/null
+++ b/core/riscv-rv32i/__builtin.c
@@ -0,0 +1,16 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "common.h"
+
+/*
+ * __builtin_ffs:
+ * Returns one plus the index of the least significant 1-bit of x,
+ * or if x is zero, returns zero.
+ */
+int __keep __ffssi2(int x)
+{
+ return 32 - __builtin_clz(x & -x);
+}
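
A quick standalone illustration (not part of this commit) of the identity __ffssi2() above relies on: in two's complement, x & -x isolates the least significant set bit, and 32 - __builtin_clz() of that single bit gives its 1-based index. The helper and test values below are purely illustrative.

#include <assert.h>

static int ffs_via_clz(unsigned int x)
{
	if (x == 0)
		return 0;	/* ffs(0) is defined to be 0 */
	/* x & -x keeps only the lowest set bit of x */
	return 32 - __builtin_clz(x & -x);
}

int main(void)
{
	assert(ffs_via_clz(0x00000001u) == 1);	/* bit 0 -> index 1 */
	assert(ffs_via_clz(0x00000008u) == 4);	/* bit 3 -> index 4 */
	assert(ffs_via_clz(0x00000050u) == 5);	/* lowest set bit is bit 4 */
	assert(ffs_via_clz(0x80000000u) == 32);	/* bit 31 -> index 32 */
	assert(ffs_via_clz(0) == 0);
	return 0;
}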
diff --git a/core/riscv-rv32i/atomic.h b/core/riscv-rv32i/atomic.h
new file mode 100644
index 0000000000..11765a2615
--- /dev/null
+++ b/core/riscv-rv32i/atomic.h
@@ -0,0 +1,55 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* Atomic operations for RISC-V */
+
+#ifndef __CROS_EC_ATOMIC_H
+#define __CROS_EC_ATOMIC_H
+
+#include "common.h"
+#include "cpu.h"
+#include "task.h"
+
+#define ATOMIC_OP(op, value, addr) \
+({ \
+ uint32_t tmp; \
+ asm volatile ( \
+ "amo" #op ".w.aqrl %0, %2, %1" \
+ : "=r" (tmp), "+A" (*addr) \
+ : "r" (value)); \
+})
+
+static inline void atomic_clear(volatile uint32_t *addr, uint32_t bits)
+{
+ ATOMIC_OP(and, ~bits, addr);
+}
+
+static inline void atomic_or(volatile uint32_t *addr, uint32_t bits)
+{
+ ATOMIC_OP(or, bits, addr);
+}
+
+static inline void atomic_add(volatile uint32_t *addr, uint32_t value)
+{
+ ATOMIC_OP(add, value, addr);
+}
+
+static inline void atomic_sub(volatile uint32_t *addr, uint32_t value)
+{
+ ATOMIC_OP(add, -value, addr);
+}
+
+static inline uint32_t atomic_read_clear(volatile uint32_t *addr)
+{
+ uint32_t ret;
+
+ asm volatile (
+ "amoand.w.aqrl %0, %2, %1"
+ : "=r" (ret), "+A" (*addr)
+ : "r" (0));
+
+ return ret;
+}
+#endif /* __CROS_EC_ATOMIC_H */
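
A hypothetical usage sketch (not from this commit) of the helpers above: an ISR posts flags into a shared word with a single amoor.w.aqrl, and a task drains them with atomic_read_clear() (an amoand.w.aqrl with zero). The names pending_flags, my_isr and my_task_loop are illustrative; BIT() comes from common.h.

#include "atomic.h"
#include "common.h"

static volatile uint32_t pending_flags;
static volatile uint32_t events_handled;

void my_isr(void)
{
	/* One AMO instruction: safe against a concurrent reader in a task */
	atomic_or(&pending_flags, BIT(0));
}

void my_task_loop(void)
{
	while (1) {
		/* Atomically fetch and zero the whole flag word */
		uint32_t flags = atomic_read_clear(&pending_flags);

		if (flags & BIT(0))
			atomic_add(&events_handled, 1);
	}
}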
diff --git a/core/riscv-rv32i/build.mk b/core/riscv-rv32i/build.mk
new file mode 100644
index 0000000000..044678ab33
--- /dev/null
+++ b/core/riscv-rv32i/build.mk
@@ -0,0 +1,21 @@
+# -*- makefile -*-
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# RISC-V core OS files build
+#
+
+# TODO(b/133639441): Toolchain needs to support 32-bit architecture or we
+# will get the following error message:
+# ABI is incompatible with that of the selected emulation:
+# target emulation `elf64-littleriscv' does not match `elf32-littleriscv'
+# Select RISC-V bare-metal toolchain
+$(call set-option,CROSS_COMPILE,$(CROSS_COMPILE_riscv),\
+ /opt/coreboot-sdk/bin/riscv64-elf-)
+
+# CPU specific compilation flags
+CFLAGS_CPU+=-march=rv32imafc -mabi=ilp32f -Os
+LDFLAGS_EXTRA+=-mrelax
+
+core-y=cpu.o init.o panic.o task.o switch.o __builtin.o
diff --git a/core/riscv-rv32i/config_core.h b/core/riscv-rv32i/config_core.h
new file mode 100644
index 0000000000..608fec3026
--- /dev/null
+++ b/core/riscv-rv32i/config_core.h
@@ -0,0 +1,16 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef __CROS_EC_CONFIG_CORE_H
+#define __CROS_EC_CONFIG_CORE_H
+
+/* Linker binary architecture and format */
+#define BFD_ARCH riscv
+#define BFD_FORMAT "elf32-littleriscv"
+
+#define CONFIG_SOFTWARE_CLZ
+#define CONFIG_SOFTWARE_PANIC
+
+#endif /* __CROS_EC_CONFIG_CORE_H */
diff --git a/core/riscv-rv32i/cpu.c b/core/riscv-rv32i/cpu.c
new file mode 100644
index 0000000000..761e81fe44
--- /dev/null
+++ b/core/riscv-rv32i/cpu.c
@@ -0,0 +1,15 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Set up the RISC-V core
+ */
+
+#include "cpu.h"
+#include "registers.h"
+
+void cpu_init(void)
+{
+ /* bit3: Global interrupt enable (M-mode) */
+ asm volatile ("csrsi mstatus, 0x8");
+}
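
A small companion sketch (not in this commit; the helper name is mine) for reading the CSR back, e.g. to confirm that cpu_init() above set the machine global interrupt enable bit (mstatus bit 3, MIE):

#include <stdint.h>

static inline uint32_t get_mstatus(void)
{
	uint32_t ret;

	asm volatile ("csrr %0, mstatus" : "=r"(ret));
	return ret;
}

/* After cpu_init(), (get_mstatus() & 0x8) should be non-zero (MIE set). */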
diff --git a/core/riscv-rv32i/cpu.h b/core/riscv-rv32i/cpu.h
new file mode 100644
index 0000000000..2cb31a6faa
--- /dev/null
+++ b/core/riscv-rv32i/cpu.h
@@ -0,0 +1,51 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Registers map and definitions for RISC-V cores
+ */
+
+#ifndef __CROS_EC_CPU_H
+#define __CROS_EC_CPU_H
+
+#ifdef CONFIG_FPU
+/* additional space to save FP registers (fcsr, ft0-11, fa0-7, fs0-11) */
+#define TASK_SCRATCHPAD_SIZE (62)
+#else
+#define TASK_SCRATCHPAD_SIZE (29)
+#endif
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+/* write Exception Program Counter register */
+static inline void set_mepc(uint32_t val)
+{
+ asm volatile ("csrw mepc, %0" : : "r"(val));
+}
+
+/* read Exception Program Counter register */
+static inline uint32_t get_mepc(void)
+{
+ uint32_t ret;
+
+ asm volatile ("csrr %0, mepc" : "=r"(ret));
+ return ret;
+}
+
+/* read Trap cause register */
+static inline uint32_t get_mcause(void)
+{
+ uint32_t ret;
+
+ asm volatile ("csrr %0, mcause" : "=r"(ret));
+ return ret;
+}
+
+/* Generic CPU core initialization */
+void cpu_init(void);
+extern uint32_t ec_reset_lp;
+extern uint32_t ira;
+#endif
+
+#endif /* __CROS_EC_CPU_H */
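
One way to read the TASK_SCRATCHPAD_SIZE values above (an illustration, not part of the commit): they match the per-task context that init.S and switch.S later in this change push on the process stack. The CTX_* macro names below are mine.

/* Caller-saved integer registers saved at interrupt entry: ra, a0-a7, t0-t6 */
#define CTX_CALLER_SAVED	16
/* Callee-saved integer registers saved at context switch: s0-s11 */
#define CTX_CALLEE_SAVED	12
/* Saved program counter (mepc) */
#define CTX_MEPC		1
/* FPU additions: ft0-ft11 (12) + fa0-fa7 (8) + fcsr (1) + fs0-fs11 (12) */
#define CTX_FPU_EXTRA		(12 + 8 + 1 + 12)

_Static_assert(CTX_CALLER_SAVED + CTX_CALLEE_SAVED + CTX_MEPC == 29,
	       "non-FPU task context is 29 words");
_Static_assert(CTX_CALLER_SAVED + CTX_CALLEE_SAVED + CTX_MEPC +
	       CTX_FPU_EXTRA == 62, "FPU task context is 62 words");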
diff --git a/core/riscv-rv32i/ec.lds.S b/core/riscv-rv32i/ec.lds.S
new file mode 100644
index 0000000000..9733569b49
--- /dev/null
+++ b/core/riscv-rv32i/ec.lds.S
@@ -0,0 +1,268 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "config.h"
+
+#define STRINGIFY0(name) #name
+#define STRINGIFY(name) STRINGIFY0(name)
+
+#define FW_OFF_(section) CONFIG_##section##_MEM_OFF
+#define FW_OFF(section) (CONFIG_PROGRAM_MEMORY_BASE + FW_OFF_(section))
+
+#define FW_SIZE_(section) CONFIG_##section##_SIZE
+#define FW_SIZE(section) FW_SIZE_(section)
+
+
+OUTPUT_FORMAT(BFD_FORMAT, BFD_FORMAT, BFD_FORMAT)
+OUTPUT_ARCH(BFD_ARCH)
+ENTRY(__reset)
+MEMORY
+{
+ FLASH (rx) : ORIGIN = FW_OFF(SECTION) - CHIP_ILM_BASE, \
+ LENGTH = FW_SIZE(SECTION)
+ /*
+ * ILM (Instruction Local Memory).
+ * We connect ILM to internal flash so we are able to boot from the flash.
+ */
+ ILM (rx) : ORIGIN = FW_OFF(SECTION), LENGTH = FW_SIZE(SECTION)
+ IRAM (rw) : ORIGIN = CONFIG_RAM_BASE + CHIP_RAM_SPACE_RESERVED,
+ LENGTH = CONFIG_RAM_SIZE - CHIP_RAM_SPACE_RESERVED
+#if defined(CONFIG_HOSTCMD_X86) || defined(CONFIG_I2C_SLAVE)
+ H2RAM (rw) : ORIGIN = CONFIG_H2RAM_BASE, LENGTH = CONFIG_H2RAM_SIZE
+#endif
+}
+SECTIONS
+{
+ .text : {
+ /* We put "__flash_dma_start" at the beginning of the text section
+ * to avoid a gap.
+ */
+ __flash_dma_start = .;
+ ASSERT((__flash_dma_start == 0),
+ "__flash_dma_start has to be 4k-byte aligned");
+ KEEP(STRINGIFY(OUTDIR/core/CORE/init.o) (.text.vecttable))
+ . = ALIGN(4);
+ __image_data_offset = .;
+ KEEP(*(.rodata.ver))
+ . = ALIGN(4);
+ KEEP(STRINGIFY(OUTDIR/core/CORE/init.o) (.text.vectirq))
+ KEEP(STRINGIFY(OUTDIR/core/CORE/init.o) (.text))
+ KEEP(*(.flash_direct_map))
+ . = ALIGN(16);
+ KEEP(*(.ram_code))
+ __flash_dma_size = . - __flash_dma_start;
+ ASSERT((__flash_dma_size < IT83XX_ILM_BLOCK_SIZE),
+ "__flash_dma_size < IT83XX_ILM_BLOCK_SIZE");
+ . = ALIGN(IT83XX_ILM_BLOCK_SIZE);
+ __flash_text_start = .;
+ *(.text*)
+ } > ILM AT>FLASH
+
+ . = ALIGN(4);
+ .rodata : {
+ /* Symbols defined here are declared in link_defs.h */
+ __irqprio = .;
+ KEEP(*(.rodata.irqprio))
+ __irqprio_end = .;
+
+ . = ALIGN(4);
+ __irqhandler = .;
+ KEEP(STRINGIFY(OUTDIR/core/CORE/init.o) (.rodata.vecthandlers))
+
+ . = ALIGN(4);
+ __cmds = .;
+ KEEP(*(SORT(.rodata.cmds*)))
+ __cmds_end = .;
+
+ . = ALIGN(4);
+ __hcmds = .;
+ KEEP(*(SORT(.rodata.hcmds*)))
+ __hcmds_end = .;
+
+ . = ALIGN(4);
+ __mkbp_evt_srcs = .;
+ KEEP(*(.rodata.evtsrcs))
+ __mkbp_evt_srcs_end = .;
+
+ . = ALIGN(4);
+ __hooks_init = .;
+ KEEP(*(.rodata.HOOK_INIT))
+ __hooks_init_end = .;
+
+ __hooks_pre_freq_change = .;
+ KEEP(*(.rodata.HOOK_PRE_FREQ_CHANGE))
+ __hooks_pre_freq_change_end = .;
+
+ __hooks_freq_change = .;
+ KEEP(*(.rodata.HOOK_FREQ_CHANGE))
+ __hooks_freq_change_end = .;
+
+ __hooks_sysjump = .;
+ KEEP(*(.rodata.HOOK_SYSJUMP))
+ __hooks_sysjump_end = .;
+
+ __hooks_chipset_pre_init = .;
+ KEEP(*(.rodata.HOOK_CHIPSET_PRE_INIT))
+ __hooks_chipset_pre_init_end = .;
+
+ __hooks_chipset_startup = .;
+ KEEP(*(.rodata.HOOK_CHIPSET_STARTUP))
+ __hooks_chipset_startup_end = .;
+
+ __hooks_chipset_resume = .;
+ KEEP(*(.rodata.HOOK_CHIPSET_RESUME))
+ __hooks_chipset_resume_end = .;
+
+ __hooks_chipset_suspend = .;
+ KEEP(*(.rodata.HOOK_CHIPSET_SUSPEND))
+ __hooks_chipset_suspend_end = .;
+
+ __hooks_chipset_shutdown = .;
+ KEEP(*(.rodata.HOOK_CHIPSET_SHUTDOWN))
+ __hooks_chipset_shutdown_end = .;
+
+ __hooks_chipset_reset = .;
+ KEEP(*(.rodata.HOOK_CHIPSET_RESET))
+ __hooks_chipset_reset_end = .;
+
+ __hooks_ac_change = .;
+ KEEP(*(.rodata.HOOK_AC_CHANGE))
+ __hooks_ac_change_end = .;
+
+ __hooks_lid_change = .;
+ KEEP(*(.rodata.HOOK_LID_CHANGE))
+ __hooks_lid_change_end = .;
+
+ __hooks_tablet_mode_change = .;
+ KEEP(*(.rodata.HOOK_TABLET_MODE_CHANGE))
+ __hooks_tablet_mode_change_end = .;
+
+ __hooks_base_attached_change = .;
+ KEEP(*(.rodata.HOOK_BASE_ATTACHED_CHANGE))
+ __hooks_base_attached_change_end = .;
+
+ __hooks_pwrbtn_change = .;
+ KEEP(*(.rodata.HOOK_POWER_BUTTON_CHANGE))
+ __hooks_pwrbtn_change_end = .;
+
+ __hooks_battery_soc_change = .;
+ KEEP(*(.rodata.HOOK_BATTERY_SOC_CHANGE))
+ __hooks_battery_soc_change_end = .;
+
+#ifdef CONFIG_USB_SUSPEND
+ __hooks_usb_change = .;
+ KEEP(*(.rodata.HOOK_USB_PM_CHANGE))
+ __hooks_usb_change_end = .;
+#endif
+
+ __hooks_tick = .;
+ KEEP(*(.rodata.HOOK_TICK))
+ __hooks_tick_end = .;
+
+ __hooks_second = .;
+ KEEP(*(.rodata.HOOK_SECOND))
+ __hooks_second_end = .;
+
+ __hooks_usb_pd_disconnect = .;
+ KEEP(*(.rodata.HOOK_USB_PD_DISCONNECT))
+ __hooks_usb_pd_disconnect_end = .;
+
+ __hooks_usb_pd_connect = .;
+ KEEP(*(.rodata.HOOK_USB_PD_CONNECT))
+ __hooks_usb_pd_connect_end = .;
+
+ __deferred_funcs = .;
+ KEEP(*(.rodata.deferred))
+ __deferred_funcs_end = .;
+
+ . = ALIGN(4);
+ *(.rodata*)
+
+ . = ALIGN(4);
+ *(.srodata*)
+#if defined(SECTION_IS_RO) && defined(CONFIG_FLASH)
+ . = ALIGN(64);
+ KEEP(*(.google))
+#endif
+ . = ALIGN(4);
+ } > ILM AT>FLASH
+ __data_lma_start = .;
+
+ .data : {
+ . = ALIGN(4);
+ __data_start = .;
+ *(.data)
+ *(.sdata)
+ . = ALIGN(4);
+ __data_end = .;
+ } > IRAM AT>FLASH
+
+ .bss : {
+ /* Stacks must be 128-bit aligned */
+ . = ALIGN(16);
+ __bss_start = .;
+ *(.bss.tasks)
+ . = ALIGN(16);
+ *(.bss.task_scratchpad)
+ . = ALIGN(16);
+ *(.bss.system_stack)
+ . = ALIGN(16);
+ __global_pointer$ = .;
+ *(.sbss)
+ . = ALIGN(4);
+ /* Rest of .bss takes care of its own alignment */
+ *(.bss)
+ *(.bss.slow)
+
+ /*
+ * Reserve space for deferred function firing times. Each time is a
+ * uint64_t, each func is a 32-bit pointer, thus the scaling factor of
+ * two.
+ */
+ . = ALIGN(8);
+ __deferred_until = .;
+ . += (__deferred_funcs_end - __deferred_funcs) * (8 / 4);
+ __deferred_until_end = .;
+
+ . = ALIGN(4);
+ __bss_end = .;
+
+ /* Shared memory buffer must be at the end of preallocated RAM, so it
+ * can expand to use all the remaining RAM. */
+ __shared_mem_buf = .;
+
+ } > IRAM
+
+ ASSERT((__shared_mem_buf + CONFIG_SHAREDMEM_MINIMUM_SIZE) <=
+ (CONFIG_RAM_BASE + CONFIG_RAM_SIZE),
+ "Not enough space for shared memory.")
+ __ram_free = (CONFIG_RAM_BASE + CONFIG_RAM_SIZE) -
+ (__shared_mem_buf + CONFIG_SHAREDMEM_MINIMUM_SIZE);
+ __image_size = LOADADDR(.data) + SIZEOF(.data) + \
+ CHIP_ILM_BASE - FW_OFF(SECTION);
+
+#if defined(CONFIG_HOSTCMD_X86) || defined(CONFIG_I2C_SLAVE)
+ .h2ram (NOLOAD) : {
+ . += CONFIG_H2RAM_HOST_LPC_IO_BASE;
+ *(.h2ram.pool.hostcmd)
+ . = ALIGN(256);
+ *(.h2ram.pool.acpiec)
+#ifdef CONFIG_I2C_SLAVE
+ . = ALIGN(256);
+ *(.h2ram.pool.i2cslv)
+#endif
+ __h2ram_end = .;
+ } > H2RAM
+#endif
+ ASSERT((__h2ram_end) <= (CONFIG_H2RAM_BASE + CONFIG_H2RAM_SIZE),
+ "Not enough space for h2ram section.")
+
+#if !(defined(SECTION_IS_RO) && defined(CONFIG_FLASH))
+ /DISCARD/ : {
+ *(.google)
+ }
+#endif
+
+ /DISCARD/ : { *(.ARM.*) }
+}
diff --git a/core/riscv-rv32i/init.S b/core/riscv-rv32i/init.S
new file mode 100644
index 0000000000..11b1dba1f8
--- /dev/null
+++ b/core/riscv-rv32i/init.S
@@ -0,0 +1,394 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * RISC-V CPU initialization
+ */
+
+#include "config.h"
+
+.macro vector name
+.set \name\()_handler, unhandled_ec_irq
+.weak \name\()_handler
+j __entry_\()\name
+.pushsection .text.vectirq
+.global __entry_\()\name
+__entry_\()\name:
+ /* C routine handler */
+ j \name\()_handler
+.popsection
+.pushsection .rodata.vecthandlers
+.long \name\()_handler
+.popsection
+.endm
+
+.section .text.vecttable
+.align 2
+__startup:
+ j __reset /* reset */
+__irq:
+ j __irq_isr /* interrupts / exceptions */
+
+/*
+ * E-flash signature used to enable specific functions after power-on reset
+ * (HW mechanism).
+ * The 16-byte content must be as follows, located at offset 0x80 of the
+ * binary.
+ * ----------------------------------------------------------------------------
+ * 1st 2nd 3rd 4th 5th 6th 7th 8th 9th 10th 11th 12th 13th 14th 15th 16th
+ * ----------------------------------------------------------------------------
+ * A5h A5h A5h A5h A5h A5h [host] [flag] 85h 12h 5Ah 5Ah AAh AAh 55h 55h
+ * ----------------------------------------------------------------------------
+ * [host]: A4h = enable eSPI, A5h = enable LPC
+ * [flag]:
+ * bit7: it must be 1b.
+ * bit6: it must be 0b.
+ * bit5: it must be 1b.
+ * bit4: 1b = 32.768KHz is from the internal clock generator.
+ * bit3: it must be 0b.
+ * bit2: it must be 1b.
+ * bit1: it must be 0b.
+ * bit0: it must be 0b.
+ */
+.org 0x80
+.balign 16
+.global eflash_sig
+eflash_sig:
+.byte 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5
+#ifdef CONFIG_HOSTCMD_ESPI
+.byte 0xA4 /* eSPI */
+#else
+.byte 0xA5 /* LPC */
+#endif
+.byte 0xB4 /* flag of signature */
+.byte 0x85, 0x12, 0x5A, 0x5A, 0xAA, 0xAA, 0x55, 0x55
+/* flags: internal oscillator + implicit location */
+
+.align 2
+__ec_intc:
+ vector irq_0 /* INT GROUP 0 */
+ vector irq_1 /* INT GROUP 1 */
+ vector irq_2 /* INT GROUP 2 */
+ vector irq_3 /* INT GROUP 3 */
+ vector irq_4 /* INT GROUP 4 */
+ vector irq_5 /* INT GROUP 5 */
+ vector irq_6 /* INT GROUP 6 */
+ vector irq_7 /* INT GROUP 7 */
+ vector irq_8 /* INT GROUP 8 */
+ vector irq_9 /* INT GROUP 9 */
+ vector irq_10 /* INT GROUP 10 */
+ vector irq_11 /* INT GROUP 11 */
+ vector irq_12 /* INT GROUP 12 */
+ vector irq_13 /* INT GROUP 13 */
+ vector irq_14 /* INT GROUP 14 */
+ vector irq_15 /* INT GROUP 15 */
+ vector syscall /* system call (emulate INT GROUP 16) */
+
+.align 2
+__irq_isr:
+ /* save t2 to scratch register */
+ csrw mscratch, t2
+ /* save registers (sp, ra, t0, and t1) state at exception entry */
+ la t2, excep_entry_saved_regs
+ sw sp, 0*4(t2)
+ sw ra, 1*4(t2)
+ sw t0, 2*4(t2)
+ sw t1, 3*4(t2)
+ /* store return address register */
+ la t2, ira
+ sw ra, 0(t2)
+ /* restore t2 */
+ csrr t2, mscratch
+ /* save ra, a0-7, t0-t6 (high memory address to low) */
+ sw t6, -16*4(sp)
+ sw t5, -15*4(sp)
+ sw t4, -14*4(sp)
+ sw t3, -13*4(sp)
+ sw t2, -12*4(sp)
+ sw t1, -11*4(sp)
+ sw t0, -10*4(sp)
+ sw a7, -9*4(sp)
+ sw a6, -8*4(sp)
+ sw a5, -7*4(sp)
+ sw a4, -6*4(sp)
+ sw a3, -5*4(sp)
+ sw a2, -4*4(sp)
+ sw a1, -3*4(sp)
+ /* Don't change index of ra and a0 (see task_pre_init()) */
+ sw a0, -2*4(sp)
+ sw ra, -1*4(sp)
+#ifdef CONFIG_FPU
+ /* save ft0-11, fa0-7, and fcsr. */
+ csrr t0, fcsr
+ sw t0, -37*4(sp)
+ fsw fa7, -36*4(sp)
+ fsw fa6, -35*4(sp)
+ fsw fa5, -34*4(sp)
+ fsw fa4, -33*4(sp)
+ fsw fa3, -32*4(sp)
+ fsw fa2, -31*4(sp)
+ fsw fa1, -30*4(sp)
+ fsw fa0, -29*4(sp)
+ fsw ft11, -28*4(sp)
+ fsw ft10, -27*4(sp)
+ fsw ft9, -26*4(sp)
+ fsw ft8, -25*4(sp)
+ fsw ft7, -24*4(sp)
+ fsw ft6, -23*4(sp)
+ fsw ft5, -22*4(sp)
+ fsw ft4, -21*4(sp)
+ fsw ft3, -20*4(sp)
+ fsw ft2, -19*4(sp)
+ fsw ft1, -18*4(sp)
+ fsw ft0, -17*4(sp)
+ addi sp, sp, -37*4
+#else
+ addi sp, sp, -16*4
+#endif
+ /* save sp to scratch register */
+ csrw mscratch, sp
+ /* switch to system stack if we are called from process stack */
+ la t0, stack_end
+ /* do not change sp if sp < end of system stack */
+ bltu sp, t0, __no_adjust_sp
+ mv sp, t0
+__no_adjust_sp:
+ /* read exception cause */
+ csrr t0, mcause
+ /* isolate exception cause */
+ andi t1, t0, 0x1f
+ /* mcause = 11: external interrupt or environment call from M-mode */
+ addi t1, t1, -11
+ beqz t1, __irq_handler
+ /* branch if this is an exception (the interrupt bit of mcause is 0) */
+ bgez t0, excep_handler
+ /* This interrupt is unhandled */
+ j unhandled_interrupt
+__irq_handler:
+ jal start_irq_handler
+ /* get EC interrupt group 0-15 or 16:ecall */
+ la t0, ec_int_group
+ /* get corresponding isr */
+ lw t1, 0(t0)
+ slli t1, t1, 2
+ la t0, __ec_intc
+ add t0, t0, t1
+ /* handle irq */
+ jalr t0
+ /* check whether we need to change the scheduled task */
+ la t0, need_resched
+ lw t1, 0(t0)
+ bnez t1, __switch_task
+.global __irq_exit
+__irq_exit:
+ jal end_irq_handler
+ /* restore sp from scratch register */
+ csrr sp, mscratch
+#ifdef CONFIG_FPU
+ addi sp, sp, 37*4
+ /* restore ft0-11, fa0-7, and fcsr. */
+ lw t0, -37*4(sp)
+ csrw fcsr, t0
+ flw fa7, -36*4(sp)
+ flw fa6, -35*4(sp)
+ flw fa5, -34*4(sp)
+ flw fa4, -33*4(sp)
+ flw fa3, -32*4(sp)
+ flw fa2, -31*4(sp)
+ flw fa1, -30*4(sp)
+ flw fa0, -29*4(sp)
+ flw ft11, -28*4(sp)
+ flw ft10, -27*4(sp)
+ flw ft9, -26*4(sp)
+ flw ft8, -25*4(sp)
+ flw ft7, -24*4(sp)
+ flw ft6, -23*4(sp)
+ flw ft5, -22*4(sp)
+ flw ft4, -21*4(sp)
+ flw ft3, -20*4(sp)
+ flw ft2, -19*4(sp)
+ flw ft1, -18*4(sp)
+ flw ft0, -17*4(sp)
+#else
+ addi sp, sp, 16*4
+#endif
+ /* restore ra, a0-a7, t0-t6 */
+ lw t6, -16*4(sp)
+ lw t5, -15*4(sp)
+ lw t4, -14*4(sp)
+ lw t3, -13*4(sp)
+ lw t2, -12*4(sp)
+ lw t1, -11*4(sp)
+ lw t0, -10*4(sp)
+ lw a7, -9*4(sp)
+ lw a6, -8*4(sp)
+ lw a5, -7*4(sp)
+ lw a4, -6*4(sp)
+ lw a3, -5*4(sp)
+ lw a2, -4*4(sp)
+ lw a1, -3*4(sp)
+ lw a0, -2*4(sp)
+ lw ra, -1*4(sp)
+ mret
+
+.text
+.global __reset
+__reset:
+ /* disable interrupts */
+ csrw mie, zero
+.option push
+.option norelax
+ /* GP register is used to access .data and .bss (address +/- 2048) */
+ la gp, __global_pointer$
+.option pop
+ /* Set system stack pointer. */
+ la sp, stack_end
+#ifdef CONFIG_FPU
+ li t0, 0x6000
+ csrw mstatus, t0
+ csrw fcsr, zero
+#else
+ csrw mstatus, zero
+#endif
+ /*
+ * Move the content of the return address register (ra) into t5; it is
+ * stored into the variable "ec_reset_lp" later, after memory
+ * initialization.
+ */
+ mv t5, ra
+ /* Clear the link register */
+ li ra, 0
+ /* Clear the thread pointer register */
+ li tp, 0
+ /* set machine trap-handler base address */
+ la t0, __irq
+ csrw mtvec, t0
+ /* reset scratch register */
+ csrw mscratch, zero
+ /* The M-mode handles interrupt/exception */
+ csrwi mideleg, 0
+ csrwi medeleg, 0
+ /* Clear BSS */
+ la t0, __bss_start
+ la t1, __bss_end
+bss_loop:
+ sw zero, 0(t0)
+ addi t0, t0, 4
+ bltu t0, t1, bss_loop
+ /* Copy initialized data to data section */
+ la t0, __data_start
+ la t1, __data_end
+ la t2, __data_lma_start
+data_loop:
+ lw t3, 0(t2)
+ sw t3, 0(t0)
+ addi t0, t0, 4
+ addi t2, t2, 4
+ bltu t0, t1, data_loop
+ /* store the content of t5 (ra after reset) into "ec_reset_lp" */
+ la t0, ec_reset_lp
+ sw t5, 0(t0)
+ /* clear BRAM if it is not valid */
+ jal chip_bram_valid
+ /* Jump to C routine */
+ jal main
+ /* That should not return. If it does, loop forever. */
+ j .
+
+.global unhandled_ec_irq
+.global unhandled_interrupt
+unhandled_ec_irq:
+ li tp, 0xBAD1
+ j __unhandled_irq
+unhandled_interrupt:
+ li tp, 0xBAD0
+__unhandled_irq:
+ slli tp, tp, 8
+ csrr t0, mcause
+ add tp, tp, t0
+ j excep_handler /* display exception with TP 80bad[0|1]<irq> */
+
+.global excep_handler
+excep_handler:
+ /* save t2 */
+ csrw mscratch, t2
+ /* restore registers (sp, ra, t0, and t1) state */
+ la t2, excep_entry_saved_regs
+ lw sp, 0*4(t2)
+ lw ra, 1*4(t2)
+ lw t0, 2*4(t2)
+ lw t1, 3*4(t2)
+ /* restore t2 */
+ csrr t2, mscratch
+ /* save sp to scratch register */
+ csrw mscratch, sp
+ la sp, saved_regs
+ /* save sp, ra, gp, tp, a0-a7, t0-t6, and s0-s11 registers */
+ sw s11, 0*4(sp)
+ sw s10, 1*4(sp)
+ sw s9, 2*4(sp)
+ sw s8, 3*4(sp)
+ sw s7, 4*4(sp)
+ sw s6, 5*4(sp)
+ sw s5, 6*4(sp)
+ sw s4, 7*4(sp)
+ sw s3, 8*4(sp)
+ sw s2, 9*4(sp)
+ sw s1, 10*4(sp)
+ sw s0, 11*4(sp)
+ sw t6, 12*4(sp)
+ sw t5, 13*4(sp)
+ sw t4, 14*4(sp)
+ sw t3, 15*4(sp)
+ sw t2, 16*4(sp)
+ sw t1, 17*4(sp)
+ sw t0, 18*4(sp)
+ sw a7, 19*4(sp)
+ sw a6, 20*4(sp)
+ sw a5, 21*4(sp)
+ sw a4, 22*4(sp)
+ sw a3, 23*4(sp)
+ sw a2, 24*4(sp)
+ sw a1, 25*4(sp)
+ sw a0, 26*4(sp)
+ sw tp, 27*4(sp)
+ sw gp, 28*4(sp)
+ sw ra, 29*4(sp)
+ la a0, saved_regs
+ csrr sp, mscratch
+ sw sp, 30*4(a0)
+ /* put a sane stack pointer */
+ la sp, stack_end
+ /* jump to panic dump C routine */
+ jal report_panic
+ j .
+
+.align 2
+_bss_start:
+.long __bss_start
+_bss_end:
+.long __bss_end
+_data_start:
+.long __data_start
+_data_end:
+.long __data_end
+_data_lma_start:
+.long __data_lma_start
+
+/* Reserve space for system stack */
+.section .bss.system_stack
+stack_start:
+.space CONFIG_STACK_SIZE, 0
+stack_end:
+.global stack_end
+
+/* sp, ra, t0, t1 registers state at exception entry */
+.global excep_entry_saved_regs
+excep_entry_saved_regs:
+.long 0, 0, 0, 0
+
+/* registers state at exception entry */
+.global saved_regs
+saved_regs:
+.long 0, 0, 0, 0, 0, 0, 0, 0
+.long 0, 0, 0, 0, 0, 0, 0, 0
+.long 0, 0, 0, 0, 0, 0, 0, 0
+.long 0, 0, 0, 0, 0, 0, 0, 0
diff --git a/core/riscv-rv32i/irq_chip.h b/core/riscv-rv32i/irq_chip.h
new file mode 100644
index 0000000000..916de4ed57
--- /dev/null
+++ b/core/riscv-rv32i/irq_chip.h
@@ -0,0 +1,59 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Chip-specific part of the IRQ handling.
+ */
+
+#ifndef __CROS_EC_IRQ_CHIP_H
+#define __CROS_EC_IRQ_CHIP_H
+
+/**
+ * Enable an IRQ in the chip interrupt controller.
+ *
+ * @param irq interrupt request index.
+ * @return CPU interrupt number to enable if any, -1 else.
+ */
+int chip_enable_irq(int irq);
+
+/**
+ * Disable an IRQ in the chip interrupt controller.
+ *
+ * @param irq interrupt request index.
+ * @return CPU interrupt number to disable if any, -1 else.
+ */
+int chip_disable_irq(int irq);
+
+/**
+ * Clear a pending IRQ in the chip interrupt controller.
+ *
+ * @param irq interrupt request index.
+ * @return CPU interrupt number to clear if any, -1 else.
+ *
+ * Note that most interrupts can be removed from the pending state simply by
+ * handling whatever caused the interrupt in the first place. This only needs
+ * to be called if an interrupt handler disables itself without clearing the
+ * reason for the interrupt, and then the interrupt is re-enabled from a
+ * different context.
+ */
+int chip_clear_pending_irq(int irq);
+
+/**
+ * Software-trigger an IRQ in the chip interrupt controller.
+ *
+ * @param irq interrupt request index.
+ * @return CPU interrupt number to trigger if any, -1 else.
+ */
+int chip_trigger_irq(int irq);
+
+/**
+ * Initialize chip interrupt controller.
+ */
+void chip_init_irqs(void);
+
+/**
+ * Return interrupt number of software interrupt.
+ */
+int get_sw_int(void);
+
+#endif /* __CROS_EC_IRQ_CHIP_H */
diff --git a/core/riscv-rv32i/irq_handler.h b/core/riscv-rv32i/irq_handler.h
new file mode 100644
index 0000000000..e2ca79d71f
--- /dev/null
+++ b/core/riscv-rv32i/irq_handler.h
@@ -0,0 +1,25 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* Helper to declare IRQ handling routines */
+
+#ifndef __CROS_EC_IRQ_HANDLER_H
+#define __CROS_EC_IRQ_HANDLER_H
+
+/* Helper macros to build the IRQ handler and priority struct names */
+#define IRQ_HANDLER(irqname) CONCAT3(irq_, irqname, _handler)
+#define IRQ_PRIORITY(irqname) CONCAT2(prio_, irqname)
+/*
+ * Macro to connect the interrupt handler "routine" to the irq number "irq" and
+ * ensure it is enabled in the interrupt controller with the right priority.
+ */
+#define DECLARE_IRQ(irq, routine, priority) \
+ void IRQ_HANDLER(CPU_INT(irq))(void) \
+ __attribute__ ((alias(STRINGIFY(routine)))); \
+ const struct irq_priority __keep IRQ_PRIORITY(CPU_INT(irq)) \
+ __attribute__((section(".rodata.irqprio"))) \
+ = {CPU_INT(irq), priority}
+
+#endif /* __CROS_EC_IRQ_HANDLER_H */
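
A hypothetical usage example (not part of this commit) of DECLARE_IRQ() above: the macro aliases the irq_<n>_handler symbol, which the vector code in init.S otherwise resolves to unhandled_ec_irq, to a C routine, and emits an irq_priority entry into .rodata.irqprio for the interrupt-controller init code to scan. The handler name is illustrative; the IRQ number (e.g. IT83XX_IRQ_UART1) and the CPU_INT() mapping come from the chip headers.

#include "irq_handler.h"
#include "registers.h"		/* chip IRQ numbers and CPU_INT() */

static void my_uart_interrupt(void)
{
	/* Acknowledge and service the device interrupt here. */
}
DECLARE_IRQ(IT83XX_IRQ_UART1, my_uart_interrupt, 1);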
diff --git a/core/riscv-rv32i/panic.c b/core/riscv-rv32i/panic.c
new file mode 100644
index 0000000000..dcf1da21b3
--- /dev/null
+++ b/core/riscv-rv32i/panic.c
@@ -0,0 +1,182 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "common.h"
+#include "console.h"
+#include "cpu.h"
+#include "panic.h"
+#include "printf.h"
+#include "system.h"
+#include "task.h"
+#include "timer.h"
+#include "util.h"
+
+/* Panic data goes at the end of RAM. */
+static struct panic_data * const pdata_ptr = PANIC_DATA_PTR;
+
+#ifdef CONFIG_DEBUG_EXCEPTIONS
+/**
+ * Bits [3:0] of mcause: general exception type information.
+ */
+static const char * const exc_type[16] = {
+ "Instruction address misaligned",
+ "Instruction access fault",
+ "Illegal instruction",
+ "Breakpoint",
+ "Load address misaligned",
+ "Load access fault",
+ "Store/AMO address misaligned",
+ "Store/AMO access fault",
+
+ NULL,
+ NULL,
+ NULL,
+ "Environment call from M-mode",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+};
+#endif /* CONFIG_DEBUG_EXCEPTIONS */
+
+#ifdef CONFIG_SOFTWARE_PANIC
+/* General purpose register (s0) for saving software panic reason */
+#define SOFT_PANIC_GPR_REASON 11
+/* General purpose register (s1) for saving software panic information */
+#define SOFT_PANIC_GPR_INFO 10
+/* The size must be a power of 2 */
+#define SOFT_PANIC_REASON_SIZE 8
+#define SOFT_PANIC_REASON_MASK (SOFT_PANIC_REASON_SIZE - 1)
+/* Software panic reasons */
+static const char * const panic_sw_reasons[SOFT_PANIC_REASON_SIZE] = {
+ "PANIC_SW_DIV_ZERO",
+ "PANIC_SW_STACK_OVERFLOW",
+ "PANIC_SW_PD_CRASH",
+ "PANIC_SW_ASSERT",
+ "PANIC_SW_WATCHDOG",
+ "PANIC_SW_BAD_RNG",
+ "PANIC_SW_PMIC_FAULT",
+ NULL,
+};
+
+void software_panic(uint32_t reason, uint32_t info)
+{
+ asm volatile ("mv s0, %0" : : "r"(reason));
+ asm volatile ("mv s1, %0" : : "r"(info));
+ if (in_interrupt_context())
+ asm("j excep_handler");
+ else
+ asm("ebreak");
+ __builtin_unreachable();
+}
+
+void panic_set_reason(uint32_t reason, uint32_t info, uint8_t exception)
+{
+ uint32_t *regs = pdata_ptr->riscv.regs;
+ uint32_t warning_mepc;
+
+ /* Setup panic data structure */
+ if (reason != PANIC_SW_WATCHDOG) {
+ memset(pdata_ptr, 0, sizeof(*pdata_ptr));
+ } else {
+ warning_mepc = pdata_ptr->riscv.mepc;
+ memset(pdata_ptr, 0, sizeof(*pdata_ptr));
+ pdata_ptr->riscv.mepc = warning_mepc;
+ }
+ pdata_ptr->magic = PANIC_DATA_MAGIC;
+ pdata_ptr->struct_size = sizeof(*pdata_ptr);
+ pdata_ptr->struct_version = 2;
+ pdata_ptr->arch = PANIC_ARCH_RISCV_RV32I;
+
+ /* Log panic cause */
+ pdata_ptr->riscv.mcause = exception;
+ regs[SOFT_PANIC_GPR_REASON] = reason;
+ regs[SOFT_PANIC_GPR_INFO] = info;
+}
+
+void panic_get_reason(uint32_t *reason, uint32_t *info, uint8_t *exception)
+{
+ uint32_t *regs = pdata_ptr->riscv.regs;
+
+ if (pdata_ptr->magic == PANIC_DATA_MAGIC &&
+ pdata_ptr->struct_version == 2) {
+ *exception = pdata_ptr->riscv.mcause;
+ *reason = regs[SOFT_PANIC_GPR_REASON];
+ *info = regs[SOFT_PANIC_GPR_INFO];
+ } else {
+ *exception = *reason = *info = 0;
+ }
+}
+#endif /* CONFIG_SOFTWARE_PANIC */
+
+static void print_panic_information(uint32_t *regs, uint32_t mcause,
+ uint32_t mepc)
+{
+ panic_printf("=== EXCEPTION: MCAUSE=%x ===\n", mcause);
+ panic_printf("S11 %08x S10 %08x S9 %08x S8 %08x\n",
+ regs[0], regs[1], regs[2], regs[3]);
+ panic_printf("S7 %08x S6 %08x S5 %08x S4 %08x\n",
+ regs[4], regs[5], regs[6], regs[7]);
+ panic_printf("S3 %08x S2 %08x S1 %08x S0 %08x\n",
+ regs[8], regs[9], regs[10], regs[11]);
+ panic_printf("T6 %08x T5 %08x T4 %08x T3 %08x\n",
+ regs[12], regs[13], regs[14], regs[15]);
+ panic_printf("T2 %08x T1 %08x T0 %08x A7 %08x\n",
+ regs[16], regs[17], regs[18], regs[19]);
+ panic_printf("A6 %08x A5 %08x A4 %08x A3 %08x\n",
+ regs[20], regs[21], regs[22], regs[23]);
+ panic_printf("A2 %08x A1 %08x A0 %08x TP %08x\n",
+ regs[24], regs[25], regs[26], regs[27]);
+ panic_printf("GP %08x RA %08x SP %08x MEPC %08x\n",
+ regs[28], regs[29], regs[30], mepc);
+
+#ifdef CONFIG_DEBUG_EXCEPTIONS
+ if ((regs[SOFT_PANIC_GPR_REASON] & 0xfffffff0) == PANIC_SW_BASE) {
+#ifdef CONFIG_SOFTWARE_PANIC
+ panic_printf("Software panic reason: %s\n",
+ panic_sw_reasons[(regs[SOFT_PANIC_GPR_REASON] &
+ SOFT_PANIC_REASON_MASK)]);
+ panic_printf("Software panic info: %d\n",
+ regs[SOFT_PANIC_GPR_INFO]);
+#endif
+ } else {
+ panic_printf("Exception type: %s\n", exc_type[(mcause & 0xf)]);
+ }
+#endif
+}
+
+void report_panic(uint32_t *regs)
+{
+ uint32_t i, mcause, mepc;
+ struct panic_data *pdata = pdata_ptr;
+
+ mepc = get_mepc();
+ mcause = get_mcause();
+
+ pdata->magic = PANIC_DATA_MAGIC;
+ pdata->struct_size = sizeof(*pdata);
+ pdata->struct_version = 2;
+ pdata->arch = PANIC_ARCH_RISCV_RV32I;
+ pdata->flags = 0;
+ pdata->reserved = 0;
+
+ pdata->riscv.mcause = mcause;
+ pdata->riscv.mepc = mepc;
+ for (i = 0; i < 31; i++)
+ pdata->riscv.regs[i] = regs[i];
+
+ print_panic_information(regs, mcause, mepc);
+ panic_reboot();
+}
+
+void panic_data_print(const struct panic_data *pdata)
+{
+ uint32_t *regs, mcause, mepc;
+
+ regs = (uint32_t *)pdata->riscv.regs;
+ mcause = pdata->riscv.mcause;
+ mepc = pdata->riscv.mepc;
+ print_panic_information(regs, mcause, mepc);
+}
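
For reference, an illustrative index map (not part of the commit; the enum name is mine) of pdata->riscv.regs[] as filled by excep_handler in init.S and printed above; it also shows why SOFT_PANIC_GPR_REASON and SOFT_PANIC_GPR_INFO are 11 and 10: those are the s0 and s1 slots.

enum riscv_panic_reg_index {
	RISCV_PANIC_IDX_S11 = 0,	/* first register excep_handler stores */
	RISCV_PANIC_IDX_S1  = 10,	/* s1: software panic info */
	RISCV_PANIC_IDX_S0  = 11,	/* s0: software panic reason */
	RISCV_PANIC_IDX_TP  = 27,
	RISCV_PANIC_IDX_GP  = 28,
	RISCV_PANIC_IDX_RA  = 29,
	RISCV_PANIC_IDX_SP  = 30,	/* saved separately via mscratch */
};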
diff --git a/core/riscv-rv32i/switch.S b/core/riscv-rv32i/switch.S
new file mode 100644
index 0000000000..cfff3e8bc3
--- /dev/null
+++ b/core/riscv-rv32i/switch.S
@@ -0,0 +1,157 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Context switching
+ */
+
+#include "config.h"
+#include "cpu.h"
+
+.text
+
+/**
+ * Task context switching
+ *
+ * Change the task scheduled after returning from an interrupt.
+ *
+ * This function must be called in interrupt context.
+ *
+ * Save the registers of the current task below the interrupt context on
+ * its task stack, then restore the live registers of the next task and set
+ * the process stack pointer to the new stack.
+ *
+ * The structure of the saved context on the stack is:
+ * ra, a0-a7, t0-t6 (caller saved), s0-s11 (callee saved), mepc
+ * interrupt entry frame <|> additional registers
+ * If the FPU is enabled:
+ * ra, a0-a7, t0-t6, ft0-ft11, fa0-fa7, and fcsr <|>
+ * s0-s11, fs0-fs11, and mepc
+ *
+ */
+.global __switch_task
+__switch_task:
+ /* get the (new) highest priority task pointer in a0 */
+ jal next_sched_task
+ /* pointer to the current task (which we are switching from) */
+ la t1, current_task
+ lw t0, 0(t1)
+ /* reset the re-scheduling request */
+ la t2, need_resched
+ sw zero, 0(t2)
+ /* Nothing to do: let's return to keep the same task scheduled */
+ beq a0, t0, __irq_exit
+ /* save our new scheduled task */
+ sw a0, 0(t1)
+ /* restore current process stack pointer */
+ csrr sp, mscratch
+ /* get the task program counter saved at exception entry */
+ csrr t5, mepc
+ /* save s0-s11 on the current process stack */
+ sw s11, -12*4(sp)
+ sw s10, -11*4(sp)
+ sw s9, -10*4(sp)
+ sw s8, -9*4(sp)
+ sw s7, -8*4(sp)
+ sw s6, -7*4(sp)
+ sw s5, -6*4(sp)
+ sw s4, -5*4(sp)
+ sw s3, -4*4(sp)
+ sw s2, -3*4(sp)
+ sw s1, -2*4(sp)
+ sw s0, -1*4(sp)
+#ifdef CONFIG_FPU
+ /* save fs0-fs11 on the current process stack */
+ fsw fs11, -24*4(sp)
+ fsw fs10, -23*4(sp)
+ fsw fs9, -22*4(sp)
+ fsw fs8, -21*4(sp)
+ fsw fs7, -20*4(sp)
+ fsw fs6, -19*4(sp)
+ fsw fs5, -18*4(sp)
+ fsw fs4, -17*4(sp)
+ fsw fs3, -16*4(sp)
+ fsw fs2, -15*4(sp)
+ fsw fs1, -14*4(sp)
+ fsw fs0, -13*4(sp)
+ /* save program counter on the current process stack */
+ sw t5, -25*4(sp)
+ addi sp, sp, -25*4
+#else
+ /* save program counter on the current process stack */
+ sw t5, -13*4(sp)
+ addi sp, sp, -13*4
+#endif
+ /* save the task stack pointer in its context */
+ sw sp, 0(t0)
+ /* get the new scheduled task stack pointer */
+ lw sp, 0(a0)
+#ifdef CONFIG_FPU
+ addi sp, sp, 25*4
+ /* get mepc */
+ lw t0, -25*4(sp)
+ /* restore FP registers (fs0-fs11) from the next stack context */
+ flw fs11, -24*4(sp)
+ flw fs10, -23*4(sp)
+ flw fs9, -22*4(sp)
+ flw fs8, -21*4(sp)
+ flw fs7, -20*4(sp)
+ flw fs6, -19*4(sp)
+ flw fs5, -18*4(sp)
+ flw fs4, -17*4(sp)
+ flw fs3, -16*4(sp)
+ flw fs2, -15*4(sp)
+ flw fs1, -14*4(sp)
+ flw fs0, -13*4(sp)
+#else
+ addi sp, sp, 13*4
+ /* get mepc */
+ lw t0, -13*4(sp)
+#endif
+ /* restore program counter from the next stack context */
+ csrw mepc, t0
+ /* restore registers from the next stack context */
+ lw s11, -12*4(sp)
+ lw s10, -11*4(sp)
+ lw s9, -10*4(sp)
+ lw s8, -9*4(sp)
+ lw s7, -8*4(sp)
+ lw s6, -7*4(sp)
+ lw s5, -6*4(sp)
+ lw s4, -5*4(sp)
+ lw s3, -4*4(sp)
+ lw s2, -3*4(sp)
+ lw s1, -2*4(sp)
+ lw s0, -1*4(sp)
+ /*
+ * save sp to scratch register and switch to system stack.
+ * __irq_exit will restore sp from scratch register again before mret.
+ */
+ csrw mscratch, sp
+ la sp, stack_end
+ j __irq_exit
+
+/**
+ * Start the task scheduling.
+ */
+.global __task_start
+__task_start:
+ csrci mstatus, 0x8
+ /* area used as dummy thread stack for the first switch */
+ la a3, scratchpad
+ li a4, 1
+ li a2, 0 /* system call 3rd parameter : not an IRQ emulation */
+ li a1, 0 /* system call 2nd parameter : re-schedule nothing */
+ li a0, 0 /* system call 1st parameter : de-schedule nothing */
+ /* put the dummy stack pointer at the top of the stack in scratchpad */
+ addi sp, a3, 4 * TASK_SCRATCHPAD_SIZE
+ /* we are ready to re-schedule */
+ la t0, need_resched
+ sw a4, 0(t0)
+ la t0, start_called
+ sw a4, 0(t0)
+ csrsi mstatus, 0x8
+ /* trigger scheduling to execute the task with the highest priority */
+ ecall
+ /* we should never return here */
+ j .
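
For reference, a C view (illustration only, non-FPU build; the struct name is mine) of the saved context that __irq_isr plus __switch_task leave at the sp value stored in the task struct. task_pre_init() in task.c seeds the same layout for a fresh task: mepc in word 0, and a0/ra in the last two words.

#include <stdint.h>

struct riscv_task_context {
	uint32_t mepc;				/* word 0: resume PC */
	uint32_t s11, s10, s9, s8, s7, s6;	/* callee-saved, pushed by */
	uint32_t s5, s4, s3, s2, s1, s0;	/*   __switch_task */
	uint32_t t6, t5, t4, t3, t2, t1, t0;	/* caller-saved, pushed by */
	uint32_t a7, a6, a5, a4, a3, a2, a1;	/*   __irq_isr at IRQ entry */
	uint32_t a0;				/* word 27: task argument */
	uint32_t ra;				/* word 28: task_exit_trap */
};

_Static_assert(sizeof(struct riscv_task_context) == 29 * 4,
	       "matches TASK_SCRATCHPAD_SIZE without CONFIG_FPU");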
diff --git a/core/riscv-rv32i/task.c b/core/riscv-rv32i/task.c
new file mode 100644
index 0000000000..b7ed17a211
--- /dev/null
+++ b/core/riscv-rv32i/task.c
@@ -0,0 +1,716 @@
+/* Copyright 2019 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* Task scheduling / events module for Chrome EC operating system */
+
+#include "atomic.h"
+#include "common.h"
+#include "console.h"
+#include "cpu.h"
+#include "hwtimer_chip.h"
+#include "intc.h"
+#include "irq_chip.h"
+#include "link_defs.h"
+#include "registers.h"
+#include "task.h"
+#include "timer.h"
+#include "util.h"
+
+typedef struct {
+ /*
+ * Note that sp must be the first element in the task struct
+ * for __switch_task() to work.
+ */
+ uint32_t sp; /* Saved stack pointer for context switch */
+ uint32_t events; /* Bitmaps of received events */
+ uint64_t runtime; /* Time spent in task */
+ uint32_t *stack; /* Start of stack */
+} task_;
+
+/* Value to store in unused stack */
+#define STACK_UNUSED_VALUE 0xdeadd00d
+
+/* declare task routine prototypes */
+#define TASK(n, r, d, s) void r(void *);
+void __idle(void);
+CONFIG_TASK_LIST
+CONFIG_TEST_TASK_LIST
+#undef TASK
+
+/* Task names for easier debugging */
+#define TASK(n, r, d, s) #n,
+static const char * const task_names[] = {
+ "<< idle >>",
+ CONFIG_TASK_LIST
+ CONFIG_TEST_TASK_LIST
+};
+#undef TASK
+
+#ifdef CONFIG_TASK_PROFILING
+static int task_will_switch;
+static uint64_t exc_sub_time;
+static uint64_t task_start_time; /* Time task scheduling started */
+static uint64_t exc_start_time; /* Time of task->exception transition */
+static uint64_t exc_end_time; /* Time of exception->task transition */
+static uint64_t exc_total_time; /* Total time in exceptions */
+static uint32_t svc_calls; /* Number of service calls */
+static uint32_t task_switches; /* Number of times active task changed */
+static uint32_t irq_dist[CONFIG_IRQ_COUNT]; /* Distribution of IRQ calls */
+#endif
+
+extern int __task_start(void);
+
+#ifndef CONFIG_LOW_POWER_IDLE
+/* Idle task. Executed when no tasks are ready to be scheduled. */
+void __idle(void)
+{
+ /*
+ * Print when the idle task starts. This is the lowest priority task,
+ * so this only starts once all other tasks have gotten a chance to do
+ * their task inits and have gone to sleep.
+ */
+ cprints(CC_TASK, "idle task started");
+
+ while (1) {
+ /* doze mode */
+ IT83XX_ECPM_PLLCTRL = EC_PLL_DOZE;
+ clock_cpu_standby();
+ }
+}
+#endif /* !CONFIG_LOW_POWER_IDLE */
+
+static void task_exit_trap(void)
+{
+ int i = task_get_current();
+
+ cprints(CC_TASK, "Task %d (%s) exited!", i, task_names[i]);
+ /* Exited tasks simply sleep forever */
+ while (1)
+ task_wait_event(-1);
+}
+
+/* Startup parameters for all tasks. */
+#define TASK(n, r, d, s) { \
+ .a0 = (uint32_t)d, \
+ .pc = (uint32_t)r, \
+ .stack_size = s, \
+},
+static const struct {
+ uint32_t a0;
+ uint32_t pc;
+ uint16_t stack_size;
+} tasks_init[] = {
+ TASK(IDLE, __idle, 0, IDLE_TASK_STACK_SIZE)
+ CONFIG_TASK_LIST
+ CONFIG_TEST_TASK_LIST
+};
+#undef TASK
+
+/* Contexts for all tasks */
+static task_ tasks[TASK_ID_COUNT] __attribute__ ((section(".bss.tasks")));
+/* Sanity checks about static task invariants */
+BUILD_ASSERT(TASK_ID_COUNT <= (sizeof(unsigned) * 8));
+BUILD_ASSERT(TASK_ID_COUNT < (1 << (sizeof(task_id_t) * 8)));
+
+/* Stacks for all tasks */
+#define TASK(n, r, d, s) + s
+uint8_t task_stacks[0
+ TASK(IDLE, __idle, 0, IDLE_TASK_STACK_SIZE)
+ CONFIG_TASK_LIST
+ CONFIG_TEST_TASK_LIST
+] __aligned(8);
+
+#undef TASK
+
+/* Reserve space to discard context on first context switch. */
+uint32_t scratchpad[TASK_SCRATCHPAD_SIZE] __attribute__
+ ((section(".bss.task_scratchpad")));
+
+task_ *current_task = (task_ *)scratchpad;
+
+/*
+ * Should IRQs chain to svc_handler()? This should be set if either of the
+ * following is true:
+ *
+ * 1) Task scheduling has started, and task profiling is enabled. Task
+ * profiling does its tracking in svc_handler().
+ *
+ * 2) An event was set by an interrupt; this could result in a higher-priority
+ * task unblocking. After checking for a task switch, svc_handler() will clear
+ * the flag (unless profiling is also enabled; then the flag remains set).
+ */
+int need_resched;
+
+/*
+ * Bitmap of all tasks ready to be run.
+ *
+ * Start off with only the hooks task marked as ready such that all the modules
+ * can do their init within a task switching context. The hooks task will then
+ * make a call to enable all tasks.
+ */
+static uint32_t tasks_ready = BIT(TASK_ID_HOOKS);
+/*
+ * Initially allow only the HOOKS and IDLE task to run, regardless of ready
+ * status, in order for HOOK_INIT to complete before other tasks.
+ * task_enable_all_tasks() will open the flood gates.
+ */
+static uint32_t tasks_enabled = BIT(TASK_ID_HOOKS) | BIT(TASK_ID_IDLE);
+
+int start_called; /* Has task swapping started */
+
+/* in interrupt context */
+static int in_interrupt;
+/* Interrupt number of EC modules */
+static volatile int ec_int;
+/* Interrupt group of EC INTC modules */
+volatile int ec_int_group;
+/* interrupt number of sw interrupt */
+static int sw_int_num;
+/* This variable is used to save return address register at EC reset. */
+uint32_t ec_reset_lp;
+/*
+ * This variable is used to save return address register,
+ * and it is updated at the beginning of each ISR.
+ */
+uint32_t ira;
+
+static inline task_ *__task_id_to_ptr(task_id_t id)
+{
+ return tasks + id;
+}
+
+void interrupt_disable(void)
+{
+ /* bit11: disable MEIE */
+ asm volatile ("li t0, 0x800");
+ asm volatile ("csrc mie, t0");
+}
+
+void interrupt_enable(void)
+{
+ /* bit11: enable MEIE */
+ asm volatile ("li t0, 0x800");
+ asm volatile ("csrs mie, t0");
+}
+
+inline int in_interrupt_context(void)
+{
+ return in_interrupt;
+}
+
+task_id_t task_get_current(void)
+{
+#ifdef CONFIG_DEBUG_BRINGUP
+ /* If we haven't done a context switch then our task ID isn't valid */
+ ASSERT(current_task != (task_ *)scratchpad);
+#endif
+ return current_task - tasks;
+}
+
+uint32_t *task_get_event_bitmap(task_id_t tskid)
+{
+ task_ *tsk = __task_id_to_ptr(tskid);
+
+ return &tsk->events;
+}
+
+int task_start_called(void)
+{
+ return start_called;
+}
+
+int get_sw_int(void)
+{
+ /* If this is a SW interrupt */
+ if (get_mcause() == 11)
+ return sw_int_num;
+ return 0;
+}
+
+/**
+ * Scheduling system call
+ *
+ * Also includes emulation of software triggering interrupt vector
+ */
+void __keep syscall_handler(int desched, task_id_t resched, int swirq)
+{
+ /* are we emulating an interrupt? */
+ if (swirq) {
+ void (*handler)(void) = __irqhandler[swirq];
+ /* adjust MEPC to return *after* the ecall instruction */
+ set_mepc(get_mepc() + 4);
+ /* call the regular IRQ handler */
+ handler();
+ sw_int_num = 0;
+ return;
+ }
+
+ if (desched && !current_task->events) {
+ /*
+ * Remove our own ready bit (current_task - tasks is the same as
+ * task_get_current())
+ */
+ tasks_ready &= ~(1 << (current_task - tasks));
+ }
+ tasks_ready |= 1 << resched;
+
+ /* trigger a re-scheduling on exit */
+ need_resched = 1;
+
+#ifdef CONFIG_TASK_PROFILING
+ svc_calls++;
+#endif
+ /* adjust MEPC to return *after* the ecall instruction */
+ set_mepc(get_mepc() + 4);
+}
+
+task_ *next_sched_task(void)
+{
+ task_ *new_task = __task_id_to_ptr(__fls(tasks_ready & tasks_enabled));
+
+#ifdef CONFIG_TASK_PROFILING
+ if (current_task != new_task) {
+ if ((current_task - tasks) < TASK_ID_COUNT) {
+ current_task->runtime +=
+ (exc_start_time - exc_end_time - exc_sub_time);
+ }
+ task_will_switch = 1;
+ }
+#endif
+
+#ifdef CONFIG_DEBUG_STACK_OVERFLOW
+ if (*current_task->stack != STACK_UNUSED_VALUE) {
+ int i = task_get_current();
+
+ if (i < TASK_ID_COUNT) {
+ panic_printf("\n\nStack overflow in %s task!\n",
+ task_names[i]);
+#ifdef CONFIG_SOFTWARE_PANIC
+ software_panic(PANIC_SW_STACK_OVERFLOW, i);
+#endif
+ }
+ }
+#endif
+
+ return new_task;
+}
+
+static inline void __schedule(int desched, int resched, int swirq)
+{
+ register int p0 asm("a0") = desched;
+ register int p1 asm("a1") = resched;
+ register int p2 asm("a2") = swirq;
+
+ asm("ecall" : : "r"(p0), "r"(p1), "r"(p2));
+}
+
+void update_exc_start_time(void)
+{
+#ifdef CONFIG_TASK_PROFILING
+ exc_start_time = get_time().val;
+#endif
+}
+
+#ifdef CHIP_FAMILY_IT83XX
+int intc_get_ec_int(void)
+{
+ return ec_int;
+}
+#endif
+
+void start_irq_handler(void)
+{
+ /* save a0, a1, and a2 for syscall */
+ asm volatile ("addi sp, sp, -4*3");
+ asm volatile ("sw a0, 0(sp)");
+ asm volatile ("sw a1, 1*4(sp)");
+ asm volatile ("sw a2, 2*4(sp)");
+
+ in_interrupt = 1;
+
+ /* If this is a SW interrupt */
+ if (get_mcause() == 11) {
+ ec_int = get_sw_int();
+ ec_int_group = 16;
+ } else {
+ /* Determine interrupt number */
+ ec_int = IT83XX_INTC_AIVCT - 0x10;
+ ec_int_group = chip_get_intc_group(ec_int);
+ }
+
+#if defined(CONFIG_LOW_POWER_IDLE) && defined(CHIP_FAMILY_IT83XX)
+ clock_sleep_mode_wakeup_isr();
+#endif
+#ifdef CONFIG_TASK_PROFILING
+ update_exc_start_time();
+
+ /*
+ * Track IRQ distribution. No need for atomic add, because an IRQ
+ * can't pre-empt itself.
+ */
+ if ((ec_int > 0) && (ec_int < ARRAY_SIZE(irq_dist)))
+ irq_dist[ec_int]++;
+#endif
+
+ /* restore a0, a1, and a2 */
+ asm volatile ("lw a0, 0(sp)");
+ asm volatile ("lw a1, 1*4(sp)");
+ asm volatile ("lw a2, 2*4(sp)");
+ asm volatile ("addi sp, sp, 4*3");
+}
+
+void end_irq_handler(void)
+{
+#ifdef CONFIG_TASK_PROFILING
+ uint64_t t, p;
+
+ t = get_time().val;
+ p = t - exc_start_time;
+
+ exc_total_time += p;
+ exc_sub_time += p;
+ if (task_will_switch) {
+ task_will_switch = 0;
+ exc_sub_time = 0;
+ exc_end_time = t;
+ task_switches++;
+ }
+#endif
+ in_interrupt = 0;
+}
+
+static uint32_t __wait_evt(int timeout_us, task_id_t resched)
+{
+ task_ *tsk = current_task;
+ task_id_t me = tsk - tasks;
+ uint32_t evt;
+ int ret;
+
+ ASSERT(!in_interrupt_context());
+
+ if (timeout_us > 0) {
+ timestamp_t deadline = get_time();
+
+ deadline.val += timeout_us;
+ ret = timer_arm(deadline, me);
+ ASSERT(ret == EC_SUCCESS);
+ }
+ while (!(evt = atomic_read_clear(&tsk->events))) {
+ /* Remove ourself and get the next task in the scheduler */
+ __schedule(1, resched, 0);
+ resched = TASK_ID_IDLE;
+ }
+ if (timeout_us > 0) {
+ timer_cancel(me);
+ /* Ensure timer event is clear, we no longer care about it */
+ atomic_clear(&tsk->events, TASK_EVENT_TIMER);
+ }
+ return evt;
+}
+
+uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
+{
+ task_ *receiver = __task_id_to_ptr(tskid);
+
+ ASSERT(receiver);
+
+ /* Set the event bit in the receiver message bitmap */
+ atomic_or(&receiver->events, event);
+
+ /* Re-schedule if priorities have changed */
+ if (in_interrupt_context()) {
+ /* The receiver might run again */
+ atomic_or(&tasks_ready, 1 << tskid);
+ if (start_called)
+ need_resched = 1;
+ } else {
+ if (wait)
+ return __wait_evt(-1, tskid);
+ else
+ __schedule(0, tskid, 0);
+ }
+
+ return 0;
+}
+
+uint32_t task_wait_event(int timeout_us)
+{
+ return __wait_evt(timeout_us, TASK_ID_IDLE);
+}
+
+uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
+{
+ uint64_t deadline = get_time().val + timeout_us;
+ uint32_t events = 0;
+ int time_remaining_us = timeout_us;
+
+ /* Add the timer event to the mask so we can indicate a timeout */
+ event_mask |= TASK_EVENT_TIMER;
+
+ while (!(events & event_mask)) {
+ /* Collect events to re-post later */
+ events |= __wait_evt(time_remaining_us, TASK_ID_IDLE);
+
+ time_remaining_us = deadline - get_time().val;
+ if (timeout_us > 0 && time_remaining_us <= 0) {
+ /* Ensure we return a TIMER event if we timeout */
+ events |= TASK_EVENT_TIMER;
+ break;
+ }
+ }
+
+ /* Re-post any other events collected */
+ if (events & ~event_mask)
+ atomic_or(&current_task->events, events & ~event_mask);
+
+ return events & event_mask;
+}
+
+uint32_t get_int_mask(void)
+{
+ uint32_t ret;
+
+ asm volatile ("csrr %0, mie" : "=r"(ret));
+ return ret;
+}
+
+void set_int_mask(uint32_t val)
+{
+ asm volatile ("csrw mie, %0" : : "r"(val));
+}
+
+void task_enable_all_tasks(void)
+{
+ /* Mark all tasks as ready and able to run. */
+ tasks_ready = tasks_enabled = BIT(TASK_ID_COUNT) - 1;
+ /* Reschedule the highest priority task. */
+ __schedule(0, 0, 0);
+}
+
+void task_enable_irq(int irq)
+{
+ uint32_t int_mask = get_int_mask();
+
+ interrupt_disable();
+ chip_enable_irq(irq);
+ set_int_mask(int_mask);
+}
+
+void task_disable_irq(int irq)
+{
+ uint32_t int_mask = get_int_mask();
+
+ interrupt_disable();
+ chip_disable_irq(irq);
+ set_int_mask(int_mask);
+}
+
+void task_clear_pending_irq(int irq)
+{
+ chip_clear_pending_irq(irq);
+}
+
+void task_trigger_irq(int irq)
+{
+ int cpu_int = chip_trigger_irq(irq);
+
+ if (cpu_int > 0) {
+ sw_int_num = irq;
+ __schedule(0, 0, cpu_int);
+ }
+}
+
+/*
+ * Initialize IRQs in the IVIC and set their priorities as defined by the
+ * DECLARE_IRQ statements.
+ */
+static void ivic_init_irqs(void)
+{
+ /* chip-specific interrupt controller initialization */
+ chip_init_irqs();
+ /*
+ * Re-enable global interrupts in case they're disabled. On a reboot,
+ * they're already enabled; if we've jumped here from another image,
+ * they're not.
+ */
+ interrupt_enable();
+}
+
+void mutex_lock(struct mutex *mtx)
+{
+ uint32_t locked;
+ uint32_t id = 1 << task_get_current();
+
+ ASSERT(id != TASK_ID_INVALID);
+ atomic_or(&mtx->waiters, id);
+
+ while (1) {
+ asm volatile (
+ /* set lock value */
+ "li %0, 2\n\t"
+ /* attempt to acquire lock */
+ "amoswap.w.aq %0, %0, %1\n\t"
+ : "=r" (locked), "+A" (mtx->lock));
+ /* we got it! */
+ if (!locked)
+ break;
+ /* Contention on the mutex */
+ /* Sleep waiting for our turn */
+ task_wait_event_mask(TASK_EVENT_MUTEX, 0);
+ }
+
+ atomic_clear(&mtx->waiters, id);
+}
+
+void mutex_unlock(struct mutex *mtx)
+{
+ uint32_t waiters;
+ task_ *tsk = current_task;
+
+ /* give back the lock */
+ asm volatile (
+ "amoswap.w.aqrl zero, zero, %0\n\t"
+ : "+A" (mtx->lock));
+ waiters = mtx->waiters;
+
+ while (waiters) {
+ task_id_t id = __fls(waiters);
+
+ waiters &= ~BIT(id);
+
+ /* Somebody is waiting on the mutex */
+ task_set_event(id, TASK_EVENT_MUTEX, 0);
+ }
+
+ /* Ensure no event is remaining from mutex wake-up */
+ atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
+}
+
+void task_print_list(void)
+{
+ int i;
+
+ ccputs("Task Ready Name Events Time (s) StkUsed\n");
+
+ for (i = 0; i < TASK_ID_COUNT; i++) {
+ char is_ready = (tasks_ready & (1<<i)) ? 'R' : ' ';
+ uint32_t *sp;
+
+ int stackused = tasks_init[i].stack_size;
+
+ for (sp = tasks[i].stack;
+ sp < (uint32_t *)tasks[i].sp && *sp == STACK_UNUSED_VALUE;
+ sp++)
+ stackused -= sizeof(uint32_t);
+
+ ccprintf("%4d %c %-16s %08x %11.6ld %3d/%3d\n", i, is_ready,
+ task_names[i], tasks[i].events, tasks[i].runtime,
+ stackused, tasks_init[i].stack_size);
+ cflush();
+ }
+}
+
+int command_task_info(int argc, char **argv)
+{
+#ifdef CONFIG_TASK_PROFILING
+ int total = 0;
+ int i;
+#endif
+
+ task_print_list();
+
+#ifdef CONFIG_TASK_PROFILING
+ ccputs("IRQ counts by type:\n");
+ cflush();
+ for (i = 0; i < ARRAY_SIZE(irq_dist); i++) {
+ if (irq_dist[i]) {
+ ccprintf("%4d %8d\n", i, irq_dist[i]);
+ total += irq_dist[i];
+ }
+ }
+
+ ccprintf("Service calls: %11d\n", svc_calls);
+ ccprintf("Total exceptions: %11d\n", total + svc_calls);
+ ccprintf("Task switches: %11d\n", task_switches);
+ ccprintf("Task switching started: %11.6ld s\n", task_start_time);
+ ccprintf("Time in tasks: %11.6ld s\n",
+ get_time().val - task_start_time);
+ ccprintf("Time in exceptions: %11.6ld s\n", exc_total_time);
+#endif
+
+ return EC_SUCCESS;
+}
+DECLARE_CONSOLE_COMMAND(taskinfo, command_task_info,
+ NULL,
+ "Print task info");
+
+static int command_task_ready(int argc, char **argv)
+{
+ if (argc < 2) {
+ ccprintf("tasks_ready: 0x%08x\n", tasks_ready);
+ } else {
+ tasks_ready = strtoi(argv[1], NULL, 16);
+ ccprintf("Setting tasks_ready to 0x%08x\n", tasks_ready);
+ __schedule(0, 0, 0);
+ }
+
+ return EC_SUCCESS;
+}
+DECLARE_CONSOLE_COMMAND(taskready, command_task_ready,
+ "[setmask]",
+ "Print/set ready tasks");
+
+void task_pre_init(void)
+{
+ uint32_t *stack_next = (uint32_t *)task_stacks;
+ int i;
+
+ /* Fill the task memory with initial values */
+ for (i = 0; i < TASK_ID_COUNT; i++) {
+ uint32_t *sp;
+ /* Stack size in words */
+ uint32_t ssize = tasks_init[i].stack_size / 4;
+
+ tasks[i].stack = stack_next;
+
+ /*
+ * Update stack used by first frame: 28 regs + MEPC + (FP regs)
+ */
+ sp = stack_next + ssize - TASK_SCRATCHPAD_SIZE;
+ tasks[i].sp = (uint32_t)sp;
+
+ /* Initial context on stack (see __switch_task()) */
+ sp[TASK_SCRATCHPAD_SIZE-2] = tasks_init[i].a0; /* a0 */
+ sp[TASK_SCRATCHPAD_SIZE-1] = (uint32_t)task_exit_trap; /* ra */
+ sp[0] = tasks_init[i].pc; /* pc/mepc */
+
+ /* Fill unused stack; also used to detect stack overflow. */
+ for (sp = stack_next; sp < (uint32_t *)tasks[i].sp; sp++)
+ *sp = STACK_UNUSED_VALUE;
+
+ stack_next += ssize;
+ }
+
+ /*
+ * Fill in guard value in scratchpad to prevent stack overflow
+ * detection failure on the first context switch. This works because
+ * the first word in the scratchpad is where the switcher will store
+ * sp, so it's ok to blow away.
+ */
+ ((task_ *)scratchpad)->stack = (uint32_t *)scratchpad;
+ *(uint32_t *)scratchpad = STACK_UNUSED_VALUE;
+
+ /* Initialize IRQs */
+ ivic_init_irqs();
+}
+
+int task_start(void)
+{
+#ifdef CONFIG_TASK_PROFILING
+ task_start_time = exc_end_time = get_time().val;
+#endif
+
+ return __task_start();
+}