author     Jack Rosenthal <jrosenth@chromium.org>    2021-11-04 12:11:58 -0600
committer  Commit Bot <commit-bot@chromium.org>      2021-11-05 04:22:34 +0000
commit     252457d4b21f46889eebad61d4c0a65331919cec
tree       01856c4d31d710b20e85a74c8d7b5836e35c3b98 /core/cortex-m
parent     08f5a1e6fc2c9467230444ac9b582dcf4d9f0068
download   chrome-ec-stabilize-14526.73.B-ish.tar.gz

In the interest of making long-term branch maintenance incur as little
technical debt on us as possible, we should not maintain any files on the
branch we are not actually using. This has the added effect of making it
extremely clear, when merging CLs from the main branch, when changes have
the possibility to affect us.

The follow-on CL adds a convenience script to actually pull updates from
the main branch and generate a CL for the update.

BUG=b:204206272
BRANCH=ish
TEST=make BOARD=arcada_ish && make BOARD=drallion_ish

Signed-off-by: Jack Rosenthal <jrosenth@chromium.org>
Change-Id: I17e4694c38219b5a0823e0a3e55a28d1348f4b18
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/ec/+/3262038
Reviewed-by: Jett Rink <jettrink@chromium.org>
Reviewed-by: Tom Hughes <tomhughes@chromium.org>
Diffstat (limited to 'core/cortex-m')
l---------  core/cortex-m/aes.S                     1
-rw-r--r--  core/cortex-m/atomic.h                 41
-rw-r--r--  core/cortex-m/build.mk                 34
-rw-r--r--  core/cortex-m/cache.S                  76
-rw-r--r--  core/cortex-m/config_core.h            15
-rw-r--r--  core/cortex-m/cpu.c                    65
-rw-r--r--  core/cortex-m/cpu.h                   103
-rw-r--r--  core/cortex-m/debug.c                  12
-rw-r--r--  core/cortex-m/debug.h                  37
-rw-r--r--  core/cortex-m/ec.lds.S                721
l---------  core/cortex-m/ghash.S                   1
-rw-r--r--  core/cortex-m/include/fpu.h            35
-rw-r--r--  core/cortex-m/include/mpu.h           144
-rw-r--r--  core/cortex-m/include/mpu_private.h    25
-rw-r--r--  core/cortex-m/init.S                  135
-rw-r--r--  core/cortex-m/irq_handler.h            44
l---------  core/cortex-m/ldivmod.S                 1
-rw-r--r--  core/cortex-m/llsr.c                   65
-rw-r--r--  core/cortex-m/mpu.c                   457
-rw-r--r--  core/cortex-m/panic-internal.h         11
-rw-r--r--  core/cortex-m/panic.c                 475
-rw-r--r--  core/cortex-m/switch.S                101
-rw-r--r--  core/cortex-m/task.c                 1088
l---------  core/cortex-m/uldivmod.S                1
-rw-r--r--  core/cortex-m/vecttable.c             400
-rw-r--r--  core/cortex-m/watchdog.c               58
26 files changed, 0 insertions, 4146 deletions
diff --git a/core/cortex-m/aes.S b/core/cortex-m/aes.S
deleted file mode 120000
index 39d1286943..0000000000
--- a/core/cortex-m/aes.S
+++ /dev/null
@@ -1 +0,0 @@
-../../third_party/boringssl/core/cortex-m/aes.S \ No newline at end of file
diff --git a/core/cortex-m/atomic.h b/core/cortex-m/atomic.h
deleted file mode 100644
index e9b96f6fd5..0000000000
--- a/core/cortex-m/atomic.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/* Atomic operations for ARMv7 */
-
-#ifndef __CROS_EC_ATOMIC_H
-#define __CROS_EC_ATOMIC_H
-
-#include "common.h"
-
-typedef int atomic_t;
-typedef atomic_t atomic_val_t;
-
-static inline atomic_val_t atomic_clear_bits(atomic_t *addr, atomic_val_t bits)
-{
- return __atomic_fetch_and(addr, ~bits, __ATOMIC_SEQ_CST);
-}
-
-static inline atomic_val_t atomic_or(atomic_t *addr, atomic_val_t bits)
-{
- return __atomic_fetch_or(addr, bits, __ATOMIC_SEQ_CST);
-}
-
-static inline atomic_val_t atomic_add(atomic_t *addr, atomic_val_t value)
-{
- return __atomic_fetch_add(addr, value, __ATOMIC_SEQ_CST);
-}
-
-static inline atomic_val_t atomic_sub(atomic_t *addr, atomic_val_t value)
-{
- return __atomic_fetch_sub(addr, value, __ATOMIC_SEQ_CST);
-}
-
-static inline atomic_val_t atomic_clear(atomic_t *addr)
-{
- return __atomic_exchange_n(addr, 0, __ATOMIC_SEQ_CST);
-}
-
-#endif /* __CROS_EC_ATOMIC_H */
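
For context, a minimal usage sketch of the wrappers deleted above. The variable
and event-bit names are hypothetical, not from the original header; BIT() comes
from common.h, which atomic.h already includes.

#include "atomic.h"
#include "common.h"

static atomic_t pending_events;

#define EVENT_WAKE  BIT(0)
#define EVENT_TIMER BIT(1)

void signal_wake(void)
{
	/* __atomic_fetch_or() returns the value held *before* the OR. */
	atomic_or(&pending_events, EVENT_WAKE);
}

atomic_val_t consume_all_events(void)
{
	/* Atomically read and zero the whole bitmap in one exchange. */
	return atomic_clear(&pending_events);
}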
diff --git a/core/cortex-m/build.mk b/core/cortex-m/build.mk
deleted file mode 100644
index ad7ab6eacc..0000000000
--- a/core/cortex-m/build.mk
+++ /dev/null
@@ -1,34 +0,0 @@
-# -*- makefile -*-
-# Copyright 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# Cortex-M4 core OS files build
-#
-
-# Use coreboot-sdk
-$(call set-option,CROSS_COMPILE,\
- $(CROSS_COMPILE_arm),\
- /opt/coreboot-sdk/bin/arm-eabi-)
-
-# FPU compilation flags
-CFLAGS_FPU-$(CONFIG_FPU)=-mfpu=fpv4-sp-d16 -mfloat-abi=hard
-
-# CPU specific compilation flags
-CFLAGS_CPU+=-mthumb -Os -mno-sched-prolog
-CFLAGS_CPU+=-mno-unaligned-access
-CFLAGS_CPU+=$(CFLAGS_FPU-y)
-
-ifneq ($(CONFIG_LTO),)
-CFLAGS_CPU+=-flto
-LDFLAGS_EXTRA+=-flto
-endif
-
-core-y=cpu.o debug.o init.o ldivmod.o llsr.o uldivmod.o vecttable.o
-core-$(CONFIG_AES)+=aes.o
-core-$(CONFIG_AES_GCM)+=ghash.o
-core-$(CONFIG_ARMV7M_CACHE)+=cache.o
-core-$(CONFIG_COMMON_PANIC_OUTPUT)+=panic.o
-core-$(CONFIG_COMMON_RUNTIME)+=switch.o task.o
-core-$(CONFIG_WATCHDOG)+=watchdog.o
-core-$(CONFIG_MPU)+=mpu.o
diff --git a/core/cortex-m/cache.S b/core/cortex-m/cache.S
deleted file mode 100644
index 0a3d3bb67d..0000000000
--- a/core/cortex-m/cache.S
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Copyright 2018 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- *
- * ARMv7-M architectural caches maintenance operations.
- */
-
-.syntax unified
-.text
-.thumb
-
-/* System Control Block: cache registers */
-#define SCB_CCSIDR 0xe000ed80
-#define SCB_CCSELR 0xe000ed84
-#define SCB_DCISW 0xe000ef60
-#define SCB_DCCISW 0xe000ef74
-
-.macro dcache_set_way_op name register
-@
-@ Perform an operation on all D-cache sets/ways.
-@
-@ Note: implemented in assembly to guarantee that we are not touching the
-@ D-cache in the middle of the loop.
-@
-.thumb_func
-.section .text.\name
-.global \name
-\name:
- /* Select Level-1 Data cache (for operations on CCSIDR). */
- ldr r1, =SCB_CCSELR
- movs r0, #0
- ldr r2, =SCB_CCSIDR
- str r0, [r1] /* set CCSELR = 0 */
-
- /* Ensure the CCSELR write is effective before reading CCSIDR. */
- dsb
- /* CCSIDR contains the cache geometry. */
- ldr r3, [r2] /* [27:13] Number of sets -1 [12:3] Number of ways -1 */
-
- /* register used to do the set/way cache operation. */
- ldr r0, =\register
- /* r2 is the number of cache 'sets' - 1 */
- ubfx r2, r3, #13, #15
- /* r12 is the number of cache 'ways' - 1 */
- ubfx r12, r3, #3, #10
-
-1:
- mov r1, r12 /* reset way index */
-2:
- /*
- * Build address Set/Way operation e.g DC(C)ISW
- * [31:30] way index [13:5] set index
- */
- lsls r3, r2, #5 /* set index */
- /* TODO(crbug.com/848704) remove cache geometry assumptions */
- orr r3, r3, r1, lsl #30 /* way index */
- /* Perform operation (e.g invalidate) on a D-cache line */
- str r3, [r0]
- /* go to previous way */
- subs r1, #1
- bcs 2b
- /* go to previous set */
- subs r2, #1
- bcs 1b
-
- /* Ensure everything has propagated and return. */
- dsb
- isb
- bx lr
-.endm
-
-/* D-cache Invalidate by set-way */
-dcache_set_way_op cpu_invalidate_dcache SCB_DCISW
-
-/* D-cache Clean and Invalidate by set-way, to Point of Coherency */
-dcache_set_way_op cpu_clean_invalidate_dcache SCB_DCCISW
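
As a hedged illustration (not part of the original file) of the operand layout
built by the loop above: the value written to SCB_DCISW/SCB_DCCISW packs the
way index into bits [31:30] and the set index into bits [13:5], matching the
lsl #30 / lsl #5 shifts in the assembly.

#include <stdint.h>

static inline uint32_t dcache_set_way_operand(uint32_t set, uint32_t way)
{
	/* Way in [31:30], set in [13:5], per the fixed geometry assumed above. */
	return (way << 30) | (set << 5);
}

/* Example: set 3, way 1 encodes to 0x40000060. */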
diff --git a/core/cortex-m/config_core.h b/core/cortex-m/config_core.h
deleted file mode 100644
index 0665b28852..0000000000
--- a/core/cortex-m/config_core.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* Copyright 2013 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef __CROS_EC_CONFIG_CORE_H
-#define __CROS_EC_CONFIG_CORE_H
-
-/* Linker binary architecture and format */
-#define BFD_ARCH arm
-#define BFD_FORMAT "elf32-littlearm"
-
-#define CONFIG_SOFTWARE_PANIC
-
-#endif /* __CROS_EC_CONFIG_CORE_H */
diff --git a/core/cortex-m/cpu.c b/core/cortex-m/cpu.c
deleted file mode 100644
index 7c31892c18..0000000000
--- a/core/cortex-m/cpu.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- *
- * Set up the Cortex-M core
- */
-
-#include "common.h"
-#include "cpu.h"
-#include "hooks.h"
-
-void cpu_init(void)
-{
- /* Catch divide by 0 and unaligned access */
- CPU_NVIC_CCR |= CPU_NVIC_CCR_DIV_0_TRAP | CPU_NVIC_CCR_UNALIGN_TRAP;
-
- /* Enable reporting of memory faults, bus faults and usage faults */
- CPU_NVIC_SHCSR |= CPU_NVIC_SHCSR_MEMFAULTENA |
- CPU_NVIC_SHCSR_BUSFAULTENA | CPU_NVIC_SHCSR_USGFAULTENA;
-}
-
-#ifdef CONFIG_ARMV7M_CACHE
-static void cpu_invalidate_icache(void)
-{
- /*
- * Invalidates the entire instruction cache to the point of
- * unification.
- */
- CPU_SCB_ICIALLU = 0;
- asm volatile("dsb; isb");
-}
-
-void cpu_enable_caches(void)
-{
- /* Check whether the I-cache is already enabled */
- if (!(CPU_NVIC_CCR & CPU_NVIC_CCR_ICACHE)) {
- /* Invalidate the I-cache first */
- cpu_invalidate_icache();
- /* Turn on the caching */
- CPU_NVIC_CCR |= CPU_NVIC_CCR_ICACHE;
- asm volatile("dsb; isb");
- }
- /* Check whether the D-cache is already enabled */
- if (!(CPU_NVIC_CCR & CPU_NVIC_CCR_DCACHE)) {
- /* Invalidate the D-cache first */
- cpu_invalidate_dcache();
- /* Turn on the caching */
- CPU_NVIC_CCR |= CPU_NVIC_CCR_DCACHE;
- asm volatile("dsb; isb");
- }
-}
-
-void cpu_disable_caches(void)
-{
- /*
- * Disable the I-cache and the D-cache
- * The I-cache will be invalidated after the reboot/sysjump if needed
- * (e.g after a flash update).
- */
- cpu_clean_invalidate_dcache();
- CPU_NVIC_CCR &= ~(CPU_NVIC_CCR_ICACHE | CPU_NVIC_CCR_DCACHE);
- asm volatile("dsb; isb");
-}
-DECLARE_HOOK(HOOK_SYSJUMP, cpu_disable_caches, HOOK_PRIO_LAST);
-#endif /* CONFIG_ARMV7M_CACHE */
diff --git a/core/cortex-m/cpu.h b/core/cortex-m/cpu.h
deleted file mode 100644
index 0b03302bfc..0000000000
--- a/core/cortex-m/cpu.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- *
- * Registers map and definitions for Cortex-MLM4x processor
- */
-
-#ifndef __CROS_EC_CPU_H
-#define __CROS_EC_CPU_H
-
-#include <stdint.h>
-#include "compile_time_macros.h"
-
-/* Macro to access 32-bit registers */
-#define CPUREG(addr) (*(volatile uint32_t*)(addr))
-
-#define CPU_NVIC_ST_CTRL CPUREG(0xE000E010)
-#define ST_ENABLE BIT(0)
-#define ST_TICKINT BIT(1)
-#define ST_CLKSOURCE BIT(2)
-#define ST_COUNTFLAG BIT(16)
-
-/* Nested Vectored Interrupt Controller */
-#define CPU_NVIC_EN(x) CPUREG(0xe000e100 + 4 * (x))
-#define CPU_NVIC_DIS(x) CPUREG(0xe000e180 + 4 * (x))
-#define CPU_NVIC_UNPEND(x) CPUREG(0xe000e280 + 4 * (x))
-#define CPU_NVIC_PRI(x) CPUREG(0xe000e400 + 4 * (x))
-/* SCB AIRCR : Application interrupt and reset control register */
-#define CPU_NVIC_APINT CPUREG(0xe000ed0c)
-#define CPU_NVIC_APINT_SYSRST BIT(2) /* System reset request */
-#define CPU_NVIC_APINT_PRIOGRP (BIT(8)|BIT(9)|BIT(10))
-#define CPU_NVIC_APINT_ENDIAN BIT(15) /* Endianness */
-#define CPU_NVIC_APINT_KEY_RD (0xFA05U << 16)
-#define CPU_NVIC_APINT_KEY_WR (0x05FAU << 16)
-/* NVIC STIR : Software Trigger Interrupt Register */
-#define CPU_NVIC_SWTRIG CPUREG(0xe000ef00)
-/* SCB SCR : System Control Register */
-#define CPU_SCB_SYSCTRL CPUREG(0xe000ed10)
-
-#define CPU_NVIC_CCR CPUREG(0xe000ed14)
-#define CPU_NVIC_SHCSR CPUREG(0xe000ed24)
-#define CPU_NVIC_CFSR CPUREG(0xe000ed28)
-#define CPU_NVIC_HFSR CPUREG(0xe000ed2c)
-#define CPU_NVIC_DFSR CPUREG(0xe000ed30)
-#define CPU_NVIC_MFAR CPUREG(0xe000ed34)
-#define CPU_NVIC_BFAR CPUREG(0xe000ed38)
-
-enum {
- CPU_NVIC_CFSR_BFARVALID = BIT(15),
- CPU_NVIC_CFSR_MFARVALID = BIT(7),
-
- CPU_NVIC_CCR_ICACHE = BIT(17),
- CPU_NVIC_CCR_DCACHE = BIT(16),
- CPU_NVIC_CCR_DIV_0_TRAP = BIT(4),
- CPU_NVIC_CCR_UNALIGN_TRAP = BIT(3),
-
- CPU_NVIC_HFSR_DEBUGEVT = 1UL << 31,
- CPU_NVIC_HFSR_FORCED = BIT(30),
- CPU_NVIC_HFSR_VECTTBL = BIT(1),
-
- CPU_NVIC_SHCSR_MEMFAULTENA = BIT(16),
- CPU_NVIC_SHCSR_BUSFAULTENA = BIT(17),
- CPU_NVIC_SHCSR_USGFAULTENA = BIT(18),
-};
-
-/* System Control Block: cache registers */
-#define CPU_SCB_CCSIDR CPUREG(0xe000ed80)
-#define CPU_SCB_CCSELR CPUREG(0xe000ed84)
-#define CPU_SCB_ICIALLU CPUREG(0xe000ef50)
-#define CPU_SCB_DCISW CPUREG(0xe000ef60)
-#define CPU_SCB_DCCISW CPUREG(0xe000ef74)
-
-/* Set up the cpu to detect faults */
-void cpu_init(void);
-/* Enable the CPU I-cache and D-cache if they are not already enabled */
-void cpu_enable_caches(void);
-/* Disable the CPU I-cache and D-cache */
-void cpu_disable_caches(void);
-/* Invalidate the D-cache */
-void cpu_invalidate_dcache(void);
-/* Clean and Invalidate the D-cache to the Point of Coherency */
-void cpu_clean_invalidate_dcache(void);
-
-/* Invalidate a single range of the D-cache */
-void cpu_invalidate_dcache_range(uintptr_t base, unsigned int length);
-/* Clean and Invalidate a single range of the D-cache */
-void cpu_clean_invalidate_dcache_range(uintptr_t base, unsigned int length);
-
-/* Set the priority of the given IRQ in the NVIC (0 is highest). */
-static inline void cpu_set_interrupt_priority(uint8_t irq, uint8_t priority)
-{
- const uint32_t prio_shift = irq % 4 * 8 + 5;
-
- if (priority > 7)
- priority = 7;
-
- CPU_NVIC_PRI(irq / 4) =
- (CPU_NVIC_PRI(irq / 4) &
- ~(7 << prio_shift)) |
- (priority << prio_shift);
-}
-
-#endif /* __CROS_EC_CPU_H */
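
A worked example of the priority packing in cpu_set_interrupt_priority() above
(the numbers are illustrative, not from the original header):

/* irq = 6, priority = 2 */
/* 6 / 4 == 1, so the write goes to CPU_NVIC_PRI(1), which covers IRQs 4-7. */
/* prio_shift = (6 % 4) * 8 + 5 = 21, so the 3-bit priority lands in bits [23:21]. */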
diff --git a/core/cortex-m/debug.c b/core/cortex-m/debug.c
deleted file mode 100644
index db8891b5d8..0000000000
--- a/core/cortex-m/debug.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright 2021 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "debug.h"
-#include "stdbool.h"
-
-bool debugger_is_connected(void)
-{
- return CPU_DHCSR & DHCSR_C_DEBUGEN;
-}
diff --git a/core/cortex-m/debug.h b/core/cortex-m/debug.h
deleted file mode 100644
index ae5ef08d06..0000000000
--- a/core/cortex-m/debug.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright 2021 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef __CROS_EC_DEBUG_H
-#define __CROS_EC_DEBUG_H
-
-#include "common.h"
-#include "stdbool.h"
-
-/* For Cortex-M0, see "C1.6.3 Debug Halting Control and Status Register, DHCSR"
- * in the ARMv6-M Architecture Reference Manual.
- *
- * For other Cortex-M, see
- * "C1.6.2 Debug Halting Control and Status Register, DHCSR" in the ARMv7-M
- * Architecture Reference Manual or
- * https://developer.arm.com/documentation/ddi0337/e/core-debug/core-debug-registers/debug-halting-control-and-status-register.
- */
-#define CPU_DHCSR REG32(0xE000EDF0)
-#define DHCSR_C_DEBUGEN BIT(0)
-#define DHCSR_C_HALT BIT(1)
-#define DHCSR_C_STEP BIT(2)
-#define DHCSR_C_MASKINTS BIT(3)
-#ifndef CHIP_CORE_CORTEX_M0
-#define DHCSR_C_SNAPSTALL BIT(5) /* Not available on Cortex-M0 */
-#endif
-#define DHCSR_S_REGRDY BIT(16)
-#define DHCSR_S_HALT BIT(17)
-#define DHCSR_S_SLEEP BIT(18)
-#define DHCSR_S_LOCKUP BIT(19)
-#define DHCSR_S_RETIRE_ST BIT(24)
-#define DHCSR_S_RESET_ST BIT(25)
-
-bool debugger_is_connected(void);
-
-#endif /* __CROS_EC_DEBUG_H */
diff --git a/core/cortex-m/ec.lds.S b/core/cortex-m/ec.lds.S
deleted file mode 100644
index 7b08be81a6..0000000000
--- a/core/cortex-m/ec.lds.S
+++ /dev/null
@@ -1,721 +0,0 @@
-/* Copyright 2013 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "config.h"
-#include "rwsig.h"
-
-#define CONCAT_STAGE_1(w, x, y, z) w ## x ## y ## z
-#define CONCAT2(w, x) CONCAT_STAGE_1(w, x, , )
-#define CONCAT3(w, x, y) CONCAT_STAGE_1(w, x, y, )
-#define CONCAT4(w, x, y, z) CONCAT_STAGE_1(w, x, y, z)
-
-#define STRINGIFY0(name) #name
-#define STRINGIFY(name) STRINGIFY0(name)
-
-#ifdef RW_B_LDS
-#define FW_MEM_OFF_(section) CONFIG_##section##_B_MEM_OFF
-#else
-#define FW_MEM_OFF_(section) CONFIG_##section##_MEM_OFF
-#endif
-#define FW_MEM_OFF(section) (FW_MEM_OFF_(section))
-#define FW_OFF(section) (CONFIG_PROGRAM_MEMORY_BASE + FW_MEM_OFF(section))
-
-#define FW_SIZE_(section) CONFIG_##section##_SIZE
-#define FW_SIZE(section) FW_SIZE_(section)
-
-/*
- * Define the VMA (virtual memory address) of the ROM_RESIDENT region within
- * the EC image. This is full 32-bit address starting from
- * CONFIG_PROGRAM_MEMORY_BASE.
- */
-#define ROM_RES_OFF(section) FW_OFF(CONCAT2(section, _ROM_RESIDENT))
-#define ROM_RES_SIZE(section) FW_SIZE(CONCAT2(section, _ROM_RESIDENT))
-
-/*
- * Define the VMA (virtual memory address) of the ROM_RESIDENT region. Instead
- * of a full 32-bit address, set the VMA to be an offset within the flash memory
- * section. Objects linked into this section can pass the address of the
- * object unmodified to the public APIs of the flash and init_rom modules.
- */
-#ifdef SECTION_IS_RO
-#define ROM_RES_FLASH_OFF(section) \
- FW_MEM_OFF(CONCAT2(section, _ROM_RESIDENT)) + \
- CONFIG_EC_PROTECTED_STORAGE_OFF
-#else
-#define ROM_RES_FLASH_OFF(section) \
- FW_MEM_OFF(CONCAT2(section, _ROM_RESIDENT)) + \
- CONFIG_EC_WRITABLE_STORAGE_OFF
-#endif
-
-/* Indicates where .data LMA should reside. */
-#undef DATA_LMA_MEM_REGION
-
-OUTPUT_FORMAT(BFD_FORMAT, BFD_FORMAT, BFD_FORMAT)
-OUTPUT_ARCH(BFD_ARCH)
-ENTRY(reset)
-
-MEMORY
-{
-#if !defined(CONFIG_FLASH_PHYSICAL)
- IROM (rx) : ORIGIN = CONFIG_ROM_BASE, LENGTH = CONFIG_ROM_SIZE
-#else
-#if defined(SECTION_IS_RO) && defined(NPCX_RO_HEADER)
- /*
- * Header structure used by npcx booter in RO region.
-	 * Note that the header must be located in front of the FW that is
-	 * copied, but the header itself won't be copied to code RAM by the
-	 * booter.
- */
- FLASH_HDR (rx) : ORIGIN = FW_OFF(RO_HDR), LENGTH = FW_SIZE(RO_HDR)
- FLASH (rx) : ORIGIN = FW_OFF(SECTION) + FW_SIZE(RO_HDR), \
- LENGTH = FW_SIZE(SECTION) - FW_SIZE(RO_HDR)
-#else
- FLASH (rx) : ORIGIN = FW_OFF(SECTION), LENGTH = FW_SIZE(SECTION)
-#endif
-#ifdef CONFIG_CHIP_INIT_ROM_REGION
- ROM_RESIDENT (r) : \
- ORIGIN = ROM_RES_OFF(SECTION), \
- LENGTH = ROM_RES_SIZE(SECTION)
-
- ROM_RESIDENT_VMA (r) : \
- ORIGIN = ROM_RES_FLASH_OFF(SECTION), \
- LENGTH = ROM_RES_SIZE(SECTION)
-#endif /* CONFIG_CHIP_INIT_ROM_REGION */
-#ifdef CONFIG_SHAREDLIB
- SHARED_LIB (rx) : ORIGIN = FW_OFF(SHAREDLIB), \
- LENGTH = FW_SIZE(SHAREDLIB)
-#endif
-#endif /* !CONFIG_FLASH_PHYSICAL */
- IRAM (rw) : ORIGIN = CONFIG_RAM_BASE, LENGTH = CONFIG_RAM_SIZE
-
-#ifdef CONFIG_EXTERNAL_STORAGE
- CDRAM (rx) : \
- ORIGIN = CONFIG_PROGRAM_MEMORY_BASE + FW_MEM_OFF(SECTION), \
- LENGTH = FW_SIZE(SECTION)
-#endif /* CONFIG_EXTERNAL_STORAGE */
-
-#ifdef CONFIG_CHIP_MEMORY_REGIONS
-#define REGION(name, attr, start, size) \
- name(attr) : ORIGIN = (start), LENGTH = (size)
-#define REGION_LOAD REGION
-#include "memory_regions.inc"
-#undef REGION
-#undef REGION_LOAD
-#endif /* CONFIG_MEMORY_REGIONS */
-
-#ifdef CONFIG_DRAM_BASE
- DRAM (rwx) : ORIGIN = CONFIG_DRAM_BASE, LENGTH = CONFIG_DRAM_SIZE
-#endif
-}
-
-/*
- * Convenience macros for determining the correct output memory section.
- */
-#if !defined(CONFIG_FLASH_PHYSICAL)
- #define EC_IMAGE_LMA_MEM_REGION IROM
- #define EC_IMAGE_VMA_MEM_REGION IROM
- #define DATA_LMA_MEM_REGION IROM
-#else
- #define EC_IMAGE_LMA_MEM_REGION FLASH
- #ifdef CONFIG_EXTERNAL_STORAGE
- #define EC_IMAGE_VMA_MEM_REGION CDRAM
- #else
- #define EC_IMAGE_VMA_MEM_REGION FLASH
- #endif
-
- #ifdef CONFIG_CHIP_DATA_IN_INIT_ROM
- #define DATA_LMA_MEM_REGION ROM_RESIDENT
- #else
- #define DATA_LMA_MEM_REGION FLASH
- #endif
-#endif
-
-SECTIONS
-{
-#if defined(SECTION_IS_RO) && defined(NPCX_RO_HEADER)
- .header : {
- KEEP(*(.header))
- } > FLASH_HDR
-#endif
-#ifdef CONFIG_SHAREDLIB
- .roshared : {
- KEEP(*(.roshared*))
- } > SHARED_LIB
-#endif
- .text : {
-#ifdef SECTION_IS_RO
- . = . + CONFIG_RO_HEAD_ROOM;
-#endif
-#ifdef SECTION_IS_RW
- . = . + CONFIG_RW_HEAD_ROOM;
-#endif
- *(.text.vecttable)
- . = ALIGN(4);
- __image_data_offset = .;
- KEEP(*(.rodata.ver))
-
- . = ALIGN(4);
- KEEP(*(.rodata.pstate))
-
- . = ALIGN(4);
- STRINGIFY(OUTDIR/core/CORE/init.o) (.text)
-#if NPCX_FAMILY_VERSION >= NPCX_FAMILY_NPCX7 && !defined(CONFIG_HIBERNATE_PSL)
- /* Keep hibernate utility in last code ram block */
- . = ALIGN(4);
- KEEP(*(.after_init))
- __after_init_end = .;
-#endif
- *(.text*)
-#ifdef CONFIG_EXTERNAL_STORAGE
- . = ALIGN(4);
- __flash_lpfw_start = .;
- /* Entering deep idle FW for better power consumption */
- KEEP(*(.lowpower_ram))
- . = ALIGN(4);
- __flash_lpfw_end = .;
- __flash_lplfw_start = .;
- /* GDMA utilities for better FW download speed */
- KEEP(*(.lowpower_ram2))
- . = ALIGN(4);
- __flash_lplfw_end = .;
-#endif /* CONFIG_EXTERNAL_STORAGE */
- } > EC_IMAGE_VMA_MEM_REGION AT > EC_IMAGE_LMA_MEM_REGION
-
- . = ALIGN(4);
- .rodata : {
- /* Symbols defined here are declared in link_defs.h */
- __irqprio = .;
- KEEP(*(.rodata.irqprio))
- __irqprio_end = .;
-
- . = ALIGN(4);
- __cmds = .;
- KEEP(*(SORT(.rodata.cmds*)))
- __cmds_end = .;
-
- . = ALIGN(4);
- __extension_cmds = .;
- KEEP(*(.rodata.extensioncmds))
- __extension_cmds_end = .;
-
- . = ALIGN(4);
- __hcmds = .;
- KEEP(*(SORT(.rodata.hcmds*)))
- __hcmds_end = .;
-
- . = ALIGN(4);
- __mkbp_evt_srcs = .;
- KEEP(*(.rodata.evtsrcs))
- __mkbp_evt_srcs_end = .;
-
- . = ALIGN(4);
- __hooks_init = .;
- KEEP(*(.rodata.HOOK_INIT))
- __hooks_init_end = .;
-
- __hooks_pre_freq_change = .;
- KEEP(*(.rodata.HOOK_PRE_FREQ_CHANGE))
- __hooks_pre_freq_change_end = .;
-
- __hooks_freq_change = .;
- KEEP(*(.rodata.HOOK_FREQ_CHANGE))
- __hooks_freq_change_end = .;
-
- __hooks_sysjump = .;
- KEEP(*(.rodata.HOOK_SYSJUMP))
- __hooks_sysjump_end = .;
-
- __hooks_chipset_pre_init = .;
- KEEP(*(.rodata.HOOK_CHIPSET_PRE_INIT))
- __hooks_chipset_pre_init_end = .;
-
- __hooks_chipset_startup = .;
- KEEP(*(.rodata.HOOK_CHIPSET_STARTUP))
- __hooks_chipset_startup_end = .;
-
- __hooks_chipset_resume = .;
- KEEP(*(.rodata.HOOK_CHIPSET_RESUME))
- __hooks_chipset_resume_end = .;
-
- __hooks_chipset_suspend = .;
- KEEP(*(.rodata.HOOK_CHIPSET_SUSPEND))
- __hooks_chipset_suspend_end = .;
-
-#ifdef CONFIG_CHIPSET_RESUME_INIT_HOOK
- __hooks_chipset_resume_init = .;
- KEEP(*(.rodata.HOOK_CHIPSET_RESUME_INIT))
- __hooks_chipset_resume_init_end = .;
-
- __hooks_chipset_suspend_complete = .;
- KEEP(*(.rodata.HOOK_CHIPSET_SUSPEND_COMPLETE))
- __hooks_chipset_suspend_complete_end = .;
-#endif
-
- __hooks_chipset_shutdown = .;
- KEEP(*(.rodata.HOOK_CHIPSET_SHUTDOWN))
- __hooks_chipset_shutdown_end = .;
-
- __hooks_chipset_shutdown_complete = .;
- KEEP(*(.rodata.HOOK_CHIPSET_SHUTDOWN_COMPLETE))
- __hooks_chipset_shutdown_complete_end = .;
-
- __hooks_chipset_hard_off = .;
- KEEP(*(.rodata.HOOK_CHIPSET_HARD_OFF))
- __hooks_chipset_hard_off_end = .;
-
- __hooks_chipset_reset = .;
- KEEP(*(.rodata.HOOK_CHIPSET_RESET))
- __hooks_chipset_reset_end = .;
-
- __hooks_ac_change = .;
- KEEP(*(.rodata.HOOK_AC_CHANGE))
- __hooks_ac_change_end = .;
-
- __hooks_lid_change = .;
- KEEP(*(.rodata.HOOK_LID_CHANGE))
- __hooks_lid_change_end = .;
-
- __hooks_tablet_mode_change = .;
- KEEP(*(.rodata.HOOK_TABLET_MODE_CHANGE))
- __hooks_tablet_mode_change_end = .;
-
- __hooks_base_attached_change = .;
- KEEP(*(.rodata.HOOK_BASE_ATTACHED_CHANGE))
- __hooks_base_attached_change_end = .;
-
- __hooks_pwrbtn_change = .;
- KEEP(*(.rodata.HOOK_POWER_BUTTON_CHANGE))
- __hooks_pwrbtn_change_end = .;
-
- __hooks_battery_soc_change = .;
- KEEP(*(.rodata.HOOK_BATTERY_SOC_CHANGE))
- __hooks_battery_soc_change_end = .;
-
-#ifdef CONFIG_USB_SUSPEND
- __hooks_usb_change = .;
- KEEP(*(.rodata.HOOK_USB_PM_CHANGE))
- __hooks_usb_change_end = .;
-#endif
-
- __hooks_tick = .;
- KEEP(*(.rodata.HOOK_TICK))
- __hooks_tick_end = .;
-
- __hooks_second = .;
- KEEP(*(.rodata.HOOK_SECOND))
- __hooks_second_end = .;
-
- __hooks_usb_pd_disconnect = .;
- KEEP(*(.rodata.HOOK_USB_PD_DISCONNECT))
- __hooks_usb_pd_disconnect_end = .;
-
- __hooks_usb_pd_connect = .;
- KEEP(*(.rodata.HOOK_USB_PD_CONNECT))
- __hooks_usb_pd_connect_end = .;
-
- __deferred_funcs = .;
- KEEP(*(.rodata.deferred))
- __deferred_funcs_end = .;
-
- __usb_desc = .;
- KEEP(*(.rodata.usb_desc_conf))
- KEEP(*(SORT(.rodata.usb_desc*)))
- __usb_desc_end = .;
- . = ALIGN(4);
- KEEP(*(.rodata.usb_ep))
- KEEP(*(.rodata.usb_ep.usb_ep_tx))
- KEEP(*(.rodata.usb_ep.usb_ep_rx))
- KEEP(*(.rodata.usb_ep.usb_ep_reset))
- KEEP(*(.rodata.usb_ep.usb_iface_request))
-
- . = ALIGN(4);
- *(.rodata*)
-
-#ifndef CONFIG_CHIP_INIT_ROM_REGION
- /*
- * When a separate ROM resident section isn't enabled, ensure
- * the corresponding data objects are linked into the .rodata
- * section.
- */
- . = ALIGN(4);
- __init_rom_start = .;
- *(.init.rom)
- __init_rom_end = .;
-#endif /* CONFIG_CHIP_INIT_ROM_REGION */
-
-#if defined(SECTION_IS_RO) && defined(CONFIG_FLASH_CROS)
- . = ALIGN(64);
- KEEP(*(.google))
-#endif
-
- . = ALIGN(4);
- } > EC_IMAGE_VMA_MEM_REGION AT > EC_IMAGE_LMA_MEM_REGION
-
-#ifdef CONFIG_CHIP_DATA_IN_INIT_ROM
- __data_lma_start = ORIGIN(ROM_RESIDENT_VMA);
- #define INIT_ROM_LMA (ORIGIN(ROM_RESIDENT_VMA) + SIZEOF(.data))
-#else
- __data_lma_start = .;
- #define INIT_ROM_LMA ORIGIN(ROM_RESIDENT_VMA)
-#endif
-
-#ifdef CONFIG_PRESERVE_LOGS
- .preserve_logs(NOLOAD) : {
- . = ALIGN(8);
- *(SORT(.preserved_logs.*))
- . = ALIGN(8);
- __preserved_logs_end = .;
- } > IRAM
-
- ASSERT((SIZEOF(.preserve_logs) + CONFIG_RAM_BASE) ==
- __preserved_logs_end,
- "preserve_logs must be at CONFIG_RAM_BASE.")
-#endif
-
- .bss : {
- /*
- * Align to 512 bytes. This is convenient when some memory block
- * needs big alignment. This is the beginning of the RAM,
- * so there is usually no penalty on aligning this.
- */
- . = ALIGN(512);
- __bss_start = .;
- *(.bss.big_align)
- /* Stacks must be 64-bit aligned */
- . = ALIGN(8);
- *(.bss.system_stack)
- /* Rest of .bss takes care of its own alignment */
-
- /* Group libtpm2 data so it can be cleared on system reset */
- __bss_libtpm2_start = .;
- /* TPM registers should be cleared at the same time */
- STRINGIFY(OUTDIR/common/tpm_registers.o*)(.bss)
- *(.bss.Tpm2_common)
- __bss_libtpm2_end = .;
-
- *(.bss)
-
- /*
- * Reserve space for deferred function firing times.
- * Each time is a uint64_t, each func is a 32-bit pointer,
- * thus the scaling factor of two. The 8 byte alignment of
- * uint64_t is required by the ARM ABI.
- */
- . = ALIGN(8);
- __deferred_until = .;
- . += (__deferred_funcs_end - __deferred_funcs) * (8 / 4);
- __deferred_until_end = .;
- } > IRAM
-
- .bss.slow : {
- /* Region of RAM reclaimed from the little firmware(LFW). */
- *(.bss.slow)
- /*
- * Not replacing the loader, so .bss.slow is part of .bss.
- * It needs to be followed by __bss_end so that .bss.slow
- * will be zeroed by init.
- */
- . = ALIGN(4);
- __bss_end = .;
- } > IRAM
-
- .data : {
- . = ALIGN(4);
- __data_start = .;
- *(.data.tasks)
-
- /*
- * Group libtpm2 data so it can be reinitialized on
- * system reset
- */
- __data_libtpm2_start = .;
- Tpm2_*(.data)
- /* TPM registers should be reinitialized at the same time */
- STRINGIFY(OUTDIR/common/tpm_registers.o*)(.data)
- __data_libtpm2_end = .;
-
- /*
- * TPM reset currently only clears BSS for the TPM library.
- * It does not reset any initialized variables in data.
- * So, make sure there aren't any.
- */
- ASSERT(__data_libtpm2_start == __data_libtpm2_end,
- "libtpm2 .data section is nonzero");
-
- *(.data*)
-#ifdef CONFIG_MPU
- /*
- * It has to be aligned by 32 bytes to be a valid
- * MPU region.
- */
- . = ALIGN(32);
- __iram_text_start = .;
-#else
- . = ALIGN(4);
-#endif
- *(.iram.text)
-#ifdef CONFIG_MPU
- . = ALIGN(32);
- __iram_text_end = .;
-#else
- . = ALIGN(4);
-#endif
- __data_end = .;
-
- /*
- * Shared memory buffer must be at the end of preallocated
- * RAM, so it can expand to use all the remaining RAM.
- */
- __shared_mem_buf = .;
-
- /* NOTHING MAY GO AFTER THIS! */
- } > IRAM AT > DATA_LMA_MEM_REGION
-
- ASSERT((__shared_mem_buf + CONFIG_SHAREDMEM_MINIMUM_SIZE) <=
- (CONFIG_RAM_BASE + CONFIG_RAM_SIZE),
- "Not enough space for shared memory.")
-
- __ram_free = (CONFIG_RAM_BASE + CONFIG_RAM_SIZE) -
- (__shared_mem_buf + CONFIG_SHAREDMEM_MINIMUM_SIZE);
-
-#ifdef CONFIG_CHIP_DATA_IN_INIT_ROM
- /*
- * .data is ROM resident, last section in the EC image is the .rodata
- * section.
- */
- #define FLASH_USED_END (LOADADDR(.rodata) + SIZEOF(.rodata))
-#else
- /*
- * .data is included in the EC image and copied to RAM by the loader.
- */
- #define FLASH_USED_END (LOADADDR(.data) + SIZEOF(.data))
-#endif
-
- /*
- * __flash_used is used in flash free calculations by the makefile.
- * __image_size is stored in the struct image_data header and used
-	 * in hash calculations.
- */
- __flash_used = FLASH_USED_END - ORIGIN(EC_IMAGE_LMA_MEM_REGION);
-#ifndef CONFIG_CHIP_INIT_ROM_REGION
-#if !(defined(SECTION_IS_RW) && (CONFIG_FLASH_WRITE_SIZE > 4))
- __image_size = __flash_used;
-#else
- .rw_image_size_alignment :
- {
- . = ORIGIN(FLASH) + __flash_used;
- BYTE(0xFF);
- . = ALIGN (CONFIG_FLASH_WRITE_SIZE);
- } > FLASH = 0xFF
-
- __image_size = __flash_used + SIZEOF(.rw_image_size_alignment);
-#endif
-#endif /* CONFIG_CHIP_INIT_ROM_REGION */
-
-#ifdef CONFIG_FLASH_CROS
- /*
- * These linker labels are just for analysis and not used in the code.
- */
- __config_flash_size = CONFIG_FLASH_SIZE_BYTES;
- __config_ro_size = CONFIG_RO_SIZE;
- __config_ec_protected_storage_size = CONFIG_EC_PROTECTED_STORAGE_SIZE;
- __config_rw_size = CONFIG_RW_SIZE;
- __config_ec_writable_storage_size = CONFIG_EC_WRITABLE_STORAGE_SIZE;
-#endif
-
- /*
- * The linker won't notice if the .data section is too big to fit,
- * apparently because we're sending it into IRAM, not FLASH.
- * Verify that all sections linked into the FLASH region will fit.
- */
- ASSERT((LENGTH(EC_IMAGE_LMA_MEM_REGION)
-#if defined(CONFIG_RWSIG) && defined(SECTION_IS_RO)
- - CONFIG_RO_PUBKEY_SIZE
-#endif
-#if defined(CONFIG_RWSIG) && defined(SECTION_IS_RW)
- - CONFIG_RW_SIG_SIZE
-#endif
- ) >= __flash_used,
- "No room left in the flash")
-
-#ifdef CONFIG_CHIP_INIT_ROM_REGION
- /*
- * Image layout when ROM_RESIDENT region is used (lower addresses
- * at the top). This layout is setup by the LMA assignment.
- *
- * EC image layout (LMA) VMA
- * .header (if RO image) none
- * .text code RAM
- * .rodata code RAM + .text size
- * .data data RAM
- * .fill none
- * .init_rom flash offset
- *
- * The loader code copies the .text, .rodata, and .data sections into
- * the code RAM of the EC. The .header and .init_rom sections are not
- * copied by the loader.
- *
- * Image layout when ROM_RESIDENT region is used, and
- * CONFIG_CHIP_DATA_IN_INIT_ROM is enabled.
- *
- * EC image layout (LMA) VMA
- * .header (if RO image) none
- * .text code RAM
- * .rodata code RAM + .text size
- * .fill none
- * .data data RAM
- * .init_rom flash offset
- *
- * The loader code copies the .text and .rodata sections into the code
- * RAM of the EC. The .header, .data, and .init_rom sections are not
- * copied by the loader.
- *
- * EC initialization code copies the .data directly from flash to
- * data RAM at runtime.
- */
-
- /*
- * The layout assumes the ROM_RESIDENT region follows the FLASH
- * region.
- */
- ASSERT((ORIGIN(FLASH) + LENGTH(FLASH)) == ORIGIN(ROM_RESIDENT),
- ".init_rom section must follow the flash section")
-
- .init_rom INIT_ROM_LMA : {
- . = ALIGN(4);
- __init_rom_start = .;
- *(.init.rom)
- __init_rom_end = .;
- } > ROM_RESIDENT_VMA AT > ROM_RESIDENT
-
- /*
- * The ROM_RESIDENT section is assumed to be in the same physical
- * flash as the FLASH section. Fill the space between.
- */
- .fill : {
- . = FLASH_USED_END;
- . = ALIGN(4);
- __fill_start = .;
- FILL(0xFF);
- . = ORIGIN(FLASH) + LENGTH(FLASH) - 1;
- /* Need at least one byte so section is not omitted */
- BYTE(0xFF);
- __fill_end = .;
- } > FLASH
-
- /*
- * The end of the .fill region should also be the start of the ROM
- * resident region.
- */
- ASSERT(__fill_end == ORIGIN(ROM_RESIDENT),
- ".fill region end not aligned to start of ROM_RESIDENT region")
-
- /*
- * __image_size is used for hash calculation. When
- * CONFIG_CHIP_INIT_ROM_REGION is enabled, this includes the entire
- * FLASH region and the bytes used in the .init_rom section.
- */
-#ifdef CONFIG_CHIP_DATA_IN_INIT_ROM
- __image_size = LENGTH(FLASH) + SIZEOF(.init_rom) + SIZEOF(.data);
-#else
- __image_size = LENGTH(FLASH) + SIZEOF(.init_rom);
-#endif /* CONFIG_CHIP_DATA_IN_INIT_ROM */
-#endif /* CONFIG_CHIP_INIT_ROM_REGION */
-
-#ifdef CONFIG_CHIP_MEMORY_REGIONS
-#define REGION(name, attr, start, size) \
- .name(NOLOAD) : { \
- __##name##_start = .; \
- KEEP(*(SORT(.name.keep.*))) \
- *(SORT(.name.*)) \
- } > name
-#define REGION_LOAD(name, attr, start, size) \
- .name : { \
- __##name##_start = .; \
- KEEP(*(SORT(.name.keep.*))) \
- *(SORT(.name.*)) \
- } > name
-#include "memory_regions.inc"
-#undef REGION
-#undef REGION_LOAD
-#endif /* CONFIG_CHIP_MEMORY_REGIONS */
-
-#ifdef CONFIG_DRAM_BASE
-
- /*
- * Sections in DRAM region are constructed as like in non-DRAM regions:
- * .dram.data LMA is for preserving initialized data across resets.
- * The only difference is that they are all in the DRAM region:
- * .dram.text | LOAD
- * .dram.rodata | LOAD
- * .dram.data LMA | LOAD
- * .dram.data VMA |
- * .dram.bss | NOLOAD
- * TODO(b:123269246): Enable MPU protectable DRAM section. This might
- * introduce a RO-DRAM section for .dram.text, .dram.rodata and
- * .dram.data LMA.
- */
-
- .dram.text : {
- . = ALIGN(4);
- KEEP(*(SORT(.dram.text.keep.*)))
- *(SORT(.dram.text.*))
- . = ALIGN(4);
- } > DRAM
-
- .dram.rodata : {
- . = ALIGN(4);
- KEEP(*(SORT(.dram.rodata.keep.*)))
- *(SORT(.dram.rodata.*))
- . = ALIGN(4);
- } > DRAM
-
- __dram_data_lma_start = ADDR(.dram.rodata) + SIZEOF(.dram.rodata);
-
- /* Place .dram.data LMA in between .dram.rodata and .dram.data VMA. */
-#ifdef __clang__
- /*
- * The evaluation timing for SIZEOF() and symbols are different in
- * ld and lld.
- */
- .dram.data __dram_data_lma_start + SIZEOF(.dram.data) : {
-#else
- .dram.data __dram_data_lma_start +
- (__dram_data_end - __dram_data_start) : {
-#endif /* __clang__ */
- . = ALIGN(4);
- __dram_data_start = .;
- *(.dram.data*)
- . = ALIGN(4);
- __dram_data_end = .;
-
- /*
- * Normally, '> DRAM AT > DRAM' should be the same as '> DRAM',
-		 * and they will be at the same address. However, if the VMA
-		 * address is specified, LMA and VMA might have different addresses:
- * '> DRAM' places VMA at the address where section declaration
- * specified.
- * 'AT > DRAM' places LMA at the location counter's address.
- */
- } > DRAM AT > DRAM
-
- /*
-	 * ld assigns the correct attributes for .bss, but not for other .*.bss,
-	 * so we need an explicit NOLOAD.
- */
- .dram.bss(NOLOAD) : {
- . = ALIGN(4);
- __dram_bss_start = .;
- *(SORT(.dram.bss*))
- . = ALIGN(4);
- __dram_bss_end = .;
- } > DRAM
-#endif
-
-#if !(defined(SECTION_IS_RO) && defined(CONFIG_FLASH_CROS))
- /DISCARD/ : { *(.google) }
-#endif
- /DISCARD/ : { *(.ARM.*) }
-}
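
The __hooks_* start/end symbols emitted by the linker script above are consumed
by the hooks module. A hedged sketch of how such an array is walked follows; the
struct layout mirrors hooks.h/link_defs.h in this tree, but treat the details as
illustrative (the real code also honors each entry's priority).

/* Mirrors struct hook_data from hooks.h (illustrative). */
struct hook_data {
	void (*routine)(void);
	int priority;
};

extern const struct hook_data __hooks_init[];
extern const struct hook_data __hooks_init_end[];

static void run_init_hooks(void)
{
	const struct hook_data *p;

	/* Entries between the two symbols come from the
	 * KEEP(*(.rodata.HOOK_INIT)) input-section rule above. */
	for (p = __hooks_init; p < __hooks_init_end; p++)
		p->routine();
}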
diff --git a/core/cortex-m/ghash.S b/core/cortex-m/ghash.S
deleted file mode 120000
index e9acbf4b25..0000000000
--- a/core/cortex-m/ghash.S
+++ /dev/null
@@ -1 +0,0 @@
-../../third_party/boringssl/core/cortex-m/ghash.S \ No newline at end of file
diff --git a/core/cortex-m/include/fpu.h b/core/cortex-m/include/fpu.h
deleted file mode 100644
index 0949d336e2..0000000000
--- a/core/cortex-m/include/fpu.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/* Math utility functions for ARMv7 */
-
-#ifndef __CROS_EC_FPU_H
-#define __CROS_EC_FPU_H
-
-#ifdef CONFIG_FPU
-static inline float sqrtf(float v)
-{
- float root;
- asm volatile(
- "fsqrts %0, %1"
- : "=w" (root)
- : "w" (v)
- );
- return root;
-}
-
-static inline float fabsf(float v)
-{
- float root;
- asm volatile(
- "fabss %0, %1"
- : "=w" (root)
- : "w" (v)
- );
- return root;
-}
-#endif /* CONFIG_FPU */
-
-#endif /* __CROS_EC_FPU_H */
diff --git a/core/cortex-m/include/mpu.h b/core/cortex-m/include/mpu.h
deleted file mode 100644
index 610728b501..0000000000
--- a/core/cortex-m/include/mpu.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/* Copyright 2013 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/* MPU module for Cortex-M3 */
-
-#ifndef __CROS_EC_MPU_H
-#define __CROS_EC_MPU_H
-
-#include "common.h"
-#include "config.h" /* chips might override MPU attribute settings */
-
-/*
- * ARMv7-M SRAM region
- */
-#define CORTEX_M_SRAM_BASE 0x20000000
-
-/*
- * Region assignment. 7 as the highest, a higher index has a higher priority.
- * For example, using 7 for .iram.text allows us to mark entire RAM XN except
- * .iram.text, which is used for hibernation.
- * Region assignment is currently wasteful and can be changed if more
- * regions are needed in the future. For example, a second region may not
- * be necessary for all types, and REGION_CODE_RAM / REGION_STORAGE can be
- * made mutually exclusive.
- */
-enum mpu_region {
- REGION_DATA_RAM = 0, /* For internal data RAM */
- REGION_DATA_RAM2 = 1, /* Second region for unaligned size */
- REGION_CODE_RAM = 2, /* For internal code RAM */
- REGION_CODE_RAM2 = 3, /* Second region for unaligned size */
- REGION_STORAGE = 4, /* For mapped internal storage */
- REGION_STORAGE2 = 5, /* Second region for unaligned size */
- REGION_DATA_RAM_TEXT = 6, /* Exempt region of data RAM */
- REGION_CHIP_RESERVED = 7, /* Reserved for use in chip/ */
- /* only for chips with MPU supporting 16 regions */
- REGION_UNCACHED_RAM = 8, /* For uncached data RAM */
- REGION_UNCACHED_RAM2 = 9, /* Second region for unaligned size */
- REGION_ROLLBACK = 10, /* For rollback */
-};
-
-#define MPU_TYPE REG32(0xe000ed90)
-#define MPU_CTRL REG32(0xe000ed94)
-#define MPU_NUMBER REG32(0xe000ed98)
-#define MPU_BASE REG32(0xe000ed9c)
-#define MPU_SIZE REG16(0xe000eda0)
-#define MPU_ATTR REG16(0xe000eda2)
-
-/*
- * See ARM v7-M Architecture Reference Manual
- * Section B3.5.5 MPU Type Register, MPU_TYPE
- */
-#define MPU_TYPE_UNIFIED_MASK 0x00FF0001
-#define MPU_TYPE_REG_COUNT(t) (((t) >> 8) & 0xFF)
-
-#define MPU_CTRL_PRIVDEFEN BIT(2)
-#define MPU_CTRL_HFNMIENA BIT(1)
-#define MPU_CTRL_ENABLE BIT(0)
-
-/*
- * Minimum region size is 32 bytes, 5 bits of address space
- */
-#define MPU_SIZE_BITS_MIN 5
-
-/*
- * XN (execute never) bit. It's bit 12 if accessed by halfword.
- * 0: XN off
- * 1: XN on
- */
-#define MPU_ATTR_XN BIT(12)
-
-/* AP bit. See table 3-5 of Stellaris LM4F232H5QC datasheet for details */
-#define MPU_ATTR_NO_NO (0 << 8) /* privileged no access, unpriv no access */
-#define MPU_ATTR_RW_NO (1 << 8) /* privileged ReadWrite, unpriv no access */
-#define MPU_ATTR_RW_RO (2 << 8) /* privileged ReadWrite, unpriv Read-only */
-#define MPU_ATTR_RW_RW (3 << 8) /* privileged ReadWrite, unpriv ReadWrite */
-#define MPU_ATTR_RO_NO (5 << 8) /* privileged Read-only, unpriv no access */
-
-/* Suggested value for TEX S/C/B bit. See table 3-6 of Stellaris LM4F232H5QC
- * datasheet and table 38 of STM32F10xxx Cortex-M3 programming manual. */
-#ifndef MPU_ATTR_INTERNAL_SRAM
-#define MPU_ATTR_INTERNAL_SRAM 6 /* for Internal SRAM */
-#endif
-#ifndef MPU_ATTR_FLASH_MEMORY
-#define MPU_ATTR_FLASH_MEMORY 2 /* for flash memory */
-#endif
-
-/* Represent RW with at most 2 MPU regions. */
-#define MAX_RW_REGIONS 2
-struct mpu_rw_regions {
- int num_regions;
- uint32_t addr[MAX_RW_REGIONS];
- uint32_t size[MAX_RW_REGIONS];
-};
-
-/**
- * Enable MPU
- */
-void mpu_enable(void);
-
-/**
- * Returns the value of MPU type register
- *
- * Bit fields:
- * [15:8] Number of the data regions implemented or 0 if MPU is not present.
- * [1] 0: unified (no distinction between instruction and data)
- * 1: separated
- */
-uint32_t mpu_get_type(void);
-
-/* Location of iram.text */
-extern char __iram_text_start;
-extern char __iram_text_end;
-
-/**
- * Protect RAM from code execution
- */
-int mpu_protect_data_ram(void);
-
-/**
- * Protect code RAM from being overwritten
- */
-int mpu_protect_code_ram(void);
-
-/**
- * Protect internal mapped flash memory from code execution
- */
-int mpu_lock_ro_flash(void);
-int mpu_lock_rw_flash(void);
-
-/**
- * Protect/unprotect rollback region readback.
- */
-int mpu_lock_rollback(int lock);
-
-/**
- * Initialize MPU.
- * It disables all regions if MPU is implemented. Otherwise, returns
- * EC_ERROR_UNIMPLEMENTED.
- */
-int mpu_pre_init(void);
-
-#endif /* __CROS_EC_MPU_H */
diff --git a/core/cortex-m/include/mpu_private.h b/core/cortex-m/include/mpu_private.h
deleted file mode 100644
index e6030114c2..0000000000
--- a/core/cortex-m/include/mpu_private.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright 2020 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/**
- * @file
- *
- * @brief Private header file. Not meant to be used outside of mpu.c and tests.
- */
-
-#ifndef __CROS_EC_MPU_PRIVATE_H
-#define __CROS_EC_MPU_PRIVATE_H
-
-int mpu_num_regions(void);
-bool has_mpu(void);
-bool mpu_is_unified(void);
-void mpu_disable(void);
-int mpu_update_region(uint8_t region, uint32_t addr, uint8_t size_bit,
- uint16_t attr, uint8_t enable, uint8_t srd);
-int mpu_config_region(uint8_t region, uint32_t addr, uint32_t size,
- uint16_t attr, uint8_t enable);
-struct mpu_rw_regions mpu_get_rw_regions(void);
-
-#endif /* __CROS_EC_MPU_PRIVATE_H */
diff --git a/core/cortex-m/init.S b/core/cortex-m/init.S
deleted file mode 100644
index bc650c4c64..0000000000
--- a/core/cortex-m/init.S
+++ /dev/null
@@ -1,135 +0,0 @@
-/* Copyright 2011 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- *
- * Cortex-M CPU initialization
- */
-
-#include "config.h"
-
-.text
-.syntax unified
-.code 16
-
-.global reset
-.thumb_func
-reset:
- /*
- * Ensure we're in privileged mode with main stack. Necessary if
- * we've jumped directly here from another image after task_start().
- */
-#ifdef CONFIG_FPU
- mov r0, #(1 << 2) @ priv. mode / main stack / floating point on
-#else
- mov r0, #0 @ priv. mode / main stack / no floating point
-#endif
- msr control, r0
- isb @ ensure the write is done
-
- /* Set the vector table on our current code */
- ldr r1, =vectors
- ldr r2, =0xE000ED08 /* VTABLE register in SCB*/
- str r1, [r2]
-
- /* Clear BSS */
- mov r0, #0
- ldr r1,_bss_start
- ldr r2,_bss_end
-bss_loop:
- cmp r1, r2
- it lt
- strlt r0, [r1], #4
- blt bss_loop
-
- /* Copy initialized data to Internal RAM */
- ldr r0,_data_lma_start
-
- /*
- * When the .data section is linked into the .init_rom section,
- * _data_lma_start is defined as a flash offset instead of a full
- * 32-bit address by the linker script. Add the 32-bit flash base
- * address to get a full 32-bit address.
- *
- * Flash locking isn't needed here as no tasks have been started.
- */
-#ifdef CONFIG_CHIP_DATA_IN_INIT_ROM
- ldr r1, =CONFIG_MAPPED_STORAGE_BASE
- add r0, r0, r1
-#endif
-
- ldr r1,_data_start
- ldr r2,_data_end
-data_loop:
- ldr r3, [r0], #4
- cmp r1, r2
- it lt
- strlt r3, [r1], #4
- blt data_loop
-
- /*
- * Set stack pointer. Already done by Cortex-M hardware, but re-doing
- * this here allows software to jump directly to the reset vector.
- */
- ldr r0, =stack_end
- mov sp, r0
-
-#ifdef CONFIG_FPU
- /* Enable FPU */
- /* CPACR is located at address 0xE000ED88 */
- ldr r0, =0xE000ED88
- /* Read CPACR */
- ldr r1, [r0]
- /* Set bits 20-23 to enable CP10 and CP11 coprocessors */
- orr r1, r1, #(0xF << 20)
- /* Write back the modified value to the CPACR */
- str r1, [r0] /* wait for store to complete */
- dsb
- /* reset pipeline now the FPU is enabled */
- isb
-#endif /* CONFIG_FPU */
-
-#ifdef CONFIG_DEBUG_DISABLE_WRITE_BUFFER
- /* Disable write buffer used for default memory map accesses */
- ldr r0, =0xE000E008 /* Load address of ACTLR */
- ldr r1, [r0] /* Read ACTLR */
- orr r1, r1, #2 /* Set DISDEFWBUF bit */
- str r1, [r0] /* Write back ACTLR */
- dsb /* Wait for store to complete */
- isb /* Reset pipeline */
-#endif /* CONFIG_DEBUG_DISABLE_WRITE_BUFFER */
-
- /* Jump to C code */
- bl main
-
- /* That should not return. If it does, loop forever. */
-fini_loop:
- b fini_loop
-
-.align 2
-_bss_start:
-.long __bss_start
-_bss_end:
-.long __bss_end
-_data_start:
-.long __data_start
-_data_end:
-.long __data_end
-_data_lma_start:
-.long __data_lma_start
-
-/* Mock functions to avoid linker complaints */
-.global __aeabi_unwind_cpp_pr0
-.global __aeabi_unwind_cpp_pr1
-.global __aeabi_unwind_cpp_pr2
-__aeabi_unwind_cpp_pr0:
-__aeabi_unwind_cpp_pr1:
-__aeabi_unwind_cpp_pr2:
- bx lr
-
-/* Reserve space for system stack */
-.section .bss.system_stack
-stack_start:
-.space CONFIG_STACK_SIZE, 0
-stack_end:
-.global stack_end
-
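
For readers less comfortable with the assembly, here is a rough C equivalent of
the BSS-clear and .data-copy loops in the reset code above. It is illustrative
only and ignores the CONFIG_CHIP_DATA_IN_INIT_ROM base-address adjustment; the
symbols are the same linker-provided ones referenced by init.S.

#include <stdint.h>

extern uint32_t __bss_start[], __bss_end[];
extern uint32_t __data_start[], __data_end[], __data_lma_start[];

static void copy_data_and_clear_bss(void)
{
	const uint32_t *src = __data_lma_start;
	uint32_t *dst;

	/* Zero .bss, one word at a time. */
	for (dst = __bss_start; dst < __bss_end; dst++)
		*dst = 0;

	/* Copy initialized .data from its load address into RAM. */
	for (dst = __data_start; dst < __data_end; dst++)
		*dst = *src++;
}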
diff --git a/core/cortex-m/irq_handler.h b/core/cortex-m/irq_handler.h
deleted file mode 100644
index ae5d95cd94..0000000000
--- a/core/cortex-m/irq_handler.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright 2014 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/* Helper to declare IRQ handling routines */
-
-#ifndef __CROS_EC_IRQ_HANDLER_H
-#define __CROS_EC_IRQ_HANDLER_H
-
-#ifdef CONFIG_TASK_PROFILING
-#define bl_task_start_irq_handler "bl task_start_irq_handler\n"
-#else
-#define bl_task_start_irq_handler ""
-#endif
-
-/* Helper macros to build the IRQ handler and priority struct names */
-#define IRQ_HANDLER(irqname) CONCAT3(irq_, irqname, _handler)
-#define IRQ_PRIORITY(irqname) CONCAT2(prio_, irqname)
-/*
- * Macro to connect the interrupt handler "routine" to the irq number "irq" and
- * ensure it is enabled in the interrupt controller with the right priority.
- */
-#define DECLARE_IRQ(irq, routine, priority) DECLARE_IRQ_(irq, routine, priority)
-#define DECLARE_IRQ_(irq, routine, priority) \
- void IRQ_HANDLER(irq)(void) __attribute__((naked)); \
- typedef struct { \
- int fake[irq >= CONFIG_IRQ_COUNT ? -1 : 1]; \
- } irq_num_check_##irq; \
- void __keep routine(void); \
- void IRQ_HANDLER(irq)(void) \
- { \
- asm volatile("mov r0, lr\n" \
- "push {r0, lr}\n" \
- bl_task_start_irq_handler \
- "bl "#routine"\n" \
- "pop {r0, lr}\n" \
- "b task_resched_if_needed\n" \
- ); \
- } \
- const struct irq_priority __keep IRQ_PRIORITY(irq) \
- __attribute__((section(".rodata.irqprio"))) \
- = {irq, priority}
-#endif /* __CROS_EC_IRQ_HANDLER_H */
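
A hypothetical usage sketch for the DECLARE_IRQ() macro above; the IRQ name,
handler, and priority below are made up for illustration.

#include "irq_handler.h"

void my_uart_interrupt(void)
{
	/* Service the peripheral and return; the macro-generated wrapper
	 * then branches to task_resched_if_needed(). */
}
DECLARE_IRQ(CHIP_IRQ_UART, my_uart_interrupt, 2);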
diff --git a/core/cortex-m/ldivmod.S b/core/cortex-m/ldivmod.S
deleted file mode 120000
index afdeb4ec1f..0000000000
--- a/core/cortex-m/ldivmod.S
+++ /dev/null
@@ -1 +0,0 @@
-../../third_party/libaeabi-cortexm0/core/cortex-m/ldivmod.S \ No newline at end of file
diff --git a/core/cortex-m/llsr.c b/core/cortex-m/llsr.c
deleted file mode 100644
index 0827121e97..0000000000
--- a/core/cortex-m/llsr.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright 2018 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/* Enable the use of right shift for uint64_t. */
-
-#include <console.h>
-#include <compile_time_macros.h>
-#include <stdint.h>
-
-union words {
- uint64_t u64;
- uint32_t w[2];
-};
-
-uint64_t __attribute__((used)) __aeabi_llsr(uint64_t v, uint32_t shift)
-{
- union words val;
- union words res;
-
- val.u64 = v;
- res.w[1] = val.w[1] >> shift;
- res.w[0] = val.w[0] >> shift;
- res.w[0] |= val.w[1] >> (shift - 32); /* Handle shift >= 32*/
- res.w[0] |= val.w[1] << (32 - shift); /* Handle shift <= 32*/
- return res.u64;
-}
-
-#ifdef CONFIG_LLSR_TEST
-
-static int command_llsr(int argc, char **argv)
-{
-	/* Volatile to prevent compiler optimization from interfering. */
- volatile uint64_t start = 0x123456789ABCDEF0ull;
- uint32_t x;
-
- const struct {
- uint32_t shift_by;
- uint64_t result;
- } cases[] = {
- {0, start},
- {16, 0x123456789ABCull},
- {32, 0x12345678u},
- {48, 0x1234u},
- {64, 0u}
- };
-
- for (x = 0; x < ARRAY_SIZE(cases); ++x) {
- if ((start >> cases[x].shift_by) != cases[x].result) {
- ccprintf("FAILED %d\n", cases[x].shift_by);
- return EC_ERROR_UNKNOWN;
- }
- }
-
- ccprintf("SUCCESS\n");
- return EC_SUCCESS;
-}
-
-DECLARE_CONSOLE_COMMAND(
- llsrtest, command_llsr,
- "",
- "Run tests against the LLSR ABI. Prints SUCCESS or FAILURE.");
-
-#endif /* CONFIG_LLSR_TEST */
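
For context (not part of the original file): on 32-bit ARM EABI targets the
compiler lowers a 64-bit logical right shift by a non-constant amount to a call
to __aeabi_llsr(), which is why the EC carries its own implementation. The
function name below is hypothetical.

#include <stdint.h>

uint64_t scale_down(uint64_t ticks, uint32_t shift)
{
	/* Compiles to a call to __aeabi_llsr(ticks, shift). */
	return ticks >> shift;
}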
diff --git a/core/cortex-m/mpu.c b/core/cortex-m/mpu.c
deleted file mode 100644
index 29da931a28..0000000000
--- a/core/cortex-m/mpu.c
+++ /dev/null
@@ -1,457 +0,0 @@
-/* Copyright 2013 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/* MPU module for Chrome EC */
-
-#include "mpu.h"
-#include "console.h"
-#include "cpu.h"
-#include "registers.h"
-#include "task.h"
-#include "util.h"
-
-/**
- * @return Number of regions supported by the MPU. 0 means the processor does
- * not implement an MPU.
- */
-int mpu_num_regions(void)
-{
- return MPU_TYPE_REG_COUNT(mpu_get_type());
-}
-
-/**
- * @return true if processor has MPU, false otherwise
- */
-bool has_mpu(void)
-{
- return mpu_num_regions() != 0;
-}
-
-/**
- * @return true if MPU has unified instruction and data maps, false otherwise
- */
-bool mpu_is_unified(void)
-{
- return (mpu_get_type() & MPU_TYPE_UNIFIED_MASK) == 0;
-}
-
-
-/**
- * Update a memory region.
- *
- * region: index of the region to update
- * addr: base address of the region
- * size_bit: size of the region in power of two.
- * attr: attribute bits. Current value will be overwritten if enable is true.
- * enable: enables the region if non zero. Otherwise, disables the region.
- * srd: subregion mask to partition region into 1/8ths, 0 = subregion enabled.
- *
- * Based on 3.1.4.1 'Updating an MPU Region' of Stellaris LM4F232H5QC Datasheet
- */
-int mpu_update_region(uint8_t region, uint32_t addr, uint8_t size_bit,
- uint16_t attr, uint8_t enable, uint8_t srd)
-{
- /*
- * Note that on the Cortex-M3, Cortex-M4, and Cortex-M7, the base
- * address used for an MPU region must be aligned to the size of the
- * region:
- *
- * https://developer.arm.com/docs/dui0553/a/cortex-m4-peripherals/optional-memory-protection-unit/mpu-region-base-address-register
- * https://developer.arm.com/docs/dui0552/a/cortex-m3-peripherals/optional-memory-protection-unit/mpu-region-base-address-register
- * https://developer.arm.com/docs/dui0646/a/cortex-m7-peripherals/optional-memory-protection-unit/mpu-region-base-address-register#BABDAHJG
- */
- if (!is_aligned(addr, BIT(size_bit)))
- return -EC_ERROR_INVAL;
-
- if (region >= mpu_num_regions())
- return -EC_ERROR_INVAL;
-
- if (size_bit < MPU_SIZE_BITS_MIN)
- return -EC_ERROR_INVAL;
-
- asm volatile("isb; dsb;");
-
- MPU_NUMBER = region;
- MPU_SIZE &= ~1; /* Disable */
- if (enable) {
- MPU_BASE = addr;
- /*
- * MPU_ATTR = attr;
- * MPU_SIZE = (srd << 8) | ((size_bit - 1) << 1) | 1;
- *
- * WORKAROUND: the 2 half-word accesses above should work
- * according to the doc, but they don't ..., do a single 32-bit
- * one.
- */
- REG32(&MPU_SIZE) = ((uint32_t)attr << 16)
- | (srd << 8) | ((size_bit - 1) << 1) | 1;
- }
-
- asm volatile("isb; dsb;");
-
- return EC_SUCCESS;
-}
-
-/*
- * Greedily configure the largest possible part of the given region from the
- * base address.
- *
- * Returns EC_SUCCESS on success and sets *consumed to the number of bytes
- * mapped from the base address. In case of error, the value of *consumed is
- * unpredictable.
- *
- * For instance, if addr is 0x10070000 and size is 0x30000 then memory in the
- * range 0x10070000-0x10080000 will be configured and *consumed will be set to
- * 0x10000.
- */
-static int mpu_config_region_greedy(uint8_t region, uint32_t addr,
- uint32_t size, uint16_t attr,
- uint8_t enable, uint32_t *consumed)
-{
- /*
- * Compute candidate alignment to be used for the MPU region.
- *
- * This is the minimum of the base address and size alignment, since
- * regions must be naturally aligned to their size.
- */
- uint8_t natural_alignment = MIN(addr == 0 ? 32 : alignment_log2(addr),
- alignment_log2(size));
- uint8_t subregion_disable = 0;
-
- if (natural_alignment >= 5) {
- int sr_idx;
- uint32_t subregion_base, subregion_size;
- /*
- * For MPU regions larger than 256 bytes we can use subregions,
- * (which are a minimum of 32 bytes in size) making the actual
- * MPU region 8x larger. Depending on the address alignment this
- * can allow us to cover a larger area (and never a smaller
- * one).
- */
- natural_alignment += 3;
- /* Region size cannot exceed 4GB. */
- if (natural_alignment > 32)
- natural_alignment = 32;
-
- /*
- * Generate the subregion mask by walking through each,
- * disabling if it is not completely contained in the requested
- * range.
- */
- subregion_base = addr & ~((1 << natural_alignment) - 1);
- subregion_size = 1 << (natural_alignment - 3);
- *consumed = 0;
- for (sr_idx = 0; sr_idx < 8; sr_idx++) {
- if (subregion_base < addr ||
- (subregion_base + subregion_size) > (addr + size))
- /* lsb of subregion mask is lowest address */
- subregion_disable |= 1 << sr_idx;
- else
- /* not disabled means consumed */
- *consumed += subregion_size;
-
- subregion_base += subregion_size;
- }
- } else {
- /* Not using subregions; all enabled */
- *consumed = 1 << natural_alignment;
- }
-
- return mpu_update_region(region,
- addr & ~((1 << natural_alignment) - 1),
- natural_alignment,
- attr, enable, subregion_disable);
-}
-
-/**
- * Configure a region
- *
- * region: index of the region to update
- * addr: Base address of the region
- * size: Size of the region in bytes
- * attr: Attribute bits. Current value will be overwritten if enable is set.
- * enable: Enables the region if non zero. Otherwise, disables the region.
- *
- * Returns EC_SUCCESS on success, -EC_ERROR_OVERFLOW if it is not possible to
- * fully configure the given region, or -EC_ERROR_INVAL if a parameter is
- * invalid (such as the address or size having unsupported alignment).
- */
-int mpu_config_region(uint8_t region, uint32_t addr, uint32_t size,
- uint16_t attr, uint8_t enable)
-{
- int rv;
- uint32_t consumed;
-
- /* Zero size doesn't require configuration */
- if (size == 0)
- return EC_SUCCESS;
-
- rv = mpu_config_region_greedy(region, addr, size,
- attr, enable, &consumed);
- if (rv != EC_SUCCESS)
- return rv;
- ASSERT(consumed <= size);
- addr += consumed;
- size -= consumed;
-
- /* Regions other than DATA_RAM_TEXT may use two MPU regions */
- if (size > 0 && region != REGION_DATA_RAM_TEXT) {
- rv = mpu_config_region_greedy(region + 1, addr, size,
- attr, enable, &consumed);
- if (rv != EC_SUCCESS)
- return rv;
- ASSERT(consumed <= size);
- addr += consumed;
- size -= consumed;
- }
-
- if (size > 0)
- return EC_ERROR_OVERFLOW;
- return EC_SUCCESS;
-}
-
-/**
- * Set a region executable and read-write.
- *
- * region: index of the region
- * addr: base address of the region
- * size: size of the region in bytes
- * texscb: TEX and SCB bit field
- */
-static int mpu_unlock_region(uint8_t region, uint32_t addr, uint32_t size,
- uint8_t texscb)
-{
- return mpu_config_region(region, addr, size,
- MPU_ATTR_RW_RW | texscb, 1);
-}
-
-void mpu_enable(void)
-{
- MPU_CTRL |= MPU_CTRL_PRIVDEFEN | MPU_CTRL_HFNMIENA | MPU_CTRL_ENABLE;
-}
-
-void mpu_disable(void)
-{
- MPU_CTRL &= ~(MPU_CTRL_PRIVDEFEN | MPU_CTRL_HFNMIENA | MPU_CTRL_ENABLE);
-}
-
-uint32_t mpu_get_type(void)
-{
- return MPU_TYPE;
-}
-
-int mpu_protect_data_ram(void)
-{
- int ret;
-
- /* Prevent code execution from data RAM */
- ret = mpu_config_region(REGION_DATA_RAM,
- CONFIG_RAM_BASE,
- CONFIG_DATA_RAM_SIZE,
- MPU_ATTR_XN |
- MPU_ATTR_RW_RW |
- MPU_ATTR_INTERNAL_SRAM,
- 1);
- if (ret != EC_SUCCESS)
- return ret;
-
- /* Exempt the __iram_text section */
- return mpu_unlock_region(
- REGION_DATA_RAM_TEXT, (uint32_t)&__iram_text_start,
- (uint32_t)(&__iram_text_end - &__iram_text_start),
- MPU_ATTR_INTERNAL_SRAM);
-}
-
-#if defined(CONFIG_EXTERNAL_STORAGE) || !defined(CONFIG_FLASH_PHYSICAL)
-int mpu_protect_code_ram(void)
-{
- /* Prevent write access to code RAM */
- return mpu_config_region(REGION_STORAGE,
- CONFIG_PROGRAM_MEMORY_BASE + CONFIG_RO_MEM_OFF,
- CONFIG_CODE_RAM_SIZE,
- MPU_ATTR_RO_NO | MPU_ATTR_INTERNAL_SRAM,
- 1);
-}
-#else
-int mpu_lock_ro_flash(void)
-{
- /* Prevent execution from internal mapped RO flash */
- return mpu_config_region(REGION_STORAGE,
- CONFIG_MAPPED_STORAGE_BASE + CONFIG_RO_MEM_OFF,
- CONFIG_RO_SIZE,
- MPU_ATTR_XN | MPU_ATTR_RW_RW |
- MPU_ATTR_FLASH_MEMORY, 1);
-}
-
-/* Represent RW with at most 2 MPU regions. */
-struct mpu_rw_regions mpu_get_rw_regions(void)
-{
- int aligned_size_bit;
- struct mpu_rw_regions regions = {};
-
- regions.addr[0] = CONFIG_MAPPED_STORAGE_BASE + CONFIG_RW_MEM_OFF;
-
- /*
- * Least significant set bit of the address determines the max size of
- * the region because on the Cortex-M3, Cortex-M4 and Cortex-M7, the
- * address used for an MPU region must be aligned to the size.
- */
- aligned_size_bit =
- __fls(regions.addr[0] & -regions.addr[0]);
- regions.size[0] = MIN(BIT(aligned_size_bit), CONFIG_RW_SIZE);
- regions.addr[1] = regions.addr[0] + regions.size[0];
- regions.size[1] = CONFIG_RW_SIZE - regions.size[0];
- regions.num_regions = (regions.size[1] == 0) ? 1 : 2;
-
- return regions;
-}
-
-int mpu_lock_rw_flash(void)
-{
- /* Prevent execution from internal mapped RW flash */
- const uint16_t mpu_attr = MPU_ATTR_XN | MPU_ATTR_RW_RW |
- MPU_ATTR_FLASH_MEMORY;
- const struct mpu_rw_regions regions = mpu_get_rw_regions();
- int rv;
-
- rv = mpu_config_region(REGION_STORAGE, regions.addr[0], regions.size[0],
- mpu_attr, 1);
- if ((rv != EC_SUCCESS) || (regions.num_regions == 1))
- return rv;
-
- /* If this fails then it's impossible to represent with two regions. */
- return mpu_config_region(REGION_STORAGE2, regions.addr[1],
- regions.size[1], mpu_attr, 1);
-}
-#endif /* !CONFIG_EXTERNAL_STORAGE */
-
-#ifdef CONFIG_ROLLBACK_MPU_PROTECT
-int mpu_lock_rollback(int lock)
-{
- int rv;
- int num_mpu_regions = mpu_num_regions();
-
- const uint32_t rollback_region_start_address =
- CONFIG_MAPPED_STORAGE_BASE + CONFIG_ROLLBACK_OFF;
- const uint32_t rollback_region_total_size = CONFIG_ROLLBACK_SIZE;
- const uint16_t mpu_attr =
- MPU_ATTR_XN /* Execute never */ |
-		MPU_ATTR_NO_NO /* No access (privileged or unprivileged) */;
-
- /*
-	 * Rollback MPU support was originally added on the Cortex-M7, which
-	 * supports 16 MPU regions and has the rollback region aligned such
-	 * that a single region can cover it.
- */
- uint8_t rollback_mpu_region = REGION_ROLLBACK;
-
- if (rollback_mpu_region < num_mpu_regions) {
- rv = mpu_config_region(rollback_mpu_region,
- rollback_region_start_address,
- rollback_region_total_size, mpu_attr,
- lock);
- return rv;
- }
-
- /*
- * If we get here, we can't use REGION_ROLLBACK because our MPU doesn't
- * have enough regions. Instead, we choose unused MPU regions.
- *
- * Note that on the Cortex-M3, Cortex-M4, and Cortex-M7, the base
- * address used for an MPU region must be aligned to the size of the
- * region, so it's not possible to use a single region to protect the
- * entire rollback flash on the STM32F412 (bloonchipper); we have to
- * use two.
- *
- * See mpu_update_region for alignment details.
- */
-
- rollback_mpu_region = REGION_CHIP_RESERVED;
- rv = mpu_config_region(rollback_mpu_region,
- rollback_region_start_address,
- rollback_region_total_size / 2, mpu_attr, lock);
- if (rv != EC_SUCCESS)
- return rv;
-
- rollback_mpu_region = REGION_CODE_RAM;
- rv = mpu_config_region(rollback_mpu_region,
- rollback_region_start_address +
- (rollback_region_total_size / 2),
- rollback_region_total_size / 2, mpu_attr, lock);
- return rv;
-}
-#endif
-
-#ifdef CONFIG_CHIP_UNCACHED_REGION
-/* Temporarily store the region ranges for use in the MPU configuration */
-#define REGION(_name, _flag, _start, _size) \
- static const uint32_t CONCAT2(_region_start_, _name) \
- __attribute__((unused, section(".unused"))) = _start; \
- static const uint32_t CONCAT2(_region_size_, _name) \
- __attribute__((unused, section(".unused"))) = _size;
-#include "memory_regions.inc"
-#undef REGION
-#endif /* CONFIG_CHIP_UNCACHED_REGION */
-
-int mpu_pre_init(void)
-{
- int i;
- int num_mpu_regions;
- int rv;
-
- if (!has_mpu())
- return EC_ERROR_HW_INTERNAL;
-
- num_mpu_regions = mpu_num_regions();
-
- /* Supports MPU with 8 or 16 unified regions */
- if (!mpu_is_unified() ||
- (num_mpu_regions != 8 && num_mpu_regions != 16))
- return EC_ERROR_UNIMPLEMENTED;
-
- mpu_disable();
-
- for (i = 0; i < num_mpu_regions; ++i) {
- /*
- * Disable all regions.
- *
- * We use the smallest possible size (32 bytes), but it
- * doesn't really matter since the regions are disabled.
- *
- * Use the fixed SRAM region base to ensure base is aligned
- * to the region size.
- */
- rv = mpu_update_region(i, CORTEX_M_SRAM_BASE, MPU_SIZE_BITS_MIN,
- 0, 0, 0);
- if (rv != EC_SUCCESS)
- return rv;
- }
-
- if (IS_ENABLED(CONFIG_ROLLBACK_MPU_PROTECT)) {
- rv = mpu_lock_rollback(1);
- if (rv != EC_SUCCESS)
- return rv;
- }
-
- if (IS_ENABLED(CONFIG_ARMV7M_CACHE)) {
-#ifdef CONFIG_CHIP_UNCACHED_REGION
- rv = mpu_config_region(
- REGION_UNCACHED_RAM,
- CONCAT2(_region_start_, CONFIG_CHIP_UNCACHED_REGION),
- CONCAT2(_region_size_, CONFIG_CHIP_UNCACHED_REGION),
- MPU_ATTR_XN | MPU_ATTR_RW_RW, 1);
- if (rv != EC_SUCCESS)
- return rv;
-
-#endif
- }
-
- mpu_enable();
-
- if (IS_ENABLED(CONFIG_ARMV7M_CACHE))
- cpu_enable_caches();
-
- return EC_SUCCESS;
-}
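
For reference, a minimal sketch of how a board might call the mpu_config_region() API whose contract is documented above, assuming the EC's mpu.h definitions (MPU_ATTR_* bits and region indices). The buffer address, its size, and the choice of REGION_UNCACHED_RAM as a free region index are placeholders for illustration only, not part of this change:

#include "mpu.h"

/* Hypothetical 4 KB buffer; the base must be aligned to the region size. */
#define SCRATCH_BUF_BASE 0x20008000
#define SCRATCH_BUF_SIZE 0x1000

static int protect_scratch_buffer(void)
{
	/* Read-write from both privilege levels, never executable */
	return mpu_config_region(REGION_UNCACHED_RAM, SCRATCH_BUF_BASE,
				 SCRATCH_BUF_SIZE,
				 MPU_ATTR_XN | MPU_ATTR_RW_RW |
				 MPU_ATTR_INTERNAL_SRAM, 1);
}

If EC_ERROR_OVERFLOW is returned, the range could not be represented even with the second (region + 1) MPU slot that mpu_config_region() may consume via mpu_config_region_greedy(), so the caller has to fall back to a coarser, better-aligned layout.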
diff --git a/core/cortex-m/panic-internal.h b/core/cortex-m/panic-internal.h
deleted file mode 100644
index 1a58afa8a2..0000000000
--- a/core/cortex-m/panic-internal.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright 2018 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef __CROS_EC_PANIC_INTERNAL_H
-#define __CROS_EC_PANIC_INTERNAL_H
-
-void exception_panic(void) __attribute__((naked));
-
-#endif /* __CROS_EC_PANIC_INTERNAL_H */
diff --git a/core/cortex-m/panic.c b/core/cortex-m/panic.c
deleted file mode 100644
index da6900b1b9..0000000000
--- a/core/cortex-m/panic.c
+++ /dev/null
@@ -1,475 +0,0 @@
-/* Copyright 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "common.h"
-#include "console.h"
-#include "cpu.h"
-#include "host_command.h"
-#include "panic.h"
-#include "panic-internal.h"
-#include "printf.h"
-#include "system.h"
-#include "task.h"
-#include "timer.h"
-#include "uart.h"
-#include "util.h"
-#include "watchdog.h"
-
-/* Whether bus fault is ignored */
-static int bus_fault_ignored;
-
-
-/* Panic data goes at the end of RAM. */
-static struct panic_data * const pdata_ptr = PANIC_DATA_PTR;
-
-/* Preceded by stack, rounded down to nearest 64-bit-aligned boundary */
-static const uint32_t pstack_addr = (CONFIG_RAM_BASE + CONFIG_RAM_SIZE
- - sizeof(struct panic_data)) & ~7;
-
-/**
- * Print the name and value of a register
- *
- * This is a convenient helper function for displaying a register value.
- * It shows the register name in a 3 character field, followed by a colon.
- * The register value is regs[index], and this is shown in hex. If regs is
- * NULL, then we display spaces instead.
- *
- * After displaying the value, either a space or \n is displayed depending
- * on the register number, so that (assuming the caller passes all 16
- * registers in sequence) we put 4 values per line like this
- *
- * r0 :0000000b r1 :00000047 r2 :60000000 r3 :200012b5
- * r4 :00000000 r5 :08004e64 r6 :08004e1c r7 :200012a8
- * r8 :08004e64 r9 :00000002 r10:00000000 r11:00000000
- * r12:0000003f sp :200009a0 lr :0800270d pc :0800351a
- *
- * @param regnum Register number to display (0-15)
- * @param regs Pointer to array holding the registers, or NULL
- * @param index Index into array where the register value is present
- */
-static void print_reg(int regnum, const uint32_t *regs, int index)
-{
- static const char regname[] = "r10r11r12sp lr pc ";
- static char rname[3] = "r ";
- const char *name;
-
- rname[1] = '0' + regnum;
- name = regnum < 10 ? rname : &regname[(regnum - 10) * 3];
- panic_printf("%c%c%c:", name[0], name[1], name[2]);
- if (regs)
- panic_printf("%08x", regs[index]);
- else
- panic_puts(" ");
- panic_puts((regnum & 3) == 3 ? "\n" : " ");
-}
-
-/*
- * Returns non-zero if the exception frame was created on the main stack, or
- * zero if it's on the process stack.
- *
- * See B1.5.8 "Exception return behavior" of ARM DDI 0403D for details.
- */
-static int32_t is_frame_in_handler_stack(const uint32_t exc_return)
-{
- return (exc_return & 0xf) == 1 || (exc_return & 0xf) == 9;
-}
-
-#ifdef CONFIG_DEBUG_EXCEPTIONS
-/* Names for each of the bits in the CFSR register, starting at bit 0 */
-static const char * const cfsr_name[32] = {
- /* MMFSR */
- [0] = "Instruction access violation",
- [1] = "Data access violation",
- [3] = "Unstack from exception violation",
- [4] = "Stack from exception violation",
-
- /* BFSR */
- [8] = "Instruction bus error",
- [9] = "Precise data bus error",
- [10] = "Imprecise data bus error",
- [11] = "Unstack from exception bus fault",
- [12] = "Stack from exception bus fault",
-
- /* UFSR */
- [16] = "Undefined instructions",
- [17] = "Invalid state",
- [18] = "Invalid PC",
- [19] = "No coprocessor",
- [24] = "Unaligned",
- [25] = "Divide by 0",
-};
-
-/* Names for the first 5 bits in the DFSR */
-static const char * const dfsr_name[] = {
- "Halt request",
- "Breakpoint",
- "Data watchpoint/trace",
- "Vector catch",
- "External debug request",
-};
-
-/**
- * Helper function to display a separator after the previous item
- *
- * If items have been displayed already, we display a comma separator.
- * In any case, the count of items displayed is incremented.
- *
- * @param count Number of items displayed so far (0 for none)
- */
-static void do_separate(int *count)
-{
- if (*count)
- panic_puts(", ");
- (*count)++;
-}
-
-/**
- * Show a textual representation of the fault registers
- *
- * A list of detected faults is shown, with no trailing newline.
- *
- * @param cfsr Value of Configurable Fault Status
- * @param hfsr Value of Hard Fault Status
- * @param dfsr Value of Debug Fault Status
- */
-static void show_fault(uint32_t cfsr, uint32_t hfsr, uint32_t dfsr)
-{
- unsigned int upto;
- int count = 0;
-
- for (upto = 0; upto < 32; upto++) {
- if ((cfsr & BIT(upto)) && cfsr_name[upto]) {
- do_separate(&count);
- panic_puts(cfsr_name[upto]);
- }
- }
-
- if (hfsr & CPU_NVIC_HFSR_DEBUGEVT) {
- do_separate(&count);
- panic_puts("Debug event");
- }
- if (hfsr & CPU_NVIC_HFSR_FORCED) {
- do_separate(&count);
- panic_puts("Forced hard fault");
- }
- if (hfsr & CPU_NVIC_HFSR_VECTTBL) {
- do_separate(&count);
- panic_puts("Vector table bus fault");
- }
-
- for (upto = 0; upto < 5; upto++) {
- if ((dfsr & BIT(upto))) {
- do_separate(&count);
- panic_puts(dfsr_name[upto]);
- }
- }
-}
-
-/*
- * Returns the size of the exception frame.
- *
- * See B1.5.7 "Stack alignment on exception entry" of ARM DDI 0403D for details.
- * In short, the exception frame size can be either 0x20, 0x24, 0x68, or 0x6c
- * depending on FPU context and padding for 8-byte alignment.
- */
-static uint32_t get_exception_frame_size(const struct panic_data *pdata)
-{
- uint32_t frame_size = 0;
-
- /* base exception frame */
- frame_size += 8 * sizeof(uint32_t);
-
- /* CPU uses xPSR[9] to indicate whether it padded the stack for
- * alignment or not. */
- if (pdata->cm.frame[7] & BIT(9))
- frame_size += sizeof(uint32_t);
-
-#ifdef CONFIG_FPU
- /* CPU uses EXC_RETURN[4] to indicate whether it stored extended
- * frame for FPU or not. */
- if (!(pdata->cm.regs[11] & BIT(4)))
- frame_size += 18 * sizeof(uint32_t);
-#endif
-
- return frame_size;
-}
-
-/*
- * Returns the position of the process stack before the exception frame.
- * It computes the size of the exception frame and adds it to psp.
- * If the exception happened in the exception context, it returns psp as is.
- */
-static uint32_t get_process_stack_position(const struct panic_data *pdata)
-{
- uint32_t psp = pdata->cm.regs[0];
-
- if (!is_frame_in_handler_stack(pdata->cm.regs[11]))
- psp += get_exception_frame_size(pdata);
-
- return psp;
-}
-
-/*
- * Show extra information that might be useful to understand a panic()
- *
- * We show fault register information, including the fault address registers
- * if valid.
- */
-static void panic_show_extra(const struct panic_data *pdata)
-{
- show_fault(pdata->cm.cfsr, pdata->cm.hfsr, pdata->cm.dfsr);
- if (pdata->cm.cfsr & CPU_NVIC_CFSR_BFARVALID)
- panic_printf(", bfar = %x", pdata->cm.bfar);
- if (pdata->cm.cfsr & CPU_NVIC_CFSR_MFARVALID)
- panic_printf(", mfar = %x", pdata->cm.mfar);
- panic_printf("\ncfsr = %x, ", pdata->cm.cfsr);
- panic_printf("shcsr = %x, ", pdata->cm.shcsr);
- panic_printf("hfsr = %x, ", pdata->cm.hfsr);
- panic_printf("dfsr = %x\n", pdata->cm.dfsr);
-}
-
-/*
- * Prints process stack contents stored above the exception frame.
- */
-static void panic_show_process_stack(const struct panic_data *pdata)
-{
- panic_printf("\n=========== Process Stack Contents ===========");
- if (pdata->flags & PANIC_DATA_FLAG_FRAME_VALID) {
- uint32_t psp = get_process_stack_position(pdata);
- int i;
- for (i = 0; i < 16; i++) {
- if (psp + sizeof(uint32_t) >
- CONFIG_RAM_BASE + CONFIG_RAM_SIZE)
- break;
- if (i % 4 == 0)
- panic_printf("\n%08x:", psp);
- panic_printf(" %08x", *(uint32_t *)psp);
- psp += sizeof(uint32_t);
- }
- } else {
- panic_printf("\nBad psp");
- }
-}
-#endif /* CONFIG_DEBUG_EXCEPTIONS */
-
-/*
- * Print panic data
- */
-void panic_data_print(const struct panic_data *pdata)
-{
- const uint32_t *lregs = pdata->cm.regs;
- const uint32_t *sregs = NULL;
- const int32_t in_handler =
- is_frame_in_handler_stack(pdata->cm.regs[11]);
- int i;
-
- if (pdata->flags & PANIC_DATA_FLAG_FRAME_VALID)
- sregs = pdata->cm.frame;
-
- panic_printf("\n=== %s EXCEPTION: %02x ====== xPSR: %08x ===\n",
- in_handler ? "HANDLER" : "PROCESS",
- lregs[1] & 0xff, sregs ? sregs[7] : -1);
- for (i = 0; i < 4; i++)
- print_reg(i, sregs, i);
- for (i = 4; i < 10; i++)
- print_reg(i, lregs, i - 1);
- print_reg(10, lregs, 9);
- print_reg(11, lregs, 10);
- print_reg(12, sregs, 4);
- print_reg(13, lregs, in_handler ? 2 : 0);
- print_reg(14, sregs, 5);
- print_reg(15, sregs, 6);
-
-#ifdef CONFIG_DEBUG_EXCEPTIONS
- panic_show_extra(pdata);
-#endif
-}
-
-void __keep report_panic(void)
-{
- /*
-	 * No need to get the pointer via get_panic_data_write(),
-	 * because the memory below pdata_ptr is now stack (see exception_panic())
- */
- struct panic_data *pdata = pdata_ptr;
- uint32_t sp;
-
- pdata->magic = PANIC_DATA_MAGIC;
- pdata->struct_size = sizeof(*pdata);
- pdata->struct_version = 2;
- pdata->arch = PANIC_ARCH_CORTEX_M;
- pdata->flags = 0;
- pdata->reserved = 0;
-
- /* Choose the right sp (psp or msp) based on EXC_RETURN value */
- sp = is_frame_in_handler_stack(pdata->cm.regs[11])
- ? pdata->cm.regs[2] : pdata->cm.regs[0];
- /* If stack is valid, copy exception frame to pdata */
- if ((sp & 3) == 0 &&
- sp >= CONFIG_RAM_BASE &&
- sp <= CONFIG_RAM_BASE + CONFIG_RAM_SIZE - 8 * sizeof(uint32_t)) {
- const uint32_t *sregs = (const uint32_t *)sp;
- int i;
-
- /* Skip r0-r3 and r12 registers if necessary */
- for (i = CORTEX_PANIC_FRAME_REGISTER_R0;
- i <= CORTEX_PANIC_FRAME_REGISTER_R12; i++)
- if (IS_ENABLED(CONFIG_PANIC_STRIP_GPR))
- pdata->cm.frame[i] = 0;
- else
- pdata->cm.frame[i] = sregs[i];
-
- for (i = CORTEX_PANIC_FRAME_REGISTER_LR;
- i < NUM_CORTEX_PANIC_FRAME_REGISTERS; i++)
- pdata->cm.frame[i] = sregs[i];
-
- pdata->flags |= PANIC_DATA_FLAG_FRAME_VALID;
- }
-
- /* Save extra information */
- pdata->cm.cfsr = CPU_NVIC_CFSR;
- pdata->cm.bfar = CPU_NVIC_BFAR;
- pdata->cm.mfar = CPU_NVIC_MFAR;
- pdata->cm.shcsr = CPU_NVIC_SHCSR;
- pdata->cm.hfsr = CPU_NVIC_HFSR;
- pdata->cm.dfsr = CPU_NVIC_DFSR;
-
-#ifdef CONFIG_UART_PAD_SWITCH
- uart_reset_default_pad_panic();
-#endif
- panic_data_print(pdata);
-#ifdef CONFIG_DEBUG_EXCEPTIONS
- panic_show_process_stack(pdata);
- /*
- * TODO(crosbug.com/p/23760): Dump main stack contents as well if the
- * exception happened in a handler's context.
- */
-#endif
-
- /* Make sure that all changes are saved into RAM */
- if (IS_ENABLED(CONFIG_ARMV7M_CACHE))
- cpu_clean_invalidate_dcache();
-
- panic_reboot();
-}
-
-/**
- * Default exception handler, which reports a panic.
- *
- * Declare this as a naked call so we can extract raw LR and IPSR values.
- */
-void exception_panic(void)
-{
- /* Save registers and branch directly to panic handler */
- asm volatile(
- "mov r0, %[pregs]\n"
- "mrs r1, psp\n"
- "mrs r2, ipsr\n"
- "mov r3, sp\n"
-#ifdef CONFIG_PANIC_STRIP_GPR
- /*
-		 * Check if we are in an exception. This is similar to
-		 * in_interrupt_context(). The exception bits are the 9 LSB, so
-		 * we can shift left by 23 bits and check whether the result
-		 * is 0 (the lsls instruction sets the appropriate flags).
- */
- "lsls r6, r2, #23\n"
- /*
-		 * If this is a software panic (shift result == 0) then registers
-		 * r4 and r5 contain additional info about the panic.
-		 * Always clear r6-r11, and clear r4 and r5 only if this is an
-		 * exception panic. To clear r4 and r5, the 'movne' conditional
-		 * instruction is used; it only takes effect when the flags show
-		 * that the result was != 0. The 'itt' instruction is used to make
-		 * sure the correct conditional instruction forms are generated.
- */
- "itt ne\n"
- "movne r4, #0\n"
- "movne r5, #0\n"
- "mov r6, #0\n"
- "mov r7, #0\n"
- "mov r8, #0\n"
- "mov r9, #0\n"
- "mov r10, #0\n"
- "mov r11, #0\n"
-#endif
- "stmia r0, {r1-r11, lr}\n"
- "mov sp, %[pstack]\n"
- "bl report_panic\n" : :
- [pregs] "r" (pdata_ptr->cm.regs),
- [pstack] "r" (pstack_addr) :
- /* Constraints protecting these from being clobbered.
- * Gcc should be using r0 & r12 for pregs and pstack. */
- "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
- "r10", "r11", "cc", "memory"
- );
-}
-
-#ifdef CONFIG_SOFTWARE_PANIC
-void software_panic(uint32_t reason, uint32_t info)
-{
- __asm__("mov " STRINGIFY(SOFTWARE_PANIC_INFO_REG) ", %0\n"
- "mov " STRINGIFY(SOFTWARE_PANIC_REASON_REG) ", %1\n"
- "bl exception_panic\n"
- : : "r"(info), "r"(reason));
- __builtin_unreachable();
-}
-
-void panic_set_reason(uint32_t reason, uint32_t info, uint8_t exception)
-{
- struct panic_data * const pdata = get_panic_data_write();
- uint32_t *lregs;
-
- lregs = pdata->cm.regs;
-
- /* Setup panic data structure */
- memset(pdata, 0, CONFIG_PANIC_DATA_SIZE);
- pdata->magic = PANIC_DATA_MAGIC;
- pdata->struct_size = CONFIG_PANIC_DATA_SIZE;
- pdata->struct_version = 2;
- pdata->arch = PANIC_ARCH_CORTEX_M;
-
- /* Log panic cause */
- lregs[1] = exception;
- lregs[3] = reason;
- lregs[4] = info;
-}
-
-void panic_get_reason(uint32_t *reason, uint32_t *info, uint8_t *exception)
-{
- struct panic_data * const pdata = panic_get_data();
- uint32_t *lregs;
-
- if (pdata && pdata->struct_version == 2) {
- lregs = pdata->cm.regs;
- *exception = lregs[1];
- *reason = lregs[3];
- *info = lregs[4];
- } else {
- *exception = *reason = *info = 0;
- }
-}
-#endif
-
-void bus_fault_handler(void)
-{
- if (!bus_fault_ignored)
- exception_panic();
-}
-
-void ignore_bus_fault(int ignored)
-{
- if (IS_ENABLED(CHIP_FAMILY_STM32H7)) {
- if (ignored == 0)
- asm volatile("dsb; isb");
- }
-
- /*
-	 * Flash code might call this before cpu_init(), so
-	 * ensure that bus faults really go through our handler.
- */
- CPU_NVIC_SHCSR |= CPU_NVIC_SHCSR_BUSFAULTENA;
- bus_fault_ignored = ignored;
-}
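
As a usage note for the software-panic API removed above: panic_set_reason() stashes a reason/info/exception triple in the Cortex-M register area of panic_data, and panic_get_reason() retrieves it after the reboot. A minimal consumer sketch, assuming only the panic.h and console APIs already used in this tree; the helper name is hypothetical:

#include "console.h"
#include "panic.h"

/* Hypothetical boot-time hook: report the previous software panic, if any. */
static void log_previous_panic(void)
{
	uint32_t reason, info;
	uint8_t exception;

	panic_get_reason(&reason, &info, &exception);
	if (reason)
		ccprintf("Previous panic: reason=%08x info=%08x exc=%02x\n",
			 reason, info, exception);
}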
diff --git a/core/cortex-m/switch.S b/core/cortex-m/switch.S
deleted file mode 100644
index f56c5e4c74..0000000000
--- a/core/cortex-m/switch.S
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- *
- * Context switching
- */
-
-#include "config.h"
-
-.text
-
-.syntax unified
-.code 16
-
-/**
- * Task context switching
- *
- * Change the task scheduled after returning from the exception.
- *
- * Save the registers of the current task below the exception context on
- * its task stack, then restore the live registers of the next task and set the
- * process stack pointer to the new stack.
- *
- * r0: pointer to the task to switch from
- * r1: pointer to the task to switch to
- *
- * must be called from interrupt context
- *
- * the structure of the saved context on the stack is :
- * r0, r1, r2, r3, r12, lr, pc, psr, r4, r5, r6, r7, r8, r9, r10, r11
- * exception frame <|> additional registers
- *
- * if using the FPU, then to store FPU context, add FP regs to the stack. in
- * this case the exception frame by default contains:
- * r0, r1, r2, r3, r12, lr, pc, psr,
- * s0 - s15, FPSCR, +1 word for 64-bit alignment
- * then in addition we store the following registers:
- * r4, r5, r6, r7, r8, r9, r10, r11
- * s16 - s31 (stored iff FP was used by the task (see EXC_RETURN[4]))
- * note that for the context switch to know if the next task has the extra FP
- * regs on the stack or not, we make use of the least significant bit of the
- * stack pointer. lsb of stack pointer is 1 if task has FP regs on stack, and
- * 0 otherwise.
- *
- */
-.global __switchto
-.thumb_func
-__switchto:
- mrs r3, psp @ get the task stack where the context has been saved
- ldr r2, [r1] @ get the new scheduled task stack pointer
- stmdb r3!, {r4-r11} @ save additional r4-r11 in the task stack
-
-#ifdef CONFIG_FPU
- tst lr, #(1<<4) @ test EXC_RETURN[4] for old task
- itt eq @ if EXC_RETURN[4] is zero, add FP regs to stack
- vstmdbeq r3!, {s16-s31}@ save additional FP s16-s31 in the task stack.
- @ if using lazy stacking, this will trigger saving
- @ s0-s15 in the reserved stack space.
- orreq r3, #1 @ set lsb of old stack pointer high to represent this
- @ task uses FPU. note stack pointer should be 64-bit
- @ aligned, so using this bit should be safe.
-
- tst r2, #1 @ test lsb of next stack pointer
- ittte ne @ if lsb is 1, then next task has FP regs on stack
- bicne r2, #1 @ clear lsb of new stack pointer
- bicne lr, #(1<<4) @ clear EXC_RETURN[4] for next task
- vldmiane r2!, {s16-s31}@ restore s16-s31 for the next task context
- orreq lr, #(1<<4) @ else if new stack doesn't use FP, set EXC_RETURN[4]
-#endif
-
- ldmia r2!, {r4-r11} @ restore r4-r11 for the next task context
- str r3, [r0] @ save the task stack pointer in its context
- msr psp, r2 @ set the process stack pointer to exception context
- bx lr @ return from exception
-
-/**
- * Start the task scheduling. r0 is a pointer to task_stack_ready, which is
- * set to 1 after the task stack is set up.
- */
-.global __task_start
-.thumb_func
-__task_start:
- ldr r2,=scratchpad @ area used as thread stack for the first switch
- mov r3, #2 @ use : priv. mode / thread stack / no floating point
- @ setting FP to unused here means first context switch
- @ will not store FP regs
- add r2, #17*4 @ put the pointer at the top of the stack
- mov r1, #0 @ __Schedule parameter : re-schedule nothing
- msr psp, r2 @ setup a thread stack up to the first context switch
- mov r2, #1
- isb @ ensure the write is done
- msr control, r3
- mov r3, r0
- mov r0, #0 @ __Schedule parameter : de-schedule nothing
- isb @ ensure the write is done
- str r2, [r3] @ Task scheduling is now active
- bl __schedule @ execute the task with the highest priority
- /* we should never return here */
- mov r0, #1 @ set to EC_ERROR_UNKNOWN
- bx lr
-
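
To make the stack layout described in the comment above concrete, here is an equivalent C view of the 16-word context that __switchto saves when the FPU is not in use, lowest address first. This struct is only illustrative and does not exist in the tree, but it matches the frame that init_task_context() in task.c builds for a task's first run:

struct context_no_fpu {
	/* Pushed by __switchto with stmdb, below the exception frame */
	uint32_t r4, r5, r6, r7, r8, r9, r10, r11;
	/* Hardware-stacked exception frame */
	uint32_t r0, r1, r2, r3, r12, lr, pc, xpsr;
};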
diff --git a/core/cortex-m/task.c b/core/cortex-m/task.c
deleted file mode 100644
index bf0eb5b397..0000000000
--- a/core/cortex-m/task.c
+++ /dev/null
@@ -1,1088 +0,0 @@
-/* Copyright 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/* Task scheduling / events module for Chrome EC operating system */
-
-#include "atomic.h"
-#include "common.h"
-#include "console.h"
-#include "cpu.h"
-#include "link_defs.h"
-#include "panic.h"
-#include "task.h"
-#include "timer.h"
-#include "util.h"
-
-typedef union {
- struct {
- /*
- * Note that sp must be the first element in the task struct
- * for __switchto() to work.
- */
- uint32_t sp; /* Saved stack pointer for context switch */
- uint32_t events; /* Bitmaps of received events */
- uint64_t runtime; /* Time spent in task */
- uint32_t *stack; /* Start of stack */
- };
-} task_;
-
-/* Value to store in unused stack */
-#define STACK_UNUSED_VALUE 0xdeadd00d
-
-/* declare task routine prototypes */
-#define TASK(n, r, d, s) void r(void *);
-void __idle(void);
-CONFIG_TASK_LIST
-CONFIG_TEST_TASK_LIST
-CONFIG_CTS_TASK_LIST
-#undef TASK
-
-/* Task names for easier debugging */
-#define TASK(n, r, d, s) #n,
-static const char * const task_names[] = {
- "<< idle >>",
- CONFIG_TASK_LIST
- CONFIG_TEST_TASK_LIST
- CONFIG_CTS_TASK_LIST
-};
-#undef TASK
-
-#ifdef CONFIG_TASK_PROFILING
-static uint64_t task_start_time; /* Time task scheduling started */
-/*
- * We only keep 32-bit values for exception start/end time, to avoid
- * accounting errors when we service an interrupt while the timer wraps around.
- */
-static uint32_t exc_start_time; /* Time of task->exception transition */
-static uint32_t exc_end_time; /* Time of exception->task transition */
-static uint64_t exc_total_time; /* Total time in exceptions */
-static uint32_t svc_calls; /* Number of service calls */
-static uint32_t task_switches; /* Number of times active task changed */
-static uint32_t irq_dist[CONFIG_IRQ_COUNT]; /* Distribution of IRQ calls */
-#endif
-
-extern void __switchto(task_ *from, task_ *to);
-extern int __task_start(int *task_stack_ready);
-
-#ifndef CONFIG_LOW_POWER_IDLE
-/* Idle task. Executed when no tasks are ready to be scheduled. */
-void __idle(void)
-{
- while (1) {
-#ifdef CHIP_NPCX
-
- /*
-		 * Use a host access to make sure the M4 core clock will
-		 * return when the eSPI accesses the Host modules if the
-		 * CSAE bit is set. Note that this symptom only occurs
-		 * on npcx5.
- */
-#if defined(CHIP_FAMILY_NPCX5) && defined(CONFIG_HOSTCMD_ESPI)
- /* Enable Host access wakeup */
- SET_BIT(NPCX_WKEN(MIWU_TABLE_0, MIWU_GROUP_5), 6);
-#endif
-
- /*
-		 * TODO (ML): An interrupt that occurs shortly before entering
-		 * idle mode starts getting serviced while the Core transitions
-		 * into idle mode. This results in a hard fault when the Core,
-		 * shortly thereafter, resumes execution on exiting idle mode.
-		 * Workaround: Replace the idle function with the following
- */
- asm (
- "cpsid i\n" /* Disable interrupt */
- "push {r0-r5}\n" /* Save needed registers */
- "wfi\n" /* Wait for int to enter idle */
- "ldm %0, {r0-r5}\n" /* Add a delay after WFI */
- "pop {r0-r5}\n" /* Restore regs before enabling ints */
- "isb\n" /* Flush the cpu pipeline */
- "cpsie i\n" :: "r" (0x100A8000) /* Enable interrupts */
- );
-#else
- /*
- * Wait for the next irq event. This stops the CPU clock
- * (sleep / deep sleep, depending on chip config).
- */
- asm("wfi");
-#endif
- }
-}
-#endif /* !CONFIG_LOW_POWER_IDLE */
-
-static void task_exit_trap(void)
-{
- int i = task_get_current();
- cprints(CC_TASK, "Task %d (%s) exited!", i, task_names[i]);
- /* Exited tasks simply sleep forever */
- while (1)
- task_wait_event(-1);
-}
-
-/* Startup parameters for all tasks. */
-#define TASK(n, r, d, s) { \
- .r0 = (uint32_t)d, \
- .pc = (uint32_t)r, \
- .stack_size = s, \
-},
-static const struct {
- uint32_t r0;
- uint32_t pc;
- uint16_t stack_size;
-} tasks_init[] = {
- TASK(IDLE, __idle, 0, IDLE_TASK_STACK_SIZE)
- CONFIG_TASK_LIST
- CONFIG_TEST_TASK_LIST
- CONFIG_CTS_TASK_LIST
-};
-#undef TASK
-
-/* Contexts for all tasks */
-static task_ tasks[TASK_ID_COUNT];
-
-/* Reset constants and state for all tasks */
-#define TASK_RESET_SUPPORTED BIT(31)
-#define TASK_RESET_LOCK BIT(30)
-#define TASK_RESET_STATE_MASK (TASK_RESET_SUPPORTED | TASK_RESET_LOCK)
-#define TASK_RESET_WAITERS_MASK ~TASK_RESET_STATE_MASK
-#define TASK_RESET_UNSUPPORTED 0
-#define TASK_RESET_STATE_LOCKED (TASK_RESET_SUPPORTED | TASK_RESET_LOCK)
-#define TASK_RESET_STATE_UNLOCKED TASK_RESET_SUPPORTED
-
-#ifdef CONFIG_TASK_RESET_LIST
-#define ENABLE_RESET(n) \
- [TASK_ID_##n] = TASK_RESET_SUPPORTED,
-static uint32_t task_reset_state[TASK_ID_COUNT] = {
-#ifdef CONFIG_TASK_RESET_LIST
- CONFIG_TASK_RESET_LIST
-#endif
-};
-#undef ENABLE_RESET
-#endif /* CONFIG_TASK_RESET_LIST */
-
-/* Validity checks about static task invariants */
-BUILD_ASSERT(TASK_ID_COUNT <= sizeof(unsigned) * 8);
-BUILD_ASSERT(TASK_ID_COUNT < (1 << (sizeof(task_id_t) * 8)));
-BUILD_ASSERT(BIT(TASK_ID_COUNT) < TASK_RESET_LOCK);
-
-/* Stacks for all tasks */
-#define TASK(n, r, d, s) + s
-uint8_t task_stacks[0
- TASK(IDLE, __idle, 0, IDLE_TASK_STACK_SIZE)
- CONFIG_TASK_LIST
- CONFIG_TEST_TASK_LIST
- CONFIG_CTS_TASK_LIST
-] __aligned(8);
-
-#undef TASK
-
-/* Reserve space to discard context on first context switch. */
-uint32_t scratchpad[17];
-
-static task_ *current_task = (task_ *)scratchpad;
-
-/*
- * Should IRQs chain to svc_handler()? This should be set if either of the
- * following is true:
- *
- * 1) Task scheduling has started, and task profiling is enabled. Task
- * profiling does its tracking in svc_handler().
- *
- * 2) An event was set by an interrupt; this could result in a higher-priority
- * task unblocking. After checking for a task switch, svc_handler() will clear
- * the flag (unless profiling is also enabled; then the flag remains set).
- */
-static int need_resched_or_profiling;
-
-/*
- * Bitmap of all tasks ready to be run.
- *
- * Start off with only the hooks task marked as ready such that all the modules
- * can do their init within a task switching context. The hooks task will then
- * make a call to enable all tasks.
- */
-static uint32_t tasks_ready = BIT(TASK_ID_HOOKS);
-/*
- * Initially allow only the HOOKS and IDLE task to run, regardless of ready
- * status, in order for HOOK_INIT to complete before other tasks.
- * task_enable_all_tasks() will open the flood gates.
- */
-static uint32_t tasks_enabled = BIT(TASK_ID_HOOKS) | BIT(TASK_ID_IDLE);
-
-static int start_called; /* Has task swapping started */
-
-static inline task_ *__task_id_to_ptr(task_id_t id)
-{
- return tasks + id;
-}
-
-void interrupt_disable(void)
-{
- asm("cpsid i");
-}
-
-void interrupt_enable(void)
-{
- asm("cpsie i");
-}
-
-inline int is_interrupt_enabled(void)
-{
- int primask;
-
- /* Interrupts are enabled when PRIMASK bit is 0 */
- asm("mrs %0, primask":"=r"(primask));
-
- return !(primask & 0x1);
-}
-
-inline int in_interrupt_context(void)
-{
- int ret;
- asm("mrs %0, ipsr \n" /* read exception number */
- "lsl %0, #23 \n":"=r"(ret)); /* exception bits are the 9 LSB */
- return ret;
-}
-
-#ifdef CONFIG_TASK_PROFILING
-static inline int get_interrupt_context(void)
-{
- int ret;
- asm("mrs %0, ipsr \n":"=r"(ret)); /* read exception number */
- return ret & 0x1ff; /* exception bits are the 9 LSB */
-}
-#endif
-
-task_id_t task_get_current(void)
-{
-#ifdef CONFIG_DEBUG_BRINGUP
- /* If we haven't done a context switch then our task ID isn't valid */
- ASSERT(current_task != (task_ *)scratchpad);
-#endif
- return current_task - tasks;
-}
-
-uint32_t *task_get_event_bitmap(task_id_t tskid)
-{
- task_ *tsk = __task_id_to_ptr(tskid);
- return &tsk->events;
-}
-
-int task_start_called(void)
-{
- return start_called;
-}
-
-/**
- * Scheduling system call
- */
-void svc_handler(int desched, task_id_t resched)
-{
- task_ *current, *next;
-#ifdef CONFIG_TASK_PROFILING
- int exc = get_interrupt_context();
- uint32_t t;
-#endif
-
- /*
- * Push the priority to -1 until the return, to avoid being
- * interrupted.
- */
- asm volatile("cpsid f\n"
- "isb\n");
-
-#ifdef CONFIG_TASK_PROFILING
- /*
- * SVCall isn't triggered via DECLARE_IRQ(), so it needs to track its
- * start time explicitly.
- */
- if (exc == 0xb) {
- exc_start_time = get_time().le.lo;
- svc_calls++;
- }
-#endif
-
- current = current_task;
-
-#ifdef CONFIG_DEBUG_STACK_OVERFLOW
- if (*current->stack != STACK_UNUSED_VALUE) {
- panic_printf("\n\nStack overflow in %s task!\n",
- task_names[current - tasks]);
-#ifdef CONFIG_SOFTWARE_PANIC
- software_panic(PANIC_SW_STACK_OVERFLOW, current - tasks);
-#endif
- }
-#endif
-
- if (desched && !current->events) {
- /*
- * Remove our own ready bit (current - tasks is same as
- * task_get_current())
- */
- tasks_ready &= ~(1 << (current - tasks));
- }
- ASSERT(resched <= TASK_ID_COUNT);
- tasks_ready |= 1 << resched;
-
- ASSERT(tasks_ready & tasks_enabled);
- next = __task_id_to_ptr(__fls(tasks_ready & tasks_enabled));
-
-#ifdef CONFIG_TASK_PROFILING
- /* Track time in interrupts */
- t = get_time().le.lo;
- exc_total_time += (t - exc_start_time);
-
- /*
- * Bill the current task for time between the end of the last interrupt
- * and the start of this one.
- */
- current->runtime += (exc_start_time - exc_end_time);
- exc_end_time = t;
-#else
- /*
- * Don't chain here from interrupts until the next time an interrupt
- * sets an event.
- */
- need_resched_or_profiling = 0;
-#endif
-
- /* Nothing to do */
- if (next == current)
- return;
-
- /* Switch to new task */
-#ifdef CONFIG_TASK_PROFILING
- task_switches++;
-#endif
- current_task = next;
- __switchto(current, next);
-}
-
-void __schedule(int desched, int resched)
-{
- register int p0 asm("r0") = desched;
- register int p1 asm("r1") = resched;
-
- asm("svc 0"::"r"(p0),"r"(p1));
-}
-
-#ifdef CONFIG_TASK_PROFILING
-void __keep task_start_irq_handler(void *excep_return)
-{
- /*
- * Get time before checking depth, in case this handler is
- * pre-empted.
- */
- uint32_t t = get_time().le.lo;
- int irq = get_interrupt_context() - 16;
-
- /*
- * Track IRQ distribution. No need for atomic add, because an IRQ
- * can't pre-empt itself.
- */
- if (irq < ARRAY_SIZE(irq_dist))
- irq_dist[irq]++;
-
- /*
- * Continue iff a rescheduling event happened or profiling is active,
- * and we are not called from another exception (this must match the
- * logic for when we chain to svc_handler() below).
- */
- if (!need_resched_or_profiling || (((uint32_t)excep_return & 0xf) == 1))
- return;
-
- exc_start_time = t;
-}
-#endif
-
-void __keep task_resched_if_needed(void *excep_return)
-{
- /*
- * Continue iff a rescheduling event happened or profiling is active,
- * and we are not called from another exception.
- */
- if (!need_resched_or_profiling || (((uint32_t)excep_return & 0xf) == 1))
- return;
-
- svc_handler(0, 0);
-}
-
-static uint32_t __wait_evt(int timeout_us, task_id_t resched)
-{
- task_ *tsk = current_task;
- task_id_t me = tsk - tasks;
- uint32_t evt;
- int ret __attribute__((unused));
-
- /*
-	 * Scheduling a task while interrupts are disabled will result in a
-	 * Forced Hard Fault because:
-	 * - Disabling interrupts using 'cpsid i' also disables the SVCall
-	 *   handler (because it has a configurable priority)
-	 * - Escalation to Hard Fault (also known as 'priority escalation')
-	 *   occurs when the handler for that fault is not enabled
- */
- ASSERT(is_interrupt_enabled());
- ASSERT(!in_interrupt_context());
-
- if (timeout_us > 0) {
- timestamp_t deadline = get_time();
- deadline.val += timeout_us;
- ret = timer_arm(deadline, me);
- ASSERT(ret == EC_SUCCESS);
- }
- while (!(evt = atomic_clear(&tsk->events))) {
- /* Remove ourself and get the next task in the scheduler */
- __schedule(1, resched);
- resched = TASK_ID_IDLE;
- }
- if (timeout_us > 0) {
- timer_cancel(me);
- /* Ensure timer event is clear, we no longer care about it */
- atomic_clear_bits(&tsk->events, TASK_EVENT_TIMER);
- }
- return evt;
-}
-
-uint32_t task_set_event(task_id_t tskid, uint32_t event)
-{
- task_ *receiver = __task_id_to_ptr(tskid);
- ASSERT(receiver);
-
- /* Set the event bit in the receiver message bitmap */
- atomic_or(&receiver->events, event);
-
- /* Re-schedule if priorities have changed */
- if (in_interrupt_context() || !is_interrupt_enabled()) {
- /* The receiver might run again */
- atomic_or(&tasks_ready, 1 << tskid);
-#ifndef CONFIG_TASK_PROFILING
- if (start_called)
- need_resched_or_profiling = 1;
-#endif
- } else {
- __schedule(0, tskid);
- }
-
- return 0;
-}
-
-uint32_t task_wait_event(int timeout_us)
-{
- return __wait_evt(timeout_us, TASK_ID_IDLE);
-}
-
-uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
-{
- uint64_t deadline = get_time().val + timeout_us;
- uint32_t events = 0;
- int time_remaining_us = timeout_us;
-
- /* Add the timer event to the mask so we can indicate a timeout */
- event_mask |= TASK_EVENT_TIMER;
-
- while (!(events & event_mask)) {
- /* Collect events to re-post later */
- events |= __wait_evt(time_remaining_us, TASK_ID_IDLE);
-
- time_remaining_us = deadline - get_time().val;
- if (timeout_us > 0 && time_remaining_us <= 0) {
- /* Ensure we return a TIMER event if we timeout */
- events |= TASK_EVENT_TIMER;
- break;
- }
- }
-
- /* Re-post any other events collected */
- if (events & ~event_mask)
- atomic_or(&current_task->events, events & ~event_mask);
-
- return events & event_mask;
-}
-
-void task_enable_all_tasks(void)
-{
- /* Mark all tasks as ready and able to run. */
- tasks_ready = tasks_enabled = BIT(TASK_ID_COUNT) - 1;
- /* Reschedule the highest priority task. */
- if (is_interrupt_enabled())
- __schedule(0, 0);
-}
-
-void task_enable_task(task_id_t tskid)
-{
- atomic_or(&tasks_enabled, BIT(tskid));
-}
-
-void task_disable_task(task_id_t tskid)
-{
- atomic_clear_bits(&tasks_enabled, BIT(tskid));
-
- if (!in_interrupt_context() && is_interrupt_enabled() &&
- tskid == task_get_current())
- __schedule(0, 0);
-}
-
-void task_enable_irq(int irq)
-{
- CPU_NVIC_EN(irq / 32) = 1 << (irq % 32);
-}
-
-void __keep task_disable_irq(int irq)
-{
- CPU_NVIC_DIS(irq / 32) = 1 << (irq % 32);
-}
-
-void task_clear_pending_irq(int irq)
-{
- CPU_NVIC_UNPEND(irq / 32) = 1 << (irq % 32);
-}
-
-void task_trigger_irq(int irq)
-{
- CPU_NVIC_SWTRIG = irq;
-}
-
-static uint32_t init_task_context(task_id_t id)
-{
- uint32_t *sp;
- /* Stack size in words */
- uint32_t ssize = tasks_init[id].stack_size / 4;
-
- /*
-	 * Update stack used by the first frame: 8 words for the normal
-	 * exception frame, plus 8 for R4-R11. Even when using the FPU,
-	 * the first frame does not store FP regs.
- */
- sp = tasks[id].stack + ssize - 16;
- tasks[id].sp = (uint32_t)sp;
-
- /* Initial context on stack (see __switchto()) */
- sp[8] = tasks_init[id].r0; /* r0 */
- sp[13] = (uint32_t)task_exit_trap; /* lr */
- sp[14] = tasks_init[id].pc; /* pc */
- sp[15] = 0x01000000; /* psr */
-
- /* Fill unused stack; also used to detect stack overflow. */
- for (sp = tasks[id].stack; sp < (uint32_t *)tasks[id].sp; sp++)
- *sp = STACK_UNUSED_VALUE;
-
- return ssize;
-}
-
-#ifdef CONFIG_TASK_RESET_LIST
-
-/*
- * Re-initializes a task stack to its initial state, and marks it ready.
- * The task reset lock must be held prior to calling this function.
- */
-static void do_task_reset(task_id_t id)
-{
- interrupt_disable();
- init_task_context(id);
- tasks_ready |= 1 << id;
- /* TODO: Clear all pending events? */
- interrupt_enable();
-}
-
-/* We can't pass a parameter to a deferred call. Use this instead. */
-/* Mask of task IDs waiting to be reset. */
-static uint32_t deferred_reset_task_ids;
-
-/* Tasks may call this function if they want to reset themselves. */
-static void deferred_task_reset(void)
-{
- while (deferred_reset_task_ids) {
- task_id_t reset_id = __fls(deferred_reset_task_ids);
-
- atomic_clear_bits(&deferred_reset_task_ids, 1 << reset_id);
- do_task_reset(reset_id);
- }
-}
-DECLARE_DEFERRED(deferred_task_reset);
-
-/*
- * Helper for updating task_reset state atomically. Checks the current state,
- * and if it matches if_value, updates the state to new_value, and returns
- * TRUE.
- */
-static int update_reset_state(uint32_t *state,
- uint32_t if_value,
- uint32_t to_value)
-{
- int update;
-
- interrupt_disable();
- update = *state == if_value;
- if (update)
- *state = to_value;
- interrupt_enable();
-
- return update;
-}
-
-/*
- * Helper that acquires the reset lock iff it is not currently held.
- * Returns TRUE if the lock was acquired.
- */
-static inline int try_acquire_reset_lock(uint32_t *state)
-{
- return update_reset_state(state,
- /* if the lock is not held */
- TASK_RESET_STATE_UNLOCKED,
- /* acquire it */
- TASK_RESET_STATE_LOCKED);
-}
-
-/*
- * Helper that releases the reset lock iff it is currently held, and there
- * are no pending resets. Returns TRUE if the lock was released.
- */
-static inline int try_release_reset_lock(uint32_t *state)
-{
- return update_reset_state(state,
- /* if the lock is held, with no waiters */
- TASK_RESET_STATE_LOCKED,
- /* release it */
- TASK_RESET_STATE_UNLOCKED);
-}
-
-/*
- * Helper to cause the current task to sleep indefinitely; useful if the
- * calling task just needs to block until it is reset.
- */
-static inline void sleep_forever(void)
-{
- while (1)
- usleep(-1);
-}
-
-void task_enable_resets(void)
-{
- task_id_t id = task_get_current();
- uint32_t *state = &task_reset_state[id];
-
- if (*state == TASK_RESET_UNSUPPORTED) {
- cprints(CC_TASK,
- "%s called from non-resettable task, id: %d",
- __func__, id);
- return;
- }
-
- /*
- * A correctly written resettable task will only call this function
- * if resets are currently disabled; this implies that this task
- * holds the reset lock.
- */
-
- if (*state == TASK_RESET_STATE_UNLOCKED) {
- cprints(CC_TASK,
- "%s called, but resets already enabled, id: %d",
- __func__, id);
- return;
- }
-
- /*
- * Attempt to release the lock. If we cannot, it means there are tasks
- * waiting for a reset.
- */
- if (try_release_reset_lock(state))
- return;
-
- /* People are waiting for us to reset; schedule a reset. */
- atomic_or(&deferred_reset_task_ids, 1 << id);
- /*
- * This will always trigger a deferred call after our new ID was
- * written. If the hook call is currently executing, it will run
- * again.
- */
- hook_call_deferred(&deferred_task_reset_data, 0);
- /* Wait to be reset. */
- sleep_forever();
-}
-
-void task_disable_resets(void)
-{
- task_id_t id = task_get_current();
- uint32_t *state = &task_reset_state[id];
-
- if (*state == TASK_RESET_UNSUPPORTED) {
- cprints(CC_TASK,
- "%s called from non-resettable task, id %d",
- __func__, id);
- return;
- }
-
- /*
- * A correctly written resettable task will only call this function
- * if resets are currently enabled; this implies that this task does
- * not hold the reset lock.
- */
-
- if (try_acquire_reset_lock(state))
- return;
-
- /*
- * If we can't acquire the lock, we are about to be reset by another
- * task.
- */
- sleep_forever();
-}
-
-int task_reset_cleanup(void)
-{
- task_id_t id = task_get_current();
- uint32_t *state = &task_reset_state[id];
-
- /*
-	 * If the task has never been reset, state will be
-	 * TASK_RESET_STATE_UNLOCKED.
- *
- * If the task was reset, the TASK_RESET_LOCK bit will be set, and
- * there may additionally be bits representing tasks we must notify
- * that we have reset.
- */
-
- /*
- * Only this task can unset the lock bit so we can read this safely,
- * even though other tasks may be modifying the state to add themselves
- * as waiters.
- */
- int cleanup_req = *state & TASK_RESET_LOCK;
-
- /*
- * Attempt to release the lock. We can only do this when there are no
- * tasks waiting to be notified that we have been reset, so we loop
- * until no tasks are waiting.
- *
- * Other tasks may still be trying to reset us at this point; if they
- * do, they will add themselves to the list of tasks we must notify. We
- * will simply notify them (multiple times if necessary) until we are
- * free to unlock.
- */
- if (cleanup_req) {
- while (!try_release_reset_lock(state)) {
- /* Find the first waiter to notify. */
- task_id_t notify_id = __fls(
- *state & TASK_RESET_WAITERS_MASK);
- /*
- * Remove the task from waiters first, so that
- * when it wakes after being notified, it is in
- * a consistent state (it should not be waiting
- * to be notified and running).
- * After being notified, the task may try to
- * reset us again; if it does, it will just add
- * itself back to the list of tasks to notify,
- * and we will notify it again.
- */
- atomic_clear_bits(state, 1 << notify_id);
- /*
- * Skip any invalid ids set by tasks that
- * requested a non-blocking reset.
- */
- if (notify_id < TASK_ID_COUNT)
- task_set_event(notify_id,
- TASK_EVENT_RESET_DONE);
- }
- }
-
- return cleanup_req;
-}
-
-int task_reset(task_id_t id, int wait)
-{
- task_id_t current = task_get_current();
- uint32_t *state = &task_reset_state[id];
- uint32_t waiter_id;
- int resets_disabled;
-
- if (id == current)
- return EC_ERROR_INVAL;
-
- /*
- * This value is only set at compile time, and will never be modified.
- */
- if (*state == TASK_RESET_UNSUPPORTED)
- return EC_ERROR_INVAL;
-
- /*
- * If we are not blocking for reset, we use an invalid task id to notify
- * the task that _someone_ wanted it to reset, but didn't want to be
- * notified when the reset is complete.
- */
- waiter_id = 1 << (wait ? current : TASK_ID_COUNT);
-
- /*
- * Try and take the lock. If we can't have it, just notify the task we
- * tried; it will reset itself when it next tries to release the lock.
- */
- interrupt_disable();
- resets_disabled = *state & TASK_RESET_LOCK;
- if (resets_disabled)
- *state |= waiter_id;
- else
- *state |= TASK_RESET_LOCK;
- interrupt_enable();
-
- if (!resets_disabled) {
- /* We got the lock, do the reset immediately. */
- do_task_reset(id);
- } else if (wait) {
- /*
- * We couldn't get the lock, and have been asked to block for
- * reset. We have asked the task to reset itself; it will notify
- * us when it has.
- */
- task_wait_event_mask(TASK_EVENT_RESET_DONE, -1);
- }
-
- return EC_SUCCESS;
-}
-
-#endif /* CONFIG_TASK_RESET_LIST */
-
-/*
- * Initialize IRQs in the NVIC and set their priorities as defined by the
- * DECLARE_IRQ statements.
- */
-static void __nvic_init_irqs(void)
-{
- /* Get the IRQ priorities section from the linker */
- int exc_calls = __irqprio_end - __irqprio;
- int i;
-
- /* Mask and clear all pending interrupts */
- for (i = 0; i < 5; i++) {
- CPU_NVIC_DIS(i) = 0xffffffff;
- CPU_NVIC_UNPEND(i) = 0xffffffff;
- }
-
- /*
- * Re-enable global interrupts in case they're disabled. On a reboot,
- * they're already enabled; if we've jumped here from another image,
- * they're not.
- */
- interrupt_enable();
-
- /* Set priorities */
- for (i = 0; i < exc_calls; i++) {
- cpu_set_interrupt_priority(__irqprio[i].irq,
- __irqprio[i].priority);
- }
-}
-
-void mutex_lock(struct mutex *mtx)
-{
- uint32_t value;
- uint32_t id;
-
- /*
- * mutex_lock() must not be used in interrupt context (because we wait
- * if there is contention).
- */
- ASSERT(!in_interrupt_context());
-
- /*
- * Task ID is not valid before task_start() (since current_task is
- * scratchpad), and no need for mutex locking before task switching has
- * begun.
- */
- if (!task_start_called())
- return;
-
- id = 1 << task_get_current();
-
- atomic_or(&mtx->waiters, id);
-
- do {
- /* Try to get the lock (set 1 into the lock field) */
- __asm__ __volatile__(" ldrex %0, [%1]\n"
- " teq %0, #0\n"
- " it eq\n"
- " strexeq %0, %2, [%1]\n"
- : "=&r" (value)
- : "r" (&mtx->lock), "r" (2) : "cc");
- /*
-		 * "value" is equal to 1 if the store conditional failed,
-		 * 2 if somebody else owns the mutex, and 0 otherwise.
- */
- if (value == 2)
- /* Contention on the mutex */
- task_wait_event_mask(TASK_EVENT_MUTEX, 0);
- } while (value);
-
- atomic_clear_bits(&mtx->waiters, id);
-}
-
-void mutex_unlock(struct mutex *mtx)
-{
- uint32_t waiters;
- task_ *tsk = current_task;
-
- /*
- * Add a critical section to keep the unlock and the snapshotting of
- * waiters atomic in case a task switching occurs between them.
- */
- interrupt_disable();
- waiters = mtx->waiters;
- mtx->lock = 0;
- interrupt_enable();
-
- while (waiters) {
- task_id_t id = __fls(waiters);
- waiters &= ~BIT(id);
-
- /* Somebody is waiting on the mutex */
- task_set_event(id, TASK_EVENT_MUTEX);
- }
-
- /* Ensure no event is remaining from mutex wake-up */
- atomic_clear_bits(&tsk->events, TASK_EVENT_MUTEX);
-}
-
-void task_print_list(void)
-{
- int i;
-
- ccputs("Task Ready Name Events Time (s) StkUsed\n");
-
- for (i = 0; i < TASK_ID_COUNT; i++) {
- char is_ready = (tasks_ready & (1<<i)) ? 'R' : ' ';
- uint32_t *sp;
-
- int stackused = tasks_init[i].stack_size;
-
- for (sp = tasks[i].stack;
- sp < (uint32_t *)tasks[i].sp && *sp == STACK_UNUSED_VALUE;
- sp++)
- stackused -= sizeof(uint32_t);
-
- ccprintf("%4d %c %-16s %08x %11.6lld %3d/%3d\n", i, is_ready,
- task_names[i], tasks[i].events, tasks[i].runtime,
- stackused, tasks_init[i].stack_size);
- cflush();
- }
-}
-
-int command_task_info(int argc, char **argv)
-{
-#ifdef CONFIG_TASK_PROFILING
- int total = 0;
- int i;
-#endif
-
- task_print_list();
-
-#ifdef CONFIG_TASK_PROFILING
- ccputs("IRQ counts by type:\n");
- cflush();
- for (i = 0; i < ARRAY_SIZE(irq_dist); i++) {
- if (irq_dist[i]) {
- ccprintf("%4d %8d\n", i, irq_dist[i]);
- total += irq_dist[i];
- }
- }
- ccprintf("Service calls: %11d\n", svc_calls);
- ccprintf("Total exceptions: %11d\n", total + svc_calls);
- ccprintf("Task switches: %11d\n", task_switches);
- ccprintf("Task switching started: %11.6lld s\n", task_start_time);
- ccprintf("Time in tasks: %11.6lld s\n",
- get_time().val - task_start_time);
- ccprintf("Time in exceptions: %11.6lld s\n", exc_total_time);
-#endif
-
- return EC_SUCCESS;
-}
-DECLARE_SAFE_CONSOLE_COMMAND(taskinfo, command_task_info,
- NULL,
- "Print task info");
-
-#ifdef CONFIG_CMD_TASKREADY
-static int command_task_ready(int argc, char **argv)
-{
- if (argc < 2) {
- ccprintf("tasks_ready: 0x%08x\n", tasks_ready);
- } else {
- tasks_ready = strtoi(argv[1], NULL, 16);
- ccprintf("Setting tasks_ready to 0x%08x\n", tasks_ready);
- __schedule(0, 0);
- }
-
- return EC_SUCCESS;
-}
-DECLARE_CONSOLE_COMMAND(taskready, command_task_ready,
- "[setmask]",
- "Print/set ready tasks");
-#endif
-
-void task_pre_init(void)
-{
- uint32_t *stack_next = (uint32_t *)task_stacks;
- int i;
-
- /* Fill the task memory with initial values */
- for (i = 0; i < TASK_ID_COUNT; i++) {
- tasks[i].stack = stack_next;
- stack_next += init_task_context(i);
- }
-
- /*
- * Fill in guard value in scratchpad to prevent stack overflow
- * detection failure on the first context switch. This works because
- * the first word in the scratchpad is where the switcher will store
- * sp, so it's ok to blow away.
- */
- ((task_ *)scratchpad)->stack = (uint32_t *)scratchpad;
- *(uint32_t *)scratchpad = STACK_UNUSED_VALUE;
-
- /* Initialize IRQs */
- __nvic_init_irqs();
-}
-
-void task_clear_fp_used(void)
-{
- int ctrl;
-
- /* Clear the CONTROL.FPCA bit, which represents FP context active. */
- asm volatile("mrs %0, control" : "=r"(ctrl));
- ctrl &= ~0x4;
- asm volatile("msr control, %0" : : "r"(ctrl));
-
- /* Flush pipeline before returning. */
- asm volatile("isb");
-}
-
-int task_start(void)
-{
-#ifdef CONFIG_TASK_PROFILING
- timestamp_t t = get_time();
-
- task_start_time = t.val;
- exc_end_time = t.le.lo;
-#endif
- start_called = 1;
-
- return __task_start(&need_resched_or_profiling);
-}
-
-#ifdef CONFIG_CMD_TASK_RESET
-static int command_task_reset(int argc, char **argv)
-{
- task_id_t id;
- char *e;
-
- if (argc == 2) {
- id = strtoi(argv[1], &e, 10);
- if (*e)
- return EC_ERROR_PARAM1;
- ccprintf("Resetting task %d\n", id);
- return task_reset(id, 1);
- }
-
- return EC_ERROR_PARAM_COUNT;
-}
-DECLARE_CONSOLE_COMMAND(taskreset, command_task_reset,
- "task_id",
- "Reset a task");
-#endif /* CONFIG_CMD_TASK_RESET */
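
For context on the event API deleted above, a minimal sketch of the usual producer/consumer pattern built on task_set_event() and task_wait_event_mask(). TASK_ID_CONSUMER, TASK_EVENT_DATA and process_data() are hypothetical names used only for illustration; real boards define task IDs through CONFIG_TASK_LIST and pick their own event bits:

#include "common.h"
#include "task.h"

#define TASK_EVENT_DATA BIT(0)	/* hypothetical event bit */

/* Called from an ISR: posts the event; rescheduling happens on IRQ exit. */
void producer_irq(void)
{
	task_set_event(TASK_ID_CONSUMER, TASK_EVENT_DATA);
}

/* Task body: block up to 100 ms, then check what woke us. */
void consumer_task(void *unused)
{
	while (1) {
		uint32_t evt = task_wait_event_mask(TASK_EVENT_DATA, 100000);

		if (evt & TASK_EVENT_DATA)
			process_data();	/* hypothetical handler */
		/* evt & TASK_EVENT_TIMER means we timed out instead */
	}
}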
diff --git a/core/cortex-m/uldivmod.S b/core/cortex-m/uldivmod.S
deleted file mode 120000
index 7047a8c5d4..0000000000
--- a/core/cortex-m/uldivmod.S
+++ /dev/null
@@ -1 +0,0 @@
-../../third_party/libaeabi-cortexm0/core/cortex-m/uldivmod.S \ No newline at end of file
diff --git a/core/cortex-m/vecttable.c b/core/cortex-m/vecttable.c
deleted file mode 100644
index 10b4b22422..0000000000
--- a/core/cortex-m/vecttable.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/* Copyright 2018 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- *
- * Cortex-M CPU vector table
- */
-
-#ifndef ___INIT
-#define ___INIT
-#include "config.h"
-#include <task.h>
-#endif
-
-typedef void (*func)(void);
-
-#ifndef PASS
-#define PASS 1
-#endif
-
-#if PASS == 1
-/* Default exception handler */
-void __attribute__((used, naked)) default_handler(void);
-void default_handler()
-{
- asm(
- ".thumb_func\n"
- " b exception_panic"
- );
-}
-
-#define table(x) x
-
-#define weak_with_default __attribute__((used,weak,alias("default_handler")))
-
-#define vec(name) extern void weak_with_default name ## _handler(void);
-#define irq(num) vec(irq_ ## num)
-
-#define item(name) extern void name(void);
-#define null
-
-extern void stack_end(void); /* not technically correct, it's just a pointer */
-extern void reset(void);
-
-#pragma GCC diagnostic push
-#if __GNUC__ >= 8
-#pragma GCC diagnostic ignored "-Wattribute-alias"
-#endif
-/* Call default_handler if svc_handler is not found (task.c is not built) */
-void weak_with_default svc_handler(int desched, task_id_t resched);
-#pragma GCC diagnostic pop
-
-/*
- * SVC handler helper
- *
- * Work around issue where a late exception can corrupt r0 to r3,
- * see section 2.7 (svc) of Cortex-M3 Application Note 179:
- * http://infocenter.arm.com/help/topic/com.arm.doc.dai0179b/AppsNote179.pdf
- *
- * This approach differs slightly from the one in the document:
- * it only loads r0 (desched) and r1 (resched) for svc_handler.
- */
-void __attribute__((used,naked)) svc_helper_handler(void);
-void svc_helper_handler()
-{
- asm(
- ".thumb_func\n"
- " tst lr, #4 /* see if called from supervisor mode */\n"
- " mrs r2, msp /* get the correct stack pointer into r2 */\n"
- " it ne\n"
- " mrsne r2, psp\n"
- " ldr r1, [r2, #4] /* get regs from stack frame */\n"
- " ldr r0, [r2]\n"
- " b %0 /* call svc_handler */\n"
- :
- : "i"(svc_handler)
- );
-}
-
-#endif /* PASS 1 */
-
-#if PASS == 2
-#undef table
-#undef vec
-#undef irq
-#undef item
-#undef null
-
-/* number of elements before the first irq vector */
-#define IRQ_OFFSET 16
-/* element in the table that is null: extra IRQs are routed there,
- * then finally overwritten
- */
-#define IRQ_UNUSED_OFFSET 8
-
-/* Disable warning that "initializer overrides prior initialization of this
- * subobject", since we are explicitly doing this to handle the unused IRQs.
- */
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Winitializer-overrides"
-#endif /* __clang__ */
-
-#define table(x) \
- const func vectors[] __attribute__((section(".text.vecttable"))) = { \
- x \
- [IRQ_UNUSED_OFFSET] = null \
- };
-
-#define vec(name) name ## _handler,
-#define irq(num) [num < CONFIG_IRQ_COUNT ? num + IRQ_OFFSET : IRQ_UNUSED_OFFSET] = vec(irq_ ## num)
-
-#define item(name) name,
-#define null (void*)0,
-#endif /* PASS 2 */
-
-table(
- item(stack_end)
- item(reset)
- vec(nmi)
- vec(hard_fault)
- vec(mpu_fault)
- vec(bus_fault)
- vec(usage_fault)
- null
- null
- null
- null
- item(svc_helper_handler)
- vec(debug)
- null
- vec(pendsv)
- vec(sys_tick)
- irq(0)
- irq(1)
- irq(2)
- irq(3)
- irq(4)
- irq(5)
- irq(6)
- irq(7)
- irq(8)
- irq(9)
- irq(10)
- irq(11)
- irq(12)
- irq(13)
- irq(14)
- irq(15)
- irq(16)
- irq(17)
- irq(18)
- irq(19)
- irq(20)
- irq(21)
- irq(22)
- irq(23)
- irq(24)
- irq(25)
- irq(26)
- irq(27)
- irq(28)
- irq(29)
- irq(30)
- irq(31)
- irq(32)
- irq(33)
- irq(34)
- irq(35)
- irq(36)
- irq(37)
- irq(38)
- irq(39)
- irq(40)
- irq(41)
- irq(42)
- irq(43)
- irq(44)
- irq(45)
- irq(46)
- irq(47)
- irq(48)
- irq(49)
- irq(50)
- irq(51)
- irq(52)
- irq(53)
- irq(54)
- irq(55)
- irq(56)
- irq(57)
- irq(58)
- irq(59)
- irq(60)
- irq(61)
- irq(62)
- irq(63)
- irq(64)
- irq(65)
- irq(66)
- irq(67)
- irq(68)
- irq(69)
- irq(70)
- irq(71)
- irq(72)
- irq(73)
- irq(74)
- irq(75)
- irq(76)
- irq(77)
- irq(78)
- irq(79)
- irq(80)
- irq(81)
- irq(82)
- irq(83)
- irq(84)
- irq(85)
- irq(86)
- irq(87)
- irq(88)
- irq(89)
- irq(90)
- irq(91)
- irq(92)
- irq(93)
- irq(94)
- irq(95)
- irq(96)
- irq(97)
- irq(98)
- irq(99)
- irq(100)
- irq(101)
- irq(102)
- irq(103)
- irq(104)
- irq(105)
- irq(106)
- irq(107)
- irq(108)
- irq(109)
- irq(110)
- irq(111)
- irq(112)
- irq(113)
- irq(114)
- irq(115)
- irq(116)
- irq(117)
- irq(118)
- irq(119)
- irq(120)
- irq(121)
- irq(122)
- irq(123)
- irq(124)
- irq(125)
- irq(126)
- irq(127)
- irq(128)
- irq(129)
- irq(130)
- irq(131)
- irq(132)
- irq(133)
- irq(134)
- irq(135)
- irq(136)
- irq(137)
- irq(138)
- irq(139)
- irq(140)
- irq(141)
- irq(142)
- irq(143)
- irq(144)
- irq(145)
- irq(146)
- irq(147)
- irq(148)
- irq(149)
- irq(150)
- irq(151)
- irq(152)
- irq(153)
- irq(154)
- irq(155)
- irq(156)
- irq(157)
- irq(158)
- irq(159)
- irq(160)
- irq(161)
- irq(162)
- irq(163)
- irq(164)
- irq(165)
- irq(166)
- irq(167)
- irq(168)
- irq(169)
- irq(170)
- irq(171)
- irq(172)
- irq(173)
- irq(174)
- irq(175)
- irq(176)
- irq(177)
- irq(178)
- irq(179)
- irq(180)
- irq(181)
- irq(182)
- irq(183)
- irq(184)
- irq(185)
- irq(186)
- irq(187)
- irq(188)
- irq(189)
- irq(190)
- irq(191)
- irq(192)
- irq(193)
- irq(194)
- irq(195)
- irq(196)
- irq(197)
- irq(198)
- irq(199)
- irq(200)
- irq(201)
- irq(202)
- irq(203)
- irq(204)
- irq(205)
- irq(206)
- irq(207)
- irq(208)
- irq(209)
- irq(210)
- irq(211)
- irq(212)
- irq(213)
- irq(214)
- irq(215)
- irq(216)
- irq(217)
- irq(218)
- irq(219)
- irq(220)
- irq(221)
- irq(222)
- irq(223)
- irq(224)
- irq(225)
- irq(226)
- irq(227)
- irq(228)
- irq(229)
- irq(230)
- irq(231)
- irq(232)
- irq(233)
- irq(234)
- irq(235)
- irq(236)
- irq(237)
- irq(238)
- irq(239)
- irq(240)
- irq(241)
- irq(242)
- irq(243)
- irq(244)
- irq(245)
- irq(246)
- irq(247)
- irq(248)
- irq(249)
- irq(250)
- irq(251)
- irq(252)
- irq(253)
- irq(254)
-)
-
-#if PASS == 2
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif /* __clang__ */
-#endif
-
-#if PASS == 1
-#undef PASS
-#define PASS 2
-#include "vecttable.c"
-#endif
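
A small standalone sketch (not in the tree) of the designated-initializer trick that the irq()/table() macros above rely on: every out-of-range IRQ collapses onto index IRQ_UNUSED_OFFSET, and the final [IRQ_UNUSED_OFFSET] = null entry wins because a later designated initializer overrides an earlier one for the same index. The constants below are stand-ins chosen only to keep the example small:

#include <stdio.h>

#define COUNT   4	/* stands in for CONFIG_IRQ_COUNT */
#define OFFSET  1	/* stands in for IRQ_OFFSET */
#define UNUSED  0	/* stands in for IRQ_UNUSED_OFFSET */
#define slot(n) [(n) < COUNT ? (n) + OFFSET : UNUSED] = (n)

static const int vec[] = {
	slot(0), slot(1), slot(2), slot(3),	/* in-range entries */
	slot(7), slot(9),			/* out of range: land on UNUSED */
	[UNUSED] = -1,				/* final override of the unused slot */
};

int main(void)
{
	/* Prints -1 0 1 2 3: the out-of-range entries left no trace. */
	for (unsigned int i = 0; i < sizeof(vec) / sizeof(vec[0]); i++)
		printf("vec[%u] = %d\n", i, vec[i]);
	return 0;
}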
diff --git a/core/cortex-m/watchdog.c b/core/cortex-m/watchdog.c
deleted file mode 100644
index c9faf54b2b..0000000000
--- a/core/cortex-m/watchdog.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-/* Watchdog common code */
-
-#include "common.h"
-#include "cpu.h"
-#include "panic.h"
-#include "task.h"
-#include "timer.h"
-#include "watchdog.h"
-
-/*
- * As defined by Armv7-M Reference Manual B1.5.6 "Exception Entry Behavior",
- * the structure of the saved context on the stack is:
- * r0, r1, r2, r3, r12, lr, pc, psr, ...
- */
-#define STACK_IDX_REG_LR 5
-#define STACK_IDX_REG_PC 6
-
-void __keep watchdog_trace(uint32_t excep_lr, uint32_t excep_sp)
-{
- uint32_t psp;
- uint32_t *stack;
-
- asm("mrs %0, psp" : "=r"(psp));
- if ((excep_lr & 0xf) == 1) {
- /* we were already in exception context */
- stack = (uint32_t *)excep_sp;
- } else {
- /* we were in task context */
- stack = (uint32_t *)psp;
- }
-
- panic_set_reason(PANIC_SW_WATCHDOG, stack[STACK_IDX_REG_PC],
- (excep_lr & 0xf) == 1 ? 0xff : task_get_current());
-
- /*
- * This is our last breath, the last opportunity to sort out all
- * matters. Flush and invalidate D-cache if cache enabled.
- */
- if (IS_ENABLED(CONFIG_ARMV7M_CACHE))
- cpu_clean_invalidate_dcache();
-
- panic_printf("### WATCHDOG PC=%08x / LR=%08x / pSP=%08x ",
- stack[STACK_IDX_REG_PC], stack[STACK_IDX_REG_LR], psp);
- if ((excep_lr & 0xf) == 1)
- panic_puts("(exc) ###\n");
- else
- panic_printf("(task %d) ###\n", task_get_current());
-
-	/* If we are blocked in a high-priority interrupt handler, the following
-	 * debug messages might not appear, but they are useless in that situation. */
- timer_print_info();
- task_print_list();
-}