Diffstat (limited to 'bl31')
 -rw-r--r--  bl31/aarch64/ea_delegate.S         | 39
 -rw-r--r--  bl31/aarch64/runtime_exceptions.S  | 40
 -rw-r--r--  bl31/bl31.ld.S                     | 16
 3 files changed, 45 insertions(+), 50 deletions(-)
diff --git a/bl31/aarch64/ea_delegate.S b/bl31/aarch64/ea_delegate.S
index 0661583ca..9419476ce 100644
--- a/bl31/aarch64/ea_delegate.S
+++ b/bl31/aarch64/ea_delegate.S
@@ -16,9 +16,8 @@
#include <context.h>
.globl handle_lower_el_ea_esb
- .globl handle_lower_el_async_ea
- .globl enter_lower_el_sync_ea
- .globl enter_lower_el_async_ea
+ .globl handle_lower_el_sync_ea
+ .globl handle_lower_el_async_ea
/*
@@ -42,17 +41,12 @@ endfunc handle_lower_el_ea_esb
* Implementation Defined Exceptions. If any other kind of exception is detected,
* then this function reports unhandled exception.
*
- * Since it's part of exception vector, this function doesn't expect any GP
- * registers to have been saved. It delegates the handling of the EA to platform
- * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ * It delegates the handling of the EA to platform handler, and upon successfully
+ * handling the EA, exits EL3; otherwise panics.
+ *
+ * This function assumes x30 has been saved.
*/
-func enter_lower_el_sync_ea
- /*
- * Explicitly save x30 so as to free up a register and to enable
- * branching.
- */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
+func handle_lower_el_sync_ea
mrs x30, esr_el3
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
@@ -114,24 +108,19 @@ func enter_lower_el_sync_ea
/* Synchronous exceptions other than the above are assumed to be EA */
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
no_ret report_unhandled_exception
-endfunc enter_lower_el_sync_ea
+endfunc handle_lower_el_sync_ea
/*
* This function handles SErrors from lower ELs.
*
- * Since it's part of exception vector, this function doesn't expect any GP
- * registers to have been saved. It delegates the handling of the EA to platform
- * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ * It delegates the handling of the EA to platform handler, and upon successfully
+ * handling the EA, exits EL3; otherwise panics.
+ *
+ * This function assumes x30 has been saved.
*/
-func enter_lower_el_async_ea
- /*
- * Explicitly save x30 so as to free up a register and to enable
- * branching
- */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+func handle_lower_el_async_ea
-handle_lower_el_async_ea:
/*
* Save general purpose and ARMv8.3-PAuth registers (if enabled).
* If Secure Cycle Counter is not disabled in MDCR_EL3 when
@@ -153,7 +142,7 @@ handle_lower_el_async_ea:
/* el3_exit assumes SP_EL0 on entry */
msr spsel, #MODE_SP_EL0
b el3_exit
-endfunc enter_lower_el_async_ea
+endfunc handle_lower_el_async_ea
/*
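
Note on the ea_delegate.S hunks above: the renamed handlers no longer store LR themselves; they assume the vector entry has already saved x30 into the context (via the save_x30 macro added to runtime_exceptions.S below), so x30 is free scratch from the first instruction. A minimal sketch of the resulting handler prologue, not part of the patch and using a hypothetical label (the constants and the func/endfunc macros are the ones already used in the diff):

	/* Sketch only: x30 was saved by the vector entry, so it can be clobbered. */
	func handle_lower_el_sync_ea_sketch		/* hypothetical label */
		mrs	x30, esr_el3
		ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
		/* ... dispatch on the exception class; report if unhandled ... */
	endfunc handle_lower_el_sync_ea_sketch
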
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 2f00e7a85..500e87b9a 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -40,6 +40,14 @@
.globl serror_aarch32
/*
+ * Save LR and make x30 available as most of the routines in vector entry
+ * need a free register
+ */
+ .macro save_x30
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ .endm
+
+ /*
* Macro that prepares entry to EL3 upon taking an exception.
*
* With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
@@ -58,12 +66,6 @@
/* Unmask the SError interrupt */
msr daifclr, #DAIF_ABT_BIT
- /*
- * Explicitly save x30 so as to free up a register and to enable
- * branching
- */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
/* Check for SErrors synchronized by the ESB instruction */
mrs x30, DISR_EL1
tbz x30, #DISR_A_BIT, 1f
@@ -108,11 +110,7 @@
/* Use ISB for the above unmask operation to take effect immediately */
isb
- /*
- * Refer Note 1.
- * No need to restore X30 as macros following this modify x30 anyway.
- */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ /* Refer Note 1. */
mov x30, #1
str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
dmb sy
@@ -153,7 +151,7 @@
/* Synchronous exceptions other than the above are assumed to be EA */
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
- b enter_lower_el_sync_ea
+ b handle_lower_el_sync_ea
.endm
@@ -316,7 +314,7 @@ vector_entry serror_sp_elx
* triggered due to explicit synchronization in EL3. Refer Note 1.
*/
/* Assumes SP_EL3 on entry */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ save_x30
ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
cbnz x30, 1f
@@ -338,32 +336,36 @@ vector_entry sync_exception_aarch64
* to a valid cpu context where the general purpose and system register
* state can be saved.
*/
+ save_x30
apply_at_speculative_wa
check_and_unmask_ea
handle_sync_exception
end_vector_entry sync_exception_aarch64
vector_entry irq_aarch64
+ save_x30
apply_at_speculative_wa
check_and_unmask_ea
handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64
vector_entry fiq_aarch64
+ save_x30
apply_at_speculative_wa
check_and_unmask_ea
handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64
vector_entry serror_aarch64
+ save_x30
apply_at_speculative_wa
#if RAS_EXTENSION
msr daifclr, #DAIF_ABT_BIT
- b enter_lower_el_async_ea
#else
check_and_unmask_ea
- b handle_lower_el_async_ea
#endif
+ b handle_lower_el_async_ea
+
end_vector_entry serror_aarch64
/* ---------------------------------------------------------------------
@@ -377,32 +379,36 @@ vector_entry sync_exception_aarch32
* to a valid cpu context where the general purpose and system register
* state can be saved.
*/
+ save_x30
apply_at_speculative_wa
check_and_unmask_ea
handle_sync_exception
end_vector_entry sync_exception_aarch32
vector_entry irq_aarch32
+ save_x30
apply_at_speculative_wa
check_and_unmask_ea
handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32
vector_entry fiq_aarch32
+ save_x30
apply_at_speculative_wa
check_and_unmask_ea
handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32
vector_entry serror_aarch32
+ save_x30
apply_at_speculative_wa
#if RAS_EXTENSION
msr daifclr, #DAIF_ABT_BIT
- b enter_lower_el_async_ea
#else
check_and_unmask_ea
- b handle_lower_el_async_ea
#endif
+ b handle_lower_el_async_ea
+
end_vector_entry serror_aarch32
#ifdef MONITOR_TRAPS
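
After this patch, every lower-EL vector entry in runtime_exceptions.S shares the same prologue: save_x30 first, then the speculative-AT workaround and the external-abort unmask/check, then the actual handler. A minimal sketch of that common shape, with an illustrative vector name (the macros are the ones shown in the diff):

	vector_entry lower_el_irq_sketch		/* illustrative name only */
		save_x30		/* str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] */
		apply_at_speculative_wa
		check_and_unmask_ea
		handle_interrupt_exception lower_el_irq_sketch
	end_vector_entry lower_el_irq_sketch

The SError vectors differ only in how the SError is unmasked (msr daifclr, #DAIF_ABT_BIT with RAS_EXTENSION, check_and_unmask_ea otherwise); both #if branches now fall through to the single b handle_lower_el_async_ea.
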
diff --git a/bl31/bl31.ld.S b/bl31/bl31.ld.S
index 5d3139b60..c8290587e 100644
--- a/bl31/bl31.ld.S
+++ b/bl31/bl31.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -66,7 +66,7 @@ SECTIONS {
__RODATA_END__ = .;
} >RAM
#else /* SEPARATE_CODE_AND_RODATA */
- ro . : {
+ .ro . : {
__RO_START__ = .;
*bl31_entrypoint.o(.text*)
@@ -111,7 +111,7 @@ SECTIONS {
* There's no need to include this into the RO section of BL31 because it
* doesn't need to be accessed by BL31.
*/
- spm_shim_exceptions : ALIGN(PAGE_SIZE) {
+ .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
__SPM_SHIM_EXCEPTIONS_START__ = .;
*(.spm_shim_exceptions)
@@ -121,9 +121,9 @@ SECTIONS {
__SPM_SHIM_EXCEPTIONS_END__ = .;
} >SPM_SHIM_EXCEPTIONS_VMA AT>RAM
- PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(spm_shim_exceptions));
+ PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));
- . = LOADADDR(spm_shim_exceptions) + SIZEOF(spm_shim_exceptions);
+ . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
#endif /* SPM_MM */
__RW_START__ = .;
@@ -162,15 +162,15 @@ SECTIONS {
* not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
- coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
+ .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
/*
* Bakery locks are stored in coherent memory. Each lock's data is
* contiguous and fully allocated by the compiler.
*/
- *(bakery_lock)
- *(tzfw_coherent_mem)
+ *(.bakery_lock)
+ *(.tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
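
The linker-script hunks above prefix the remaining section names with a period (ro, spm_shim_exceptions, coherent_ram, bakery_lock and tzfw_coherent_mem become .ro, .spm_shim_exceptions, and so on). Any object that wants its data collected by *(.bakery_lock) or *(.tzfw_coherent_mem) therefore has to emit the dotted input-section name. A minimal assembly sketch, with a hypothetical symbol and not taken from the patch:

	/* Sketch only: reserve 8 bytes in the renamed ".bakery_lock" section. */
	.section .bakery_lock, "aw", %nobits
	.align	3
example_lock:					/* hypothetical symbol name */
	.space	8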