author    Dimitris Papastamos <dimitris.papastamos@arm.com>  2018-05-16 11:36:14 +0100
committer Dimitris Papastamos <dimitris.papastamos@arm.com>  2018-05-23 12:45:48 +0100
commit    fe007b2e15ec7b569c07fedbd9bfccb5ed742eec (patch)
tree      5a4273d0322ec9567c2bb472a10d9d9c3cb9908a
parent    e0865708155826a70e2199a54cab8e90e8d07a32 (diff)
Add support for dynamic mitigation for CVE-2018-3639
Some CPUs may benefit from using a dynamic mitigation approach for
CVE-2018-3639. A new SMC interface is defined to allow software
executing in lower ELs to enable or disable the mitigation for their
execution context.

It should be noted that regardless of the state of the mitigation for
lower ELs, code executing in EL3 is always mitigated against
CVE-2018-3639.

NOTE: This change is a compatibility break for any platform using the
declare_cpu_ops_workaround_cve_2017_5715 macro. Migrate to the
declare_cpu_ops_wa macro instead.

Change-Id: I3509a9337ad217bbd96de9f380c4ff8bf7917013
Signed-off-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
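For readers unfamiliar with the SMC interface referenced above: discovery and control follow the SMCCC v1.1 conventions, using SMCCC_ARCH_FEATURES to probe and SMCCC_ARCH_WORKAROUND_2 to toggle the mitigation. The sketch below shows how a lower-EL caller might use it; smc_call() is a hypothetical helper, not something defined by this patch.

#include <stdint.h>

/* SMCCC v1.1 function IDs used by the new interface. */
#define SMCCC_ARCH_FEATURES       0x80000001U
#define SMCCC_ARCH_WORKAROUND_2   0x80007FFFU

/* Hypothetical helper: issues an SMC with the function ID in w0 and one
 * argument in w1, and returns the result from w0/x0. */
extern int64_t smc_call(uint32_t fid, uint64_t arg);

static void set_cve_2018_3639_mitigation(int enable)
{
	/*
	 * SMCCC_ARCH_FEATURES(SMCCC_ARCH_WORKAROUND_2) returns 0 when this
	 * PE needs dynamic mitigation, 1 when it does not (unaffected or
	 * permanently mitigated), and a negative value when unsupported.
	 */
	if (smc_call(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_2) != 0)
		return;

	/* w1 == 1 enables the mitigation for this PE, w1 == 0 disables it. */
	smc_call(SMCCC_ARCH_WORKAROUND_2, enable ? 1 : 0);
}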
-rw-r--r--  docs/cpu-specific-build-macros.rst         |  5
-rw-r--r--  include/lib/cpus/aarch64/cpu_macros.S      | 21
-rw-r--r--  include/lib/cpus/wa_cve_2018_3639.h        | 12
-rw-r--r--  include/lib/el3_runtime/aarch64/context.h  | 15
-rw-r--r--  lib/cpus/aarch64/cortex_a57.S              |  3
-rw-r--r--  lib/cpus/aarch64/cortex_a72.S              |  3
-rw-r--r--  lib/cpus/aarch64/cortex_a73.S              |  3
-rw-r--r--  lib/cpus/aarch64/cortex_a75.S              |  3
-rw-r--r--  lib/cpus/aarch64/cpu_helpers.S             | 24
-rw-r--r--  lib/cpus/cpu-ops.mk                        | 10
-rw-r--r--  lib/el3_runtime/aarch64/context.S          |  9
-rw-r--r--  services/arm_arch_svc/arm_arch_svc_setup.c | 18
12 files changed, 116 insertions, 10 deletions
diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst
index a89305b7d..c11f64039 100644
--- a/docs/cpu-specific-build-macros.rst
+++ b/docs/cpu-specific-build-macros.rst
@@ -30,6 +30,11 @@ vulnerability workarounds should be applied at runtime.
CVE-2018-3639, in order to comply with the recommendation in the spec
regarding workaround discovery.
+- ``DYNAMIC_WORKAROUND_CVE_2018_3639``: Enables dynamic mitigation for
+ `CVE-2018-3639`_. This build option should be set to 1 if the target
+ platform contains at least 1 CPU that requires dynamic mitigation.
+ Defaults to 0.
+
CPU Errata Workarounds
----------------------
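Note that, as enforced later in this patch by lib/cpus/cpu-ops.mk, DYNAMIC_WORKAROUND_CVE_2018_3639=1 only makes sense together with WORKAROUND_CVE_2018_3639=1. A C-preprocessor sketch of that same dependency (illustrative only, not code from the patch):

/* Illustrative mirror of the make-level check added in lib/cpus/cpu-ops.mk:
 * the dynamic mitigation build option depends on the base workaround flag. */
#if DYNAMIC_WORKAROUND_CVE_2018_3639 && !WORKAROUND_CVE_2018_3639
#error "WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1"
#endif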
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index bfe2449e9..cd8f3e8fb 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -18,6 +18,9 @@
/* Special constant to specify that CPU has no reset function */
#define CPU_NO_RESET_FUNC 0
+#define CPU_NO_EXTRA1_FUNC 0
+#define CPU_NO_EXTRA2_FUNC 0
+
/* Word size for 64-bit CPUs */
#define CPU_WORD_SIZE 8
@@ -48,6 +51,8 @@ CPU_RESET_FUNC: /* cpu_ops reset_func */
#endif
CPU_EXTRA1_FUNC:
.space 8
+CPU_EXTRA2_FUNC:
+ .space 8
#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
CPU_PWR_DWN_OPS: /* cpu_ops power down functions */
.space (8 * CPU_MAX_PWR_DWN_OPS)
@@ -119,6 +124,10 @@ CPU_OPS_SIZE = .
* This is a placeholder for future per CPU operations. Currently,
* some CPUs use this entry to set a test function to determine if
* the workaround for CVE-2017-5715 needs to be applied or not.
+ * _extra2:
+ * This is a placeholder for future per CPU operations. Currently
+ * some CPUs use this entry to set a function to disable the
+ * workaround for CVE-2018-3639.
* _power_down_ops:
* Comma-separated list of functions to perform power-down
* operations on the CPU. At least one, and up to
@@ -129,7 +138,7 @@ CPU_OPS_SIZE = .
* used to handle power down at subsequent levels
*/
.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
- _extra1:req, _power_down_ops:vararg
+ _extra1:req, _extra2:req, _power_down_ops:vararg
.section cpu_ops, "a"
.align 3
.type cpu_ops_\_name, %object
@@ -138,6 +147,7 @@ CPU_OPS_SIZE = .
.quad \_resetfunc
#endif
.quad \_extra1
+ .quad \_extra2
#ifdef IMAGE_BL31
1:
/* Insert list of functions */
@@ -196,14 +206,15 @@ CPU_OPS_SIZE = .
.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
_power_down_ops:vararg
- declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, \
+ declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, \
\_power_down_ops
.endm
- .macro declare_cpu_ops_workaround_cve_2017_5715 _name:req, _midr:req, \
- _resetfunc:req, _extra1:req, _power_down_ops:vararg
+ .macro declare_cpu_ops_wa _name:req, _midr:req, \
+ _resetfunc:req, _extra1:req, _extra2:req, \
+ _power_down_ops:vararg
declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
- \_extra1, \_power_down_ops
+ \_extra1, \_extra2, \_power_down_ops
.endm
#if REPORT_ERRATA
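The macro changes above add a second per-CPU function slot (extra2) alongside the existing extra1 slot. A rough C view of the cpu_ops descriptor that declare_cpu_ops_base emits for BL31 is sketched below; the struct and field names are illustrative, since the real layout is defined only by the assembly offsets (CPU_MIDR, CPU_RESET_FUNC, CPU_EXTRA1_FUNC, CPU_EXTRA2_FUNC, CPU_PWR_DWN_OPS).

/* Illustrative C view of the BL31 cpu_ops descriptor; names are not from the
 * patch. CPU_MAX_PWR_DWN_OPS is 2 (core and cluster levels) in current TF. */
#define CPU_MAX_PWR_DWN_OPS 2

struct cpu_ops_sketch {
	unsigned long midr;                       /* CPU_MIDR */
	void (*reset_func)(void);                 /* CPU_RESET_FUNC */
	void *extra1_func;                        /* CVE-2017-5715 check function */
	void *extra2_func;                        /* CVE-2018-3639 disable function,
	                                             or CPU_NO_EXTRA2_FUNC (0) */
	void (*pwr_dwn_ops[CPU_MAX_PWR_DWN_OPS])(void);
	/* errata-report fields follow when REPORT_ERRATA=1 */
};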
diff --git a/include/lib/cpus/wa_cve_2018_3639.h b/include/lib/cpus/wa_cve_2018_3639.h
new file mode 100644
index 000000000..36546f70d
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2018_3639.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __WA_CVE_2018_3639_H__
+#define __WA_CVE_2018_3639_H__
+
+void *wa_cve_2018_3639_get_disable_ptr(void);
+
+#endif /* __WA_CVE_2018_3639_H__ */
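A sketch of how EL3 code might consume this accessor; the typedef and callers below are illustrative (the patch's real consumer is the SMCCC_ARCH_FEATURES handler at the end of this commit), while wa_cve_2018_3639_get_disable_ptr() itself is the function declared above.

#include <stddef.h>
#include <wa_cve_2018_3639.h>

/* Assumed signature for the per-CPU disable hook; the header only exposes a
 * void pointer. */
typedef void (*cve_2018_3639_disable_fn_t)(void);

static int pe_needs_dynamic_mitigation(void)
{
	/* Non-NULL only on cores that registered a dynamic disable hook. */
	return wa_cve_2018_3639_get_disable_ptr() != NULL;
}

static void disable_mitigation_if_dynamic(void)
{
	cve_2018_3639_disable_fn_t fn =
		(cve_2018_3639_disable_fn_t)wa_cve_2018_3639_get_disable_ptr();

	if (fn != NULL)
		fn();	/* turn the mitigation off for this PE */
}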
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index cdd74a34c..a4f3ea1bb 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -128,8 +128,8 @@
* Constants that allow assembler code to access members of and the 'fp_regs'
* structure at their correct offsets.
******************************************************************************/
-#if CTX_INCLUDE_FPREGS
#define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
+#if CTX_INCLUDE_FPREGS
#define CTX_FP_Q0 U(0x0)
#define CTX_FP_Q1 U(0x10)
#define CTX_FP_Q2 U(0x20)
@@ -170,8 +170,14 @@
#else
#define CTX_FPREGS_END U(0x210) /* Align to the next 16 byte boundary */
#endif
+#else
+#define CTX_FPREGS_END U(0)
#endif
+#define CTX_CVE_2018_3639_OFFSET (CTX_FPREGS_OFFSET + CTX_FPREGS_END)
+#define CTX_CVE_2018_3639_DISABLE U(0)
+#define CTX_CVE_2018_3639_END U(0x10) /* Align to the next 16 byte boundary */
+
#ifndef __ASSEMBLY__
#include <cassert.h>
@@ -195,6 +201,7 @@
#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
#endif
#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
+#define CTX_CVE_2018_3639_ALL (CTX_CVE_2018_3639_END >> DWORD_SHIFT)
/*
* AArch64 general purpose register context structure. Usually x0-x18,
@@ -227,6 +234,9 @@ DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
*/
DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
+/* Function pointer used by CVE-2018-3639 dynamic mitigation */
+DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL);
+
/*
* Macros to access members of any of the above structures using their
* offsets
@@ -251,6 +261,7 @@ typedef struct cpu_context {
#if CTX_INCLUDE_FPREGS
fp_regs_t fpregs_ctx;
#endif
+ cve_2018_3639_t cve_2018_3639_ctx;
} cpu_context_t;
/* Macros to access members of the 'cpu_context_t' structure */
@@ -276,6 +287,8 @@ CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
#endif
CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
assert_core_context_el3state_offset_mismatch);
+CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx), \
+ assert_core_context_cve_2018_3639_offset_mismatch);
/*
* Helper macro to set the general purpose registers that correspond to
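The new cve_2018_3639_ctx member gives every lower-EL context a 16-byte slot recording whether that context wants the mitigation disabled. The SMC handler that writes it is not part of this diff; a simplified sketch follows, assuming a get_cve_2018_3639_ctx() accessor in the style of the existing get_gpregs_ctx()/get_el3state_ctx() helpers (write_ctx_reg(), cm_get_context() and CTX_CVE_2018_3639_DISABLE are existing or newly added names).

#include <context.h>
#include <context_mgmt.h>

/* Illustrative only: store the CPU's disable hook (or NULL to keep the
 * mitigation enabled) in the non-secure context, so el3_exit can re-apply
 * the lower EL's choice on the way out. */
static void ctx_record_cve_2018_3639_state(void *disable_fn)
{
	cpu_context_t *ctx = cm_get_context(NON_SECURE);

	write_ctx_reg(get_cve_2018_3639_ctx(ctx), CTX_CVE_2018_3639_DISABLE,
		      (u_register_t)disable_fn);
}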
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 721bb49ab..07fadd154 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -573,8 +573,9 @@ func cortex_a57_cpu_reg_dump
ret
endfunc cortex_a57_cpu_reg_dump
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a57, CORTEX_A57_MIDR, \
+declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \
cortex_a57_reset_func, \
check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
cortex_a57_core_pwr_dwn, \
cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 6ef35cfcf..bb9381d17 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -310,8 +310,9 @@ func cortex_a72_cpu_reg_dump
ret
endfunc cortex_a72_cpu_reg_dump
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a72, CORTEX_A72_MIDR, \
+declare_cpu_ops_wa cortex_a72, CORTEX_A72_MIDR, \
cortex_a72_reset_func, \
check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
cortex_a72_core_pwr_dwn, \
cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 2dbd515f8..d595f128f 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -187,8 +187,9 @@ func cortex_a73_cpu_reg_dump
ret
endfunc cortex_a73_cpu_reg_dump
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a73, CORTEX_A73_MIDR, \
+declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \
cortex_a73_reset_func, \
check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
cortex_a73_core_pwr_dwn, \
cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 9cc2c01ed..20ec32ce2 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -130,7 +130,8 @@ func cortex_a75_cpu_reg_dump
ret
endfunc cortex_a75_cpu_reg_dump
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a75, CORTEX_A75_MIDR, \
+declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \
cortex_a75_reset_func, \
check_errata_cve_2017_5715, \
+ CPU_NO_EXTRA2_FUNC, \
cortex_a75_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 78c66e652..69ece8fff 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -316,3 +316,27 @@ func check_wa_cve_2017_5715
mov x0, #ERRATA_NOT_APPLIES
ret
endfunc check_wa_cve_2017_5715
+
+/*
+ * void *wa_cve_2018_3639_get_disable_ptr(void);
+ *
+ * Returns a function pointer which is used to disable mitigation
+ * for CVE-2018-3639.
+ * The function pointer is only returned on cores that employ
+ * dynamic mitigation. If the core uses static mitigation or is
+ * unaffected by CVE-2018-3639 this function returns NULL.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ * in per-CPU data.
+ */
+ .globl wa_cve_2018_3639_get_disable_ptr
+func wa_cve_2018_3639_get_disable_ptr
+ mrs x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+ ldr x0, [x0, #CPU_EXTRA2_FUNC]
+ ret
+endfunc wa_cve_2018_3639_get_disable_ptr
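In C terms, the helper above walks from the per-CPU data pointer held in TPIDR_EL3 to the cpu_ops descriptor and returns its extra2 slot. The sketch below is only a rough equivalent; the struct names are illustrative and the real accessors are the assembly offsets CPU_DATA_CPU_OPS_PTR and CPU_EXTRA2_FUNC (read_tpidr_el3() is an existing arch helper).

#include <arch_helpers.h>

struct cpu_ops_view {
	/* ... midr, reset_func and extra1 precede this field ... */
	void *extra2_func;	/* CVE-2018-3639 disable hook, or NULL */
};

struct cpu_data_view {
	struct cpu_ops_view *cpu_ops_ptr;
	/* ... remaining per-CPU fields ... */
};

/* Rough, illustrative C equivalent of the assembly helper above. */
void *wa_cve_2018_3639_get_disable_ptr_sketch(void)
{
	struct cpu_data_view *pcpu =
		(struct cpu_data_view *)read_tpidr_el3();

	return pcpu->cpu_ops_ptr->extra2_func;
}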
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 31cd837be..434c13ea0 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -18,6 +18,7 @@ A57_DISABLE_NON_TEMPORAL_HINT ?=1
WORKAROUND_CVE_2017_5715 ?=1
WORKAROUND_CVE_2018_3639 ?=1
+DYNAMIC_WORKAROUND_CVE_2018_3639 ?=0
# Process SKIP_A57_L1_FLUSH_PWR_DWN flag
$(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -39,6 +40,15 @@ $(eval $(call add_define,WORKAROUND_CVE_2017_5715))
$(eval $(call assert_boolean,WORKAROUND_CVE_2018_3639))
$(eval $(call add_define,WORKAROUND_CVE_2018_3639))
+$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
+
+ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0)
+ ifeq (${WORKAROUND_CVE_2018_3639},0)
+ $(error "Error: WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1")
+ endif
+endif
+
# CPU Errata Build flags.
# These should be enabled by the platform if the erratum workaround needs to be
# applied.
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 121ca4d30..707e6dbd4 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -404,6 +404,15 @@ func el3_exit
msr spsr_el3, x16
msr elr_el3, x17
+#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
+ /* Restore mitigation state as it was on entry to EL3 */
+ ldr x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+ cmp x17, xzr
+ beq 1f
+ blr x17
+#endif
+
+1:
/* Restore saved general purpose registers and return */
b restore_gp_registers_eret
endfunc el3_exit
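The el3_exit hunk above re-applies the saved mitigation state just before ERET: if the stored disable pointer is non-NULL it is called, so the lower EL resumes with the mitigation off; otherwise execution falls through with the mitigation left on. A C-level sketch of that logic (illustrative; the shipped code is the assembly above, and get_cve_2018_3639_ctx() is assumed as before):

#include <context.h>

typedef void (*cve_2018_3639_disable_fn_t)(void);

/* Illustrative C rendering of the el3_exit addition. */
static void restore_cve_2018_3639_state(cpu_context_t *ctx)
{
	cve_2018_3639_disable_fn_t fn = (cve_2018_3639_disable_fn_t)read_ctx_reg(
		get_cve_2018_3639_ctx(ctx), CTX_CVE_2018_3639_DISABLE);

	if (fn != NULL)
		fn();	/* the lower EL last requested the mitigation disabled */
}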
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index 6089cf6ac..45c4704ee 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -11,6 +11,7 @@
#include <smccc.h>
#include <smccc_helpers.h>
#include <wa_cve_2017_5715.h>
+#include <wa_cve_2018_3639.h>
static int32_t smccc_version(void)
{
@@ -31,8 +32,25 @@ static int32_t smccc_arch_features(u_register_t arg)
#endif
#if WORKAROUND_CVE_2018_3639
case SMCCC_ARCH_WORKAROUND_2:
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /*
+ * On a platform where at least one CPU requires
+ * dynamic mitigation but others are either unaffected
+ * or permanently mitigated, report the latter as not
+ * needing dynamic mitigation.
+ */
+ if (wa_cve_2018_3639_get_disable_ptr() == NULL)
+ return 1;
+ /*
+ * If we get here, this CPU requires dynamic mitigation
+ * so report it as such.
+ */
+ return 0;
+#else
+ /* Either the CPUs are unaffected or permanently mitigated */
return SMCCC_ARCH_NOT_REQUIRED;
#endif
+#endif
default:
return SMC_UNK;
}