Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--	arch/arm/vfp/vfp.h	2
-rw-r--r--	arch/arm/vfp/vfphw.S	2
-rw-r--r--	arch/arm/vfp/vfpmodule.c	61
3 files changed, 61 insertions(+), 4 deletions(-)
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 8de86e4feada..c8c98dd44ad4 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -377,6 +377,4 @@ struct op {
u32 flags;
};
-#if defined(CONFIG_SMP) || defined(CONFIG_PM)
extern void vfp_save_state(void *location, u32 fpexc);
-#endif
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index c92a08bd6a86..a5a4e57763c3 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -172,7 +172,6 @@ process_exception:
@ retry the faulted instruction
ENDPROC(vfp_support_entry)
-#if defined(CONFIG_SMP) || defined(CONFIG_PM)
ENTRY(vfp_save_state)
@ Save the current VFP state
@ r0 - save location
@@ -190,7 +189,6 @@ ENTRY(vfp_save_state)
stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
mov pc, lr
ENDPROC(vfp_save_state)
-#endif
last_VFP_context_address:
.word last_VFP_context
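The stmia at the end of vfp_save_state stores FPEXC, FPSCR, FPINST and FPINST2 as four contiguous words, after the double registers saved earlier in the routine (outside this hunk). A minimal C sketch of the save area the routine expects, modelled on struct vfp_hard_struct from asm/fpstate.h; the real layout is config-dependent (VFPv3 builds carry 32 double registers, SMP builds append a cpu word), so treat the fields below as illustrative for a pre-VFPv3 UP build:

	#include <stdint.h>

	/* Illustrative stand-in for the kernel's struct vfp_hard_struct. */
	struct vfp_save_area {
		uint64_t fpregs[16];	/* d0-d15, stored before the final stmia */
		uint32_t fpexc;		/* r1  \                                 */
		uint32_t fpscr;		/* r2   | stmia r0, {r1, r2, r3, r12}    */
		uint32_t fpinst;	/* r3   | fills these four words         */
		uint32_t fpinst2;	/* r12 /                                 */
	};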
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 9f476a1be2ca..75457b30d813 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -377,6 +377,55 @@ static void vfp_pm_init(void)
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
+/*
+ * Synchronise the hardware VFP state of a thread other than current with the
+ * saved one. This function is used by the ptrace mechanism.
+ */
+#ifdef CONFIG_SMP
+void vfp_sync_state(struct thread_info *thread)
+{
+	/*
+	 * On SMP systems, the VFP state is automatically saved at every
+	 * context switch. We mark the thread VFP state as belonging to a
+	 * non-existent CPU so that the saved one will be reloaded when
+	 * needed.
+	 */
+	thread->vfpstate.hard.cpu = NR_CPUS;
+}
+#else
+void vfp_sync_state(struct thread_info *thread)
+{
+	unsigned int cpu = get_cpu();
+	u32 fpexc = fmrx(FPEXC);
+
+	/*
+	 * If VFP is enabled, the previous state was already saved and
+	 * last_VFP_context updated.
+	 */
+	if (fpexc & FPEXC_EN)
+		goto out;
+
+	if (!last_VFP_context[cpu])
+		goto out;
+
+	/*
+	 * Save the last VFP state on this CPU.
+	 */
+	fmxr(FPEXC, fpexc | FPEXC_EN);
+	vfp_save_state(last_VFP_context[cpu], fpexc);
+	fmxr(FPEXC, fpexc);
+
+	/*
+	 * Set the context to NULL to force a reload the next time the thread
+	 * uses the VFP.
+	 */
+	last_VFP_context[cpu] = NULL;
+
+out:
+	put_cpu();
+}
+#endif
+
#include <linux/smp.h>
/*
@@ -427,6 +476,18 @@ static int __init vfp_init(void)
* in place; report VFP support to userspace.
*/
elf_hwcap |= HWCAP_VFP;
+#ifdef CONFIG_VFPv3
+	if (VFP_arch >= 3) {
+		elf_hwcap |= HWCAP_VFPv3;
+
+		/*
+		 * Check for VFPv3 D16. CPUs in this configuration
+		 * only have 16 x 64bit registers.
+		 */
+		if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
+			elf_hwcap |= HWCAP_VFPv3D16;
+	}
+#endif
#ifdef CONFIG_NEON
/*
* Check for the presence of the Advanced SIMD
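vfp_sync_state() above exists for the ptrace path: before a tracer reads another thread's VFP registers, the saved copy in thread->vfpstate must be brought up to date. A sketch of the intended caller follows; the handler name ptrace_getvfpregs and struct user_vfp are modelled on the companion ptrace patch of the same series, not on this diff, so treat them as assumptions:

	/* Hypothetical PTRACE_GETVFPREGS handler built on vfp_sync_state(). */
	static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data)
	{
		struct thread_info *thread = task_thread_info(tsk);
		union vfp_state *vfp = &thread->vfpstate;
		struct user_vfp __user *ufp = data;

		/* Flush any live hardware state into the saved copy. */
		vfp_sync_state(thread);

		/* Copy the double registers and FPSCR out to the tracer. */
		if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs,
				 sizeof(vfp->hard.fpregs)))
			return -EFAULT;
		if (put_user(vfp->hard.fpscr, &ufp->fpscr))
			return -EFAULT;

		return 0;
	}

On SMP, the NR_CPUS marking means the next switch-in reloads from this saved copy, so a matching SETVFPREGS write becomes visible to the traced thread the same way.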
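On the userspace side, the new capability bits surface through the ELF auxiliary vector. A minimal detection sketch, assuming a libc that provides getauxval() (glibc 2.16 and later; older systems read /proc/self/auxv instead) and the bit values <asm/hwcap.h> assigns in this era:

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP_VFPv3
	#define HWCAP_VFPv3	(1 << 13)	/* value from <asm/hwcap.h> */
	#endif
	#ifndef HWCAP_VFPv3D16
	#define HWCAP_VFPv3D16	(1 << 14)	/* value from <asm/hwcap.h> */
	#endif

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		if (hwcap & HWCAP_VFPv3)
			printf("VFPv3%s\n", (hwcap & HWCAP_VFPv3D16) ?
			       " with 16 double registers (D16)" :
			       " with 32 double registers");
		return 0;
	}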