path: root/FreeRTOS/Demo/Common/ARMv8M/reg_tests/GCC/ARM_CM33/non_secure/reg_test_asm.c
author    Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>  2023-04-18 10:45:59 +0530
committer GitHub <noreply@github.com>  2023-04-18 10:45:59 +0530
commit    5eaf2f3bce7d4f2959b289a2e4c17167f55a00fc (patch)
tree      b5a44318b3a4659b3e928c8ab4e9a722106a21e1 /FreeRTOS/Demo/Common/ARMv8M/reg_tests/GCC/ARM_CM33/non_secure/reg_test_asm.c
parent    4727d6b3cc369310306ff24f61cafc1017853f82 (diff)
Add reg tests to LPC55S69 project (#989)
* Update LPCXpresso55S69 SDK to 2.13.1
* Enable print from non-secure side
* Add register tests

Signed-off-by: Gaurav Aggarwal <aggarg@amazon.com>
Diffstat (limited to 'FreeRTOS/Demo/Common/ARMv8M/reg_tests/GCC/ARM_CM33/non_secure/reg_test_asm.c')
-rw-r--r--  FreeRTOS/Demo/Common/ARMv8M/reg_tests/GCC/ARM_CM33/non_secure/reg_test_asm.c  1220
1 file changed, 1220 insertions(+), 0 deletions(-)
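Each reg test task in this file increments its ulRegTest<N>LoopCounter once per successful pass, so a counter that stops moving signals a failed check. As a sketch of how a supervisory "check" task might consume those counters (not part of this commit - the task name and check period are assumptions; the counter names match the .extern symbols in the diff):

#include "FreeRTOS.h"
#include "task.h"

extern volatile uint32_t ulRegTest1LoopCounter;
extern volatile uint32_t ulRegTest2LoopCounter;

static void prvCheckTask( void * pvParameters )
{
    uint32_t ulLastCount1 = 0, ulLastCount2 = 0;

    ( void ) pvParameters;

    for( ;; )
    {
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );

        /* A reg test task that branched to its error loop stops
         * incrementing its counter, so a stationary counter indicates
         * a context switching error. */
        configASSERT( ulRegTest1LoopCounter != ulLastCount1 );
        configASSERT( ulRegTest2LoopCounter != ulLastCount2 );

        ulLastCount1 = ulRegTest1LoopCounter;
        ulLastCount2 = ulRegTest2LoopCounter;
    }
}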
diff --git a/FreeRTOS/Demo/Common/ARMv8M/reg_tests/GCC/ARM_CM33/non_secure/reg_test_asm.c b/FreeRTOS/Demo/Common/ARMv8M/reg_tests/GCC/ARM_CM33/non_secure/reg_test_asm.c
new file mode 100644
index 000000000..6b276ca29
--- /dev/null
+++ b/FreeRTOS/Demo/Common/ARMv8M/reg_tests/GCC/ARM_CM33/non_secure/reg_test_asm.c
@@ -0,0 +1,1220 @@
+/*
+ * FreeRTOS V202212.00
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/*
+ * "Reg tests" - These tests fill the registers with known values, then check
+ * that each register maintains its expected value for the lifetime of the
+ * task. Each task uses a different set of values. The reg test tasks execute
+ * with a very low priority, so they get preempted very frequently. A register
+ * containing an unexpected value is indicative of an error in the context
+ * switching mechanism.
+ */
+
+#include "reg_test_asm.h"
+/*-----------------------------------------------------------*/
+
+void vRegTest1Asm_NonSecure( void ) /* __attribute__(( naked )) */
+{
+ __asm volatile
+ (
+ ".extern ulRegTest1LoopCounter \n"
+ ".syntax unified \n"
+ " \n"
+ " /* Fill the core registers with known values. */ \n"
+ " movs r0, #100 \n"
+ " movs r1, #101 \n"
+ " movs r2, #102 \n"
+ " movs r3, #103 \n"
+ " movs r4, #104 \n"
+ " movs r5, #105 \n"
+ " movs r6, #106 \n"
+ " movs r7, #107 \n"
+ " mov r8, #108 \n"
+ " mov r9, #109 \n"
+ " mov r10, #110 \n"
+ " mov r11, #111 \n"
+ " mov r12, #112 \n"
+ " \n"
+ " /* Fill the FPU registers with known values. */ \n"
+ " vmov.f32 s1, #1.5 \n"
+ " vmov.f32 s2, #2.5 \n"
+ " vmov.f32 s3, #3.5 \n"
+ " vmov.f32 s4, #4.5 \n"
+ " vmov.f32 s5, #5.5 \n"
+ " vmov.f32 s6, #6.5 \n"
+ " vmov.f32 s7, #7.5 \n"
+ " vmov.f32 s8, #8.5 \n"
+ " vmov.f32 s9, #9.5 \n"
+ " vmov.f32 s10, #10.5 \n"
+ " vmov.f32 s11, #11.5 \n"
+ " vmov.f32 s12, #12.5 \n"
+ " vmov.f32 s13, #13.5 \n"
+ " vmov.f32 s14, #14.5 \n"
+ " vmov.f32 s15, #1.0 \n"
+ " vmov.f32 s16, #2.0 \n"
+ " vmov.f32 s17, #3.0 \n"
+ " vmov.f32 s18, #4.0 \n"
+ " vmov.f32 s19, #5.0 \n"
+ " vmov.f32 s20, #6.0 \n"
+ " vmov.f32 s21, #7.0 \n"
+ " vmov.f32 s22, #8.0 \n"
+ " vmov.f32 s23, #9.0 \n"
+ " vmov.f32 s24, #10.0 \n"
+ " vmov.f32 s25, #11.0 \n"
+ " vmov.f32 s26, #12.0 \n"
+ " vmov.f32 s27, #13.0 \n"
+ " vmov.f32 s28, #14.0 \n"
+ " vmov.f32 s29, #1.5 \n"
+ " vmov.f32 s30, #2.5 \n"
+ " vmov.f32 s31, #3.5 \n"
+ " \n"
+ "reg1_loop: \n"
+ " \n"
+ " /* Verify that core registers contain correct values. */ \n"
+ " cmp r0, #100 \n"
+ " bne reg1_error_loop \n"
+ " cmp r1, #101 \n"
+ " bne reg1_error_loop \n"
+ " cmp r2, #102 \n"
+ " bne reg1_error_loop \n"
+ " cmp r3, #103 \n"
+ " bne reg1_error_loop \n"
+ " cmp r4, #104 \n"
+ " bne reg1_error_loop \n"
+ " cmp r5, #105 \n"
+ " bne reg1_error_loop \n"
+ " cmp r6, #106 \n"
+ " bne reg1_error_loop \n"
+ " cmp r7, #107 \n"
+ " bne reg1_error_loop \n"
+ " cmp r8, #108 \n"
+ " bne reg1_error_loop \n"
+ " cmp r9, #109 \n"
+ " bne reg1_error_loop \n"
+ " cmp r10, #110 \n"
+ " bne reg1_error_loop \n"
+ " cmp r11, #111 \n"
+ " bne reg1_error_loop \n"
+ " cmp r12, #112 \n"
+ " bne reg1_error_loop \n"
+ " \n"
+ " /* Verify that FPU registers contain correct values. */ \n"
+ " vmov.f32 s0, #1.5 \n" /* s0 = 1.5. */
+ " vcmp.f32 s1, s0 \n" /* Compare s0 and s1. */
+ " vmrs APSR_nzcv, FPSCR \n" /* Copy floating point flags (FPSCR flags) to ASPR flags - needed for next bne to work. */
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #2.5 \n"
+ " vcmp.f32 s2, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #3.5 \n"
+ " vcmp.f32 s3, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #4.5 \n"
+ " vcmp.f32 s4, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #5.5 \n"
+ " vcmp.f32 s5, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #6.5 \n"
+ " vcmp.f32 s6, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #7.5 \n"
+ " vcmp.f32 s7, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #8.5 \n"
+ " vcmp.f32 s8, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #9.5 \n"
+ " vcmp.f32 s9, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #10.5 \n"
+ " vcmp.f32 s10, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #11.5 \n"
+ " vcmp.f32 s11, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #12.5 \n"
+ " vcmp.f32 s12, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #13.5 \n"
+ " vcmp.f32 s13, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #14.5 \n"
+ " vcmp.f32 s14, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #1.0 \n"
+ " vcmp.f32 s15, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #2.0 \n"
+ " vcmp.f32 s16, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #3.0 \n"
+ " vcmp.f32 s17, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #4.0 \n"
+ " vcmp.f32 s18, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #5.0 \n"
+ " vcmp.f32 s19, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #6.0 \n"
+ " vcmp.f32 s20, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #7.0 \n"
+ " vcmp.f32 s21, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #8.0 \n"
+ " vcmp.f32 s22, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #9.0 \n"
+ " vcmp.f32 s23, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #10.0 \n"
+ " vcmp.f32 s24, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #11.0 \n"
+ " vcmp.f32 s25, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #12.0 \n"
+ " vcmp.f32 s26, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #13.0 \n"
+ " vcmp.f32 s27, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #14.0 \n"
+ " vcmp.f32 s28, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #1.5 \n"
+ " vcmp.f32 s29, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #2.5 \n"
+ " vcmp.f32 s30, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " vmov.f32 s0, #3.5 \n"
+ " vcmp.f32 s31, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg1_error_loop \n"
+ " \n"
+ " /* Everything passed, inc the loop counter. */ \n"
+ " push { r0, r1 } \n"
+ " ldr r0, =ulRegTest1LoopCounter \n"
+ " ldr r1, [r0] \n"
+ " adds r1, r1, #1 \n"
+ " str r1, [r0] \n"
+ " \n"
+ " /* Yield to increase test coverage. */ \n"
+ " movs r0, #0x01 \n"
+ " ldr r1, =0xe000ed04 \n" /* NVIC_ICSR. */
+ " lsls r0, #28 \n" /* Shift to PendSV bit. */
+ " str r0, [r1] \n"
+ " dsb \n"
+ " pop { r0, r1 } \n"
+ " \n"
+ " /* Start again. */ \n"
+ " b reg1_loop \n"
+ " \n"
+ "reg1_error_loop: \n"
+ " /* If this line is hit then there was an error in \n"
+ " * a core register value. The loop ensures the \n"
+ " * loop counter stops incrementing. */ \n"
+ " b reg1_error_loop \n"
+ " nop \n"
+ ".ltorg \n"
+ );
+}
+/*-----------------------------------------------------------*/
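The yield sequence in reg1_loop pends the PendSV exception by setting the PENDSVSET bit (bit 28) of the ICSR at 0xE000ED04, then issues a dsb so the write takes effect before execution continues. A C equivalent of that sequence, sketched against the raw register address rather than any particular vendor header:

#include <stdint.h>

#define ICSR_REG        ( *( ( volatile uint32_t * ) 0xE000ED04UL ) )
#define ICSR_PENDSVSET  ( 1UL << 28 )

static inline void vPendSvYield( void )
{
    ICSR_REG = ICSR_PENDSVSET;                 /* Pend the PendSV exception. */
    __asm volatile ( "dsb" ::: "memory" );     /* Complete the write before
                                                * executing further. */
}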
+
+void vRegTest2Asm_NonSecure( void ) /* __attribute__(( naked )) */
+{
+ __asm volatile
+ (
+ ".extern ulRegTest2LoopCounter \n"
+ ".syntax unified \n"
+ " \n"
+ " /* Fill the core registers with known values. */ \n"
+ " movs r0, #0 \n"
+ " movs r1, #1 \n"
+ " movs r2, #2 \n"
+ " movs r3, #3 \n"
+ " movs r4, #4 \n"
+ " movs r5, #5 \n"
+ " movs r6, #6 \n"
+ " movs r7, #7 \n"
+ " mov r8, #8 \n"
+ " mov r9, #9 \n"
+ " movs r10, #10 \n"
+ " movs r11, #11 \n"
+ " movs r12, #12 \n"
+ " \n"
+ " /* Fill the FPU registers with known values. */ \n"
+ " vmov.f32 s1, #1.0 \n"
+ " vmov.f32 s2, #2.0 \n"
+ " vmov.f32 s3, #3.0 \n"
+ " vmov.f32 s4, #4.0 \n"
+ " vmov.f32 s5, #5.0 \n"
+ " vmov.f32 s6, #6.0 \n"
+ " vmov.f32 s7, #7.0 \n"
+ " vmov.f32 s8, #8.0 \n"
+ " vmov.f32 s9, #9.0 \n"
+ " vmov.f32 s10, #10.0 \n"
+ " vmov.f32 s11, #11.0 \n"
+ " vmov.f32 s12, #12.0 \n"
+ " vmov.f32 s13, #13.0 \n"
+ " vmov.f32 s14, #14.0 \n"
+ " vmov.f32 s15, #1.5 \n"
+ " vmov.f32 s16, #2.5 \n"
+ " vmov.f32 s17, #3.5 \n"
+ " vmov.f32 s18, #4.5 \n"
+ " vmov.f32 s19, #5.5 \n"
+ " vmov.f32 s20, #6.5 \n"
+ " vmov.f32 s21, #7.5 \n"
+ " vmov.f32 s22, #8.5 \n"
+ " vmov.f32 s23, #9.5 \n"
+ " vmov.f32 s24, #10.5 \n"
+ " vmov.f32 s25, #11.5 \n"
+ " vmov.f32 s26, #12.5 \n"
+ " vmov.f32 s27, #13.5 \n"
+ " vmov.f32 s28, #14.5 \n"
+ " vmov.f32 s29, #1.0 \n"
+ " vmov.f32 s30, #2.0 \n"
+ " vmov.f32 s31, #3.0 \n"
+ " \n"
+ "reg2_loop: \n"
+ " \n"
+ " /* Verify that core registers contain correct values. */ \n"
+ " cmp r0, #0 \n"
+ " bne reg2_error_loop \n"
+ " cmp r1, #1 \n"
+ " bne reg2_error_loop \n"
+ " cmp r2, #2 \n"
+ " bne reg2_error_loop \n"
+ " cmp r3, #3 \n"
+ " bne reg2_error_loop \n"
+ " cmp r4, #4 \n"
+ " bne reg2_error_loop \n"
+ " cmp r5, #5 \n"
+ " bne reg2_error_loop \n"
+ " cmp r6, #6 \n"
+ " bne reg2_error_loop \n"
+ " cmp r7, #7 \n"
+ " bne reg2_error_loop \n"
+ " cmp r8, #8 \n"
+ " bne reg2_error_loop \n"
+ " cmp r9, #9 \n"
+ " bne reg2_error_loop \n"
+ " cmp r10, #10 \n"
+ " bne reg2_error_loop \n"
+ " cmp r11, #11 \n"
+ " bne reg2_error_loop \n"
+ " cmp r12, #12 \n"
+ " bne reg2_error_loop \n"
+ " \n"
+ " /* Verify that FPU registers contain correct values. */ \n"
+ " vmov.f32 s0, #1.0 \n"
+ " vcmp.f32 s1, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #2.0 \n"
+ " vcmp.f32 s2, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #3.0 \n"
+ " vcmp.f32 s3, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #4.0 \n"
+ " vcmp.f32 s4, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #5.0 \n"
+ " vcmp.f32 s5, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #6.0 \n"
+ " vcmp.f32 s6, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #7.0 \n"
+ " vcmp.f32 s7, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #8.0 \n"
+ " vcmp.f32 s8, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #9.0 \n"
+ " vcmp.f32 s9, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #10.0 \n"
+ " vcmp.f32 s10, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #11.0 \n"
+ " vcmp.f32 s11, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #12.0 \n"
+ " vcmp.f32 s12, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #13.0 \n"
+ " vcmp.f32 s13, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #14.0 \n"
+ " vcmp.f32 s14, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #1.5 \n"
+ " vcmp.f32 s15, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #2.5 \n"
+ " vcmp.f32 s16, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #3.5 \n"
+ " vcmp.f32 s17, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #4.5 \n"
+ " vcmp.f32 s18, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #5.5 \n"
+ " vcmp.f32 s19, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #6.5 \n"
+ " vcmp.f32 s20, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #7.5 \n"
+ " vcmp.f32 s21, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #8.5 \n"
+ " vcmp.f32 s22, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #9.5 \n"
+ " vcmp.f32 s23, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #10.5 \n"
+ " vcmp.f32 s24, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #11.5 \n"
+ " vcmp.f32 s25, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #12.5 \n"
+ " vcmp.f32 s26, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #13.5 \n"
+ " vcmp.f32 s27, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #14.5 \n"
+ " vcmp.f32 s28, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #1.0 \n"
+ " vcmp.f32 s29, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #2.0 \n"
+ " vcmp.f32 s30, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " vmov.f32 s0, #3.0 \n"
+ " vcmp.f32 s31, s0 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg2_error_loop \n"
+ " \n"
+ " /* Everything passed, inc the loop counter. */ \n"
+ " push { r0, r1 } \n"
+ " ldr r0, =ulRegTest2LoopCounter \n"
+ " ldr r1, [r0] \n"
+ " adds r1, r1, #1 \n"
+ " str r1, [r0] \n"
+ " pop { r0, r1 } \n"
+ " \n"
+ " /* Start again. */ \n"
+ " b reg2_loop \n"
+ " \n"
+ "reg2_error_loop: \n"
+ " /* If this line is hit then there was an error in \n"
+ " * a core register value. The loop ensures the \n"
+ " * loop counter stops incrementing. */ \n"
+ " b reg2_error_loop \n"
+ " nop \n"
+ ".ltorg \n"
+ );
+}
+/*-----------------------------------------------------------*/
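Each definition above carries a /* __attribute__(( naked )) */ reminder: these functions must be declared naked so that GCC emits no prologue or epilogue that would clobber the register values under test, and nothing other than the single asm block may appear in their bodies. A representative declaration, assumed here to live in reg_test_asm.h:

void vRegTest2Asm_NonSecure( void ) __attribute__( ( naked ) );

Without the attribute, the compiler would be free to push registers or adjust the stack on entry, corrupting the known values before the first check runs.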
+
+void vRegTest3Asm_NonSecure( void ) /* __attribute__(( naked )) */
+{
+ __asm volatile
+ (
+ ".extern ulRegTest3LoopCounter \n"
+ ".syntax unified \n"
+ " \n"
+ " /* Fill the core registers with known values. */ \n"
+ " movs r0, #100 \n"
+ " movs r1, #101 \n"
+ " movs r2, #102 \n"
+ " movs r3, #103 \n"
+ " movs r4, #104 \n"
+ " movs r5, #105 \n"
+ " movs r6, #106 \n"
+ " movs r7, #107 \n"
+ " mov r8, #108 \n"
+ " mov r9, #109 \n"
+ " mov r10, #110 \n"
+ " mov r11, #111 \n"
+ " mov r12, #112 \n"
+ " \n"
+ " /* Fill the FPU registers with known values. */ \n"
+ " vmov.f32 s0, #1.5 \n"
+ " vmov.f32 s2, #2.0 \n"
+ " vmov.f32 s3, #3.5 \n"
+ " vmov.f32 s4, #4.0 \n"
+ " vmov.f32 s5, #5.5 \n"
+ " vmov.f32 s6, #6.0 \n"
+ " vmov.f32 s7, #7.5 \n"
+ " vmov.f32 s8, #8.0 \n"
+ " vmov.f32 s9, #9.5 \n"
+ " vmov.f32 s10, #10.0 \n"
+ " vmov.f32 s11, #11.5 \n"
+ " vmov.f32 s12, #12.0 \n"
+ " vmov.f32 s13, #13.5 \n"
+ " vmov.f32 s14, #14.0 \n"
+ " vmov.f32 s15, #1.5 \n"
+ " vmov.f32 s16, #2.0 \n"
+ " vmov.f32 s17, #3.5 \n"
+ " vmov.f32 s18, #4.0 \n"
+ " vmov.f32 s19, #5.5 \n"
+ " vmov.f32 s20, #6.0 \n"
+ " vmov.f32 s21, #7.5 \n"
+ " vmov.f32 s22, #8.0 \n"
+ " vmov.f32 s23, #9.5 \n"
+ " vmov.f32 s24, #10.0 \n"
+ " vmov.f32 s25, #11.5 \n"
+ " vmov.f32 s26, #12.0 \n"
+ " vmov.f32 s27, #13.5 \n"
+ " vmov.f32 s28, #14.0 \n"
+ " vmov.f32 s29, #1.5 \n"
+ " vmov.f32 s30, #2.0 \n"
+ " vmov.f32 s31, #3.5 \n"
+ " \n"
+ "reg3_loop: \n"
+ " \n"
+ " /* Verify that core registers contain correct values. */ \n"
+ " cmp r0, #100 \n"
+ " bne reg3_error_loop \n"
+ " cmp r1, #101 \n"
+ " bne reg3_error_loop \n"
+ " cmp r2, #102 \n"
+ " bne reg3_error_loop \n"
+ " cmp r3, #103 \n"
+ " bne reg3_error_loop \n"
+ " cmp r4, #104 \n"
+ " bne reg3_error_loop \n"
+ " cmp r5, #105 \n"
+ " bne reg3_error_loop \n"
+ " cmp r6, #106 \n"
+ " bne reg3_error_loop \n"
+ " cmp r7, #107 \n"
+ " bne reg3_error_loop \n"
+ " cmp r8, #108 \n"
+ " bne reg3_error_loop \n"
+ " cmp r9, #109 \n"
+ " bne reg3_error_loop \n"
+ " cmp r10, #110 \n"
+ " bne reg3_error_loop \n"
+ " cmp r11, #111 \n"
+ " bne reg3_error_loop \n"
+ " cmp r12, #112 \n"
+ " bne reg3_error_loop \n"
+ " \n"
+ " /* Verify that FPU registers contain correct values. */ \n"
+ " vmov.f32 s1, #1.5 \n"
+ " vcmp.f32 s0, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #2.0 \n"
+ " vcmp.f32 s2, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #3.5 \n"
+ " vcmp.f32 s3, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #4.0 \n"
+ " vcmp.f32 s4, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #5.5 \n"
+ " vcmp.f32 s5, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #6.0 \n"
+ " vcmp.f32 s6, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #7.5 \n"
+ " vcmp.f32 s7, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #8.0 \n"
+ " vcmp.f32 s8, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #9.5 \n"
+ " vcmp.f32 s9, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #10.0 \n"
+ " vcmp.f32 s10, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #11.5 \n"
+ " vcmp.f32 s11, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #12.0 \n"
+ " vcmp.f32 s12, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #13.5 \n"
+ " vcmp.f32 s13, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #14.0 \n"
+ " vcmp.f32 s14, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #1.5 \n"
+ " vcmp.f32 s15, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #2.0 \n"
+ " vcmp.f32 s16, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #3.5 \n"
+ " vcmp.f32 s17, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #4.0 \n"
+ " vcmp.f32 s18, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #5.5 \n"
+ " vcmp.f32 s19, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #6.0 \n"
+ " vcmp.f32 s20, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #7.5 \n"
+ " vcmp.f32 s21, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #8.0 \n"
+ " vcmp.f32 s22, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #9.5 \n"
+ " vcmp.f32 s23, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #10.0 \n"
+ " vcmp.f32 s24, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #11.5 \n"
+ " vcmp.f32 s25, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #12.0 \n"
+ " vcmp.f32 s26, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #13.5 \n"
+ " vcmp.f32 s27, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #14.0 \n"
+ " vcmp.f32 s28, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #1.5 \n"
+ " vcmp.f32 s29, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #2.0 \n"
+ " vcmp.f32 s30, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " vmov.f32 s1, #3.5 \n"
+ " vcmp.f32 s31, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg3_error_loop \n"
+ " \n"
+ " /* Everything passed, inc the loop counter. */ \n"
+ " push { r0, r1 } \n"
+ " ldr r0, =ulRegTest3LoopCounter \n"
+ " ldr r1, [r0] \n"
+ " adds r1, r1, #1 \n"
+ " str r1, [r0] \n"
+ " \n"
+ " /* Yield to increase test coverage. */ \n"
+ " movs r0, #0x01 \n"
+ " ldr r1, =0xe000ed04 \n" /* NVIC_ICSR. */
+ " lsls r0, #28 \n" /* Shift to PendSV bit. */
+ " str r0, [r1] \n"
+ " dsb \n"
+ " pop { r0, r1 } \n"
+ " \n"
+ " /* Start again. */ \n"
+ " b reg3_loop \n"
+ " \n"
+ "reg3_error_loop: \n"
+ " /* If this line is hit then there was an error in \n"
+ " * a core register value. The loop ensures the \n"
+ " * loop counter stops incrementing. */ \n"
+ " b reg3_error_loop \n"
+ " nop \n"
+ ".ltorg \n"
+ );
+}
+/*-----------------------------------------------------------*/
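The FPU verification idiom repeated in these loops works in two steps: vcmp.f32 writes its result to the FPSCR flags, which conditional branches cannot see, so vmrs APSR_nzcv, FPSCR copies those flags into the APSR before bne executes. The same sequence distilled into a self-contained helper (the function name is illustrative; an FPU-enabled ARMv8-M target is assumed):

#include <stdint.h>

/* Returns 1 when the two compared values are equal, 0 otherwise. */
static uint32_t ulFpuCompareDemo( void )
{
    uint32_t ulEqual;

    __asm volatile
    (
        " vmov.f32 s0, #2.5 \n"
        " vmov.f32 s1, #2.5 \n"
        " vcmp.f32 s0, s1 \n"           /* Result lands in the FPSCR N, Z, C, V flags. */
        " vmrs APSR_nzcv, FPSCR \n"     /* Copy FPSCR flags to APSR so beq/bne work. */
        " ite eq \n"
        " moveq %0, #1 \n"
        " movne %0, #0 \n"
        : "=r" ( ulEqual ) :: "s0", "s1", "cc"
    );

    return ulEqual;
}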
+
+void vRegTest4Asm_NonSecure( void ) /* __attribute__(( naked )) */
+{
+ __asm volatile
+ (
+ ".extern ulRegTest4LoopCounter \n"
+ ".syntax unified \n"
+ " \n"
+ " /* Fill the core registers with known values. */ \n"
+ " movs r0, #0 \n"
+ " movs r1, #1 \n"
+ " movs r2, #2 \n"
+ " movs r3, #3 \n"
+ " movs r4, #4 \n"
+ " movs r5, #5 \n"
+ " movs r6, #6 \n"
+ " movs r7, #7 \n"
+ " mov r8, #8 \n"
+ " mov r9, #9 \n"
+ " movs r10, #10 \n"
+ " movs r11, #11 \n"
+ " movs r12, #12 \n"
+ " \n"
+ " /* Fill the FPU registers with known values. */ \n"
+ " vmov.f32 s0, #1.5 \n"
+ " vmov.f32 s2, #2.0 \n"
+ " vmov.f32 s3, #3.0 \n"
+ " vmov.f32 s4, #4.5 \n"
+ " vmov.f32 s5, #5.0 \n"
+ " vmov.f32 s6, #6.0 \n"
+ " vmov.f32 s7, #7.5 \n"
+ " vmov.f32 s8, #8.0 \n"
+ " vmov.f32 s9, #9.0 \n"
+ " vmov.f32 s10, #10.5 \n"
+ " vmov.f32 s11, #11.0 \n"
+ " vmov.f32 s12, #12.0 \n"
+ " vmov.f32 s13, #13.5 \n"
+ " vmov.f32 s14, #14.0 \n"
+ " vmov.f32 s15, #1.0 \n"
+ " vmov.f32 s16, #2.5 \n"
+ " vmov.f32 s17, #3.0 \n"
+ " vmov.f32 s18, #4.0 \n"
+ " vmov.f32 s19, #5.5 \n"
+ " vmov.f32 s20, #6.0 \n"
+ " vmov.f32 s21, #7.0 \n"
+ " vmov.f32 s22, #8.5 \n"
+ " vmov.f32 s23, #9.0 \n"
+ " vmov.f32 s24, #10.0 \n"
+ " vmov.f32 s25, #11.5 \n"
+ " vmov.f32 s26, #12.0 \n"
+ " vmov.f32 s27, #13.0 \n"
+ " vmov.f32 s28, #14.5 \n"
+ " vmov.f32 s29, #1.0 \n"
+ " vmov.f32 s30, #2.0 \n"
+ " vmov.f32 s31, #3.5 \n"
+ " \n"
+ "reg4_loop: \n"
+ " \n"
+ " /* Verify that core registers contain correct values. */ \n"
+ " cmp r0, #0 \n"
+ " bne reg4_error_loop \n"
+ " cmp r1, #1 \n"
+ " bne reg4_error_loop \n"
+ " cmp r2, #2 \n"
+ " bne reg4_error_loop \n"
+ " cmp r3, #3 \n"
+ " bne reg4_error_loop \n"
+ " cmp r4, #4 \n"
+ " bne reg4_error_loop \n"
+ " cmp r5, #5 \n"
+ " bne reg4_error_loop \n"
+ " cmp r6, #6 \n"
+ " bne reg4_error_loop \n"
+ " cmp r7, #7 \n"
+ " bne reg4_error_loop \n"
+ " cmp r8, #8 \n"
+ " bne reg4_error_loop \n"
+ " cmp r9, #9 \n"
+ " bne reg4_error_loop \n"
+ " cmp r10, #10 \n"
+ " bne reg4_error_loop \n"
+ " cmp r11, #11 \n"
+ " bne reg4_error_loop \n"
+ " cmp r12, #12 \n"
+ " bne reg4_error_loop \n"
+ " \n"
+ " /* Verify that FPU registers contain correct values. */ \n"
+ " vmov.f32 s1, #1.5 \n"
+ " vcmp.f32 s0, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #2.0 \n"
+ " vcmp.f32 s2, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #3.0 \n"
+ " vcmp.f32 s3, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #4.5 \n"
+ " vcmp.f32 s4, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #5.0 \n"
+ " vcmp.f32 s5, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #6.0 \n"
+ " vcmp.f32 s6, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #7.5 \n"
+ " vcmp.f32 s7, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #8.0 \n"
+ " vcmp.f32 s8, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #9.0 \n"
+ " vcmp.f32 s9, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #10.5 \n"
+ " vcmp.f32 s10, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #11.0 \n"
+ " vcmp.f32 s11, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #12.0 \n"
+ " vcmp.f32 s12, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #13.5 \n"
+ " vcmp.f32 s13, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #14.0 \n"
+ " vcmp.f32 s14, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #1.0 \n"
+ " vcmp.f32 s15, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #2.5 \n"
+ " vcmp.f32 s16, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #3.0 \n"
+ " vcmp.f32 s17, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #4.0 \n"
+ " vcmp.f32 s18, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #5.5 \n"
+ " vcmp.f32 s19, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #6.0 \n"
+ " vcmp.f32 s20, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #7.0 \n"
+ " vcmp.f32 s21, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #8.5 \n"
+ " vcmp.f32 s22, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #9.0 \n"
+ " vcmp.f32 s23, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #10.0 \n"
+ " vcmp.f32 s24, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #11.5 \n"
+ " vcmp.f32 s25, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #12.0 \n"
+ " vcmp.f32 s26, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #13.0 \n"
+ " vcmp.f32 s27, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #14.5 \n"
+ " vcmp.f32 s28, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #1.0 \n"
+ " vcmp.f32 s29, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #2.0 \n"
+ " vcmp.f32 s30, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " vmov.f32 s1, #3.5 \n"
+ " vcmp.f32 s31, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg4_error_loop \n"
+ " \n"
+ " /* Everything passed, inc the loop counter. */ \n"
+ " push { r0, r1 } \n"
+ " ldr r0, =ulRegTest4LoopCounter \n"
+ " ldr r1, [r0] \n"
+ " adds r1, r1, #1 \n"
+ " str r1, [r0] \n"
+ " pop { r0, r1 } \n"
+ " \n"
+ " /* Start again. */ \n"
+ " b reg4_loop \n"
+ " \n"
+ "reg4_error_loop: \n"
+ " /* If this line is hit then there was an error in \n"
+ " * a core register value. The loop ensures the \n"
+ " * loop counter stops incrementing. */ \n"
+ " b reg4_error_loop \n"
+ " nop \n"
+ ".ltorg \n"
+ );
+}
+/*-----------------------------------------------------------*/
+
+void vRegTestAsm_NonSecureCallback( void )
+{
+ __asm volatile
+ (
+ ".syntax unified \n"
+ " \n"
+ " /* Store callee saved registers. */ \n"
+ " push { r4-r12 } \n"
+ " \n"
+ " /* Fill the core registers with known values. */ \n"
+ " movs r0, #150 \n"
+ " movs r1, #151 \n"
+ " movs r2, #152 \n"
+ " movs r3, #153 \n"
+ " movs r4, #154 \n"
+ " movs r5, #155 \n"
+ " movs r6, #156 \n"
+ " movs r7, #157 \n"
+ " movs r8, #158 \n"
+ " movs r9, #159 \n"
+ " movs r10, #160 \n"
+ " movs r11, #161 \n"
+ " movs r12, #162 \n"
+ " \n"
+ " /* Fill the FPU registers with known values. */ \n"
+ " vmov.f32 s0, #1.0 \n"
+ " vmov.f32 s2, #2.5 \n"
+ " vmov.f32 s3, #3.5 \n"
+ " vmov.f32 s4, #4.0 \n"
+ " vmov.f32 s5, #5.5 \n"
+ " vmov.f32 s6, #6.5 \n"
+ " vmov.f32 s7, #7.0 \n"
+ " vmov.f32 s8, #8.5 \n"
+ " vmov.f32 s9, #9.5 \n"
+ " vmov.f32 s10, #10.0 \n"
+ " vmov.f32 s11, #11.5 \n"
+ " vmov.f32 s12, #12.5 \n"
+ " vmov.f32 s13, #13.0 \n"
+ " vmov.f32 s14, #14.5 \n"
+ " vmov.f32 s15, #1.5 \n"
+ " vmov.f32 s16, #2.0 \n"
+ " vmov.f32 s17, #3.5 \n"
+ " vmov.f32 s18, #4.5 \n"
+ " vmov.f32 s19, #5.0 \n"
+ " vmov.f32 s20, #6.5 \n"
+ " vmov.f32 s21, #7.5 \n"
+ " vmov.f32 s22, #8.0 \n"
+ " vmov.f32 s23, #9.5 \n"
+ " vmov.f32 s24, #10.5 \n"
+ " vmov.f32 s25, #11.0 \n"
+ " vmov.f32 s26, #12.5 \n"
+ " vmov.f32 s27, #13.5 \n"
+ " vmov.f32 s28, #14.0 \n"
+ " vmov.f32 s29, #1.5 \n"
+ " vmov.f32 s30, #2.5 \n"
+ " vmov.f32 s31, #3.0 \n"
+ " \n"
+ " /* Force a context switch by pending sv. */ \n"
+ " push { r0, r1 } \n"
+ " movs r0, #0x01 \n"
+ " ldr r1, =0xe000ed04 \n" /* NVIC_ICSR. */
+ " lsls r0, #28 \n" /* Shift to PendSV bit. */
+ " str r0, [r1] \n"
+ " dsb \n"
+ " pop { r0, r1 } \n"
+ " \n"
+ " /* Verify that core registers contain correct values. */ \n"
+ " cmp r0, #150 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r1, #151 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r2, #152 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r3, #153 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r4, #154 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r5, #155 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r6, #156 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r7, #157 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r8, #158 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r9, #159 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r10, #160 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r11, #161 \n"
+ " bne reg_nscb_error_loop \n"
+ " cmp r12, #162 \n"
+ " bne reg_nscb_error_loop \n"
+ " \n"
+ " /* Verify that FPU registers contain correct values. */ \n"
+ " vmov.f32 s1, #1.0 \n"
+ " vcmp.f32 s0, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #2.5 \n"
+ " vcmp.f32 s2, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #3.5 \n"
+ " vcmp.f32 s3, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #4.0 \n"
+ " vcmp.f32 s4, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #5.5 \n"
+ " vcmp.f32 s5, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #6.5 \n"
+ " vcmp.f32 s6, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #7.0 \n"
+ " vcmp.f32 s7, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #8.5 \n"
+ " vcmp.f32 s8, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #9.5 \n"
+ " vcmp.f32 s9, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #10.0 \n"
+ " vcmp.f32 s10, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #11.5 \n"
+ " vcmp.f32 s11, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #12.5 \n"
+ " vcmp.f32 s12, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #13.0 \n"
+ " vcmp.f32 s13, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #14.5 \n"
+ " vcmp.f32 s14, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #1.5 \n"
+ " vcmp.f32 s15, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #2.0 \n"
+ " vcmp.f32 s16, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #3.5 \n"
+ " vcmp.f32 s17, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #4.5 \n"
+ " vcmp.f32 s18, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #5.0 \n"
+ " vcmp.f32 s19, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #6.5 \n"
+ " vcmp.f32 s20, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #7.5 \n"
+ " vcmp.f32 s21, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #8.0 \n"
+ " vcmp.f32 s22, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #9.5 \n"
+ " vcmp.f32 s23, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #10.5 \n"
+ " vcmp.f32 s24, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #11.0 \n"
+ " vcmp.f32 s25, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #12.5 \n"
+ " vcmp.f32 s26, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #13.5 \n"
+ " vcmp.f32 s27, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #14.0 \n"
+ " vcmp.f32 s28, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #1.5 \n"
+ " vcmp.f32 s29, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #2.5 \n"
+ " vcmp.f32 s30, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " vmov.f32 s1, #3.0 \n"
+ " vcmp.f32 s31, s1 \n"
+ " vmrs APSR_nzcv, FPSCR \n"
+ " bne reg_nscb_error_loop \n"
+ " \n"
+ " /* Everything passed, finish. */ \n"
+ " b reg_nscb_success \n"
+ " \n"
+ "reg_nscb_error_loop: \n"
+ " /* If this line is hit then there was an error in \n"
+ " * a core register value. The loop ensures the \n"
+ " * loop counter stops incrementing. */ \n"
+ " b reg_nscb_error_loop \n"
+ " nop \n"
+ " \n"
+ "reg_nscb_success: \n"
+ " /* Restore callee saved registers. */ \n"
+ " pop { r4-r12 } \n"
+ );
+}
+/*-----------------------------------------------------------*/
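vRegTestAsm_NonSecureCallback is intended to be invoked from the secure image across the security boundary, which is why, unlike the tasks above, it saves the callee-saved registers on entry and returns on success. A sketch of a secure-side caller, assuming the toolchain is built with CMSE support (-mcmse); the typedef and function names here are hypothetical, not part of this commit:

#include <arm_cmse.h>

/* Pointer to a void( void ) function that lives in the non-secure image. */
typedef void ( * NonSecureCallback_t )( void ) __attribute__( ( cmse_nonsecure_call ) );

void vRunNonSecureRegTestCallback( void * pvNonSecureFunction )
{
    NonSecureCallback_t pxCallback;

    /* cmse_nsfptr_create clears the LSB of the pointer so the branch
     * targets the non-secure state; the call compiles to BLXNS. */
    pxCallback = ( NonSecureCallback_t ) cmse_nsfptr_create( pvNonSecureFunction );
    pxCallback();
}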