author    Nemanja Ivanovic <nemanja.i.ibm@gmail.com>  2023-02-16 13:37:31 -0500
committer Nemanja Ivanovic <nemanja.i.ibm@gmail.com>  2023-02-16 13:37:58 -0500
commit    372820bf571c8d32c8165cfc74b0439c7bb397f9 (patch)
tree      c2e4b16dd904305331d306f46abdd7c0c612089b /libcxxabi
parent    fbe210dc7a6ad87a30e5ffe928a168e621f6fcc5 (diff)
download  llvm-372820bf571c8d32c8165cfc74b0439c7bb397f9.tar.gz
[libunwind][PowerPC] Fix saving/restoring VSX registers on LE systems
Currently, libunwind uses stxvd2x/lxvd2x to save/restore VSX registers. On little-endian systems, this places the two doublewords of each register in memory in reversed order. If both the save and the restore are done the same way, this is not a problem. However, if the unwinder only restores a callee-saved register, it restores it in the wrong order, because function prologues save such registers in the correct order. This patch adds the necessary swaps before the saves and after the restores.

Differential Revision: https://reviews.llvm.org/D137599
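
For context, here is a minimal sketch of the scheme the commit message describes. It is illustrative only, not the actual libunwind assembly: the function and buffer names are made up, VS63 is an arbitrary choice of callee-saved VSX register, and a little-endian POWER target with VSX is assumed. The doublewords are swapped with xxswapd before the stxvd2x that writes the register to the save area (and swapped back so the live value is preserved), and swapped again after the lxvd2x that reloads it, so the in-memory image matches the layout function prologues produce.

#include <cstdint>

// Hypothetical 16-byte save slot standing in for one VSX register's spot in
// the unwind context (the name is not from libunwind).
alignas(16) static uint64_t vs63_slot[2];

static void save_vs63(void) {
  // Swap doublewords, store, then swap back so the live register is untouched.
  asm volatile("xxswapd 63, 63\n\t"
               "stxvd2x 63, 0, %0\n\t"
               "xxswapd 63, 63"
               :
               : "r"(vs63_slot)
               : "memory", "v31");
}

static void restore_vs63(void) {
  // Load the saved image, then swap the doublewords back into register order.
  asm volatile("lxvd2x 63, 0, %0\n\t"
               "xxswapd 63, 63"
               :
               : "r"(vs63_slot)
               : "memory", "v31");
}

int main() {
  asm volatile("vspltisb 31, 5" : : : "v31"); // put a recognizable value in VS63
  save_vs63();
  restore_vs63();
  return 0;
}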
Diffstat (limited to 'libcxxabi')
-rw-r--r--  libcxxabi/test/vendor/ibm/vec_reg_restore-le.pass.cpp  |  90
1 file changed, 90 insertions(+), 0 deletions(-)
diff --git a/libcxxabi/test/vendor/ibm/vec_reg_restore-le.pass.cpp b/libcxxabi/test/vendor/ibm/vec_reg_restore-le.pass.cpp
new file mode 100644
index 000000000000..413d248a9886
--- /dev/null
+++ b/libcxxabi/test/vendor/ibm/vec_reg_restore-le.pass.cpp
@@ -0,0 +1,90 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Check that the PowerPC vector registers are restored properly during
+// unwinding.
+
+// REQUIRES: target=powerpc{{(64)?}}le-unknown-linux-gnu
+// UNSUPPORTED: no-exceptions
+
+// Callee-saved VSRs 63 and 62 (vr31 and vr30 respectively) are set in main to
+// 16 bytes each with values 1 and 2 respectively. To make the two doublewords
+// in each register differ, the registers are then merged with xxmrghd. In a
+// callee they are reset to 16 bytes each with values 9 and 12 respectively and
+// an exception is thrown. When the exception is caught in main, both registers
+// must hold their original values (including the correct doubleword order).
+
+#include <cassert>
+#include <cstdlib>
+
+int __attribute__((noinline)) test2(int i) {
+ if (i > 3)
+ throw i;
+ srand(i);
+ return rand();
+}
+
+int __attribute__((noinline)) test(int i) {
+ // Clobber VS63 and VS62 in the function body.
+ // Set VS63 to 16 bytes each with value 9
+ asm volatile("vspltisb 31, 9" : : : "v31");
+
+ // Set VS62 to 16 bytes each with value 12
+ asm volatile("vspltisb 30, 12" : : : "v30");
+ return test2(i);
+}
+
+#define cmpVS63(vec, result) \
+ { \
+ vector unsigned char gbg; \
+ asm volatile("vcmpequb. %[gbg], 31, %[veca];" \
+ "mfocrf %[res], 2;" \
+ "rlwinm %[res], %[res], 25, 31, 31" \
+ : [res] "=r"(result), [gbg] "=v"(gbg) \
+ : [veca] "v"(vec) \
+ : "cr6"); \
+ }
+
+#define cmpVS62(vec, result) \
+ { \
+ vector unsigned char gbg; \
+ asm volatile("vcmpequb. %[gbg], 30, %[veca];" \
+ "mfocrf %[res], 2;" \
+ "rlwinm %[res], %[res], 25, 31, 31" \
+ : [res] "=r"(result), [gbg] "=v"(gbg) \
+ : [veca] "v"(vec) \
+ : "cr6"); \
+ }
+
+int main(int, char **) {
+ // Set VS63 to 16 bytes each with value 1.
+ asm volatile("vspltisb 31, 1" : : : "v31");
+
+ // Set VS62 to 16 bytes each with value 2.
+ asm volatile("vspltisb 30, 2" : : : "v30");
+
+ // Mix doublewords for both VS62 and VS63.
+ asm volatile("xxmrghd 63, 63, 62");
+ asm volatile("xxmrghd 62, 63, 62");
+
+ vector unsigned long long expectedVS63Value = {0x202020202020202,
+ 0x101010101010101};
+ vector unsigned long long expectedVS62Value = {0x202020202020202,
+ 0x101010101010101};
+ try {
+ test(4);
+ } catch (int num) {
+    // If the unwinder restores VS63 and VS62 correctly, each still holds the
+    // merged 0x01/0x02 pattern from main, not the 0x09/0x0C bytes set in test.
+ bool isEqualVS63, isEqualVS62;
+ cmpVS63(expectedVS63Value, isEqualVS63);
+ cmpVS62(expectedVS62Value, isEqualVS62);
+ assert(isEqualVS63 && isEqualVS62);
+ }
+ return 0;
+}
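
As an aside on the comparison idiom used by the cmpVS63/cmpVS62 macros above: the dot form vcmpequb. records in CR field 6 whether all 16 byte comparisons matched, mfocrf with mask 2 copies CR6 into a general-purpose register, and the rlwinm rotates that copy so the "all elements equal" bit lands in the least significant bit. A standalone sketch of the same idiom follows; it is illustrative only, assumes it is built like the test itself (a POWER target with AltiVec/VSX enabled), and uses arbitrary register and value choices.

#include <cassert>

int main() {
  // Splat the byte value 7 into VR31 (VS63).
  asm volatile("vspltisb 31, 7" : : : "v31");

  vector unsigned char expected = {7, 7, 7, 7, 7, 7, 7, 7,
                                   7, 7, 7, 7, 7, 7, 7, 7};
  vector unsigned char gbg;
  unsigned long equal;
  // vcmpequb. sets CR6, mfocrf 2 reads CR field 6 into a GPR, and rlwinm
  // keeps only the "all elements equal" bit.
  asm volatile("vcmpequb. %[gbg], 31, %[veca];"
               "mfocrf %[res], 2;"
               "rlwinm %[res], %[res], 25, 31, 31"
               : [res] "=r"(equal), [gbg] "=v"(gbg)
               : [veca] "v"(expected)
               : "cr6");
  assert(equal == 1);
  return 0;
}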