author     Tim Northover <tnorthover@apple.com>  2016-03-09 18:54:42 +0000
committer  Tim Northover <tnorthover@apple.com>  2016-03-09 18:54:42 +0000
commit     0ca63760b6a135252850be0ba8cc73f6a0fb2b73 (patch)
tree       ed69c8fab41b12ae07f32475b53059bc21952766 /test/CodeGen/builtins-arm64.c
parent     e2e2605a5665587116181091d1ddc42972c8548a (diff)
download   clang-0ca63760b6a135252850be0ba8cc73f6a0fb2b73.tar.gz
ARM & AArch64: convert asm tests to LLVM IR and restrict optimizations.
This is mostly a one-time autoconversion of tests that checked assembly after "-Owhatever" compiles to only run "opt -mem2reg" and check the IR. This should make them much more stable to changes in LLVM, so they won't break on unrelated changes. "opt -mem2reg" is a compromise designed to increase the readability of tests that check dataflow, while minimizing the dependency on LLVM. Hopefully mem2reg is stable enough that no surprises will come along.

Should address http://llvm.org/PR26815.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@263048 91177308-0d34-0410-b5e6-96231b3b80d8
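For illustration, here is a minimal sketch of the converted test style (a hypothetical test function, not part of this commit): the RUN line drops "-O3" and instead pipes unoptimized IR through "opt -S -mem2reg", so the CHECK lines can match a plain "call" directly, without the "{{.*}}" wildcard that previously had to allow for optimizer-added text (for example, a "tail" marker) in front of the call.

// RUN: %clang_cc1 -triple arm64-apple-ios -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

// Hypothetical example of the converted style: mem2reg keeps the dataflow
// readable, but no other LLVM passes run, so the checks stay insensitive
// to changes in the optimization pipeline.
unsigned read_sysreg_example() {
  // CHECK: [[V:[%A-Za-z0-9.]+]] = call i64 @llvm.read_register.i64(metadata ![[M:[0-9]]])
  // CHECK-NEXT: trunc i64 [[V]] to i32
  return __builtin_arm_rsr("1:2:3:4:5");
}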
Diffstat (limited to 'test/CodeGen/builtins-arm64.c')
-rw-r--r--  test/CodeGen/builtins-arm64.c  |  6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/test/CodeGen/builtins-arm64.c b/test/CodeGen/builtins-arm64.c
index 16e22d771f..0525dd6219 100644
--- a/test/CodeGen/builtins-arm64.c
+++ b/test/CodeGen/builtins-arm64.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -triple arm64-apple-ios -O3 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple arm64-apple-ios -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
void f0(void *a, void *b) {
__clear_cache(a,b);
@@ -50,7 +50,7 @@ void prefetch() {
}
unsigned rsr() {
- // CHECK: [[V0:[%A-Za-z0-9.]+]] = {{.*}} call i64 @llvm.read_register.i64(metadata ![[M0:[0-9]]])
+ // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i64 @llvm.read_register.i64(metadata ![[M0:[0-9]]])
// CHECK-NEXT: trunc i64 [[V0]] to i32
return __builtin_arm_rsr("1:2:3:4:5");
}
@@ -61,7 +61,7 @@ unsigned long rsr64() {
}
void *rsrp() {
- // CHECK: [[V0:[%A-Za-z0-9.]+]] = {{.*}} call i64 @llvm.read_register.i64(metadata ![[M0:[0-9]]])
+ // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i64 @llvm.read_register.i64(metadata ![[M0:[0-9]]])
// CHECK-NEXT: inttoptr i64 [[V0]] to i8*
return __builtin_arm_rsrp("1:2:3:4:5");
}