summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPhilip Reames <listmail@philipreames.com>2019-05-06 22:12:07 +0000
committerPhilip Reames <listmail@philipreames.com>2019-05-06 22:12:07 +0000
commit96ebfdf0edb112732b07b2164d73e05224b1a2a7 (patch)
tree1e8fd496e7b36710b30f29008e75d49efeec0411
parentca13910a6c9c19ba0d58090fd4bef793b14f2a25 (diff)
downloadllvm-96ebfdf0edb112732b07b2164d73e05224b1a2a7.tar.gz
[Tests] Autogen a test in advance of updates
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@360091 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll | 278
1 file changed, 216 insertions(+), 62 deletions(-)
diff --git a/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll b/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
index 0872da2a183c..e85db2698be7 100644
--- a/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
+++ b/test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll
@@ -1,188 +1,342 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
define i8* @test_memcpy1(i8* %P, i8* %Q) {
- ; CHECK: test_memcpy
+; CHECK-LABEL: test_memcpy1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memcpy_element_unordered_atomic_1
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 1)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memcpy_element_unordered_atomic_1
}
define i8* @test_memcpy2(i8* %P, i8* %Q) {
- ; CHECK: test_memcpy2
+; CHECK-LABEL: test_memcpy2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memcpy_element_unordered_atomic_2
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 2)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memcpy_element_unordered_atomic_2
}
define i8* @test_memcpy4(i8* %P, i8* %Q) {
- ; CHECK: test_memcpy4
+; CHECK-LABEL: test_memcpy4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memcpy_element_unordered_atomic_4
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 4)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memcpy_element_unordered_atomic_4
}
define i8* @test_memcpy8(i8* %P, i8* %Q) {
- ; CHECK: test_memcpy8
+; CHECK-LABEL: test_memcpy8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memcpy_element_unordered_atomic_8
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 1024, i32 8)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memcpy_element_unordered_atomic_8
}
define i8* @test_memcpy16(i8* %P, i8* %Q) {
- ; CHECK: test_memcpy16
+; CHECK-LABEL: test_memcpy16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memcpy_element_unordered_atomic_16
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 1024, i32 16)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memcpy_element_unordered_atomic_16
}
define void @test_memcpy_args(i8** %Storage) {
- ; CHECK: test_memcpy_args
+; CHECK-LABEL: test_memcpy_args:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rsi
+; CHECK-NEXT: movq %rax, %rdi
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memcpy_element_unordered_atomic_4
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
%Dst = load i8*, i8** %Storage
%Src.addr = getelementptr i8*, i8** %Storage, i64 1
%Src = load i8*, i8** %Src.addr
; 1st arg (%rdi)
- ; CHECK-DAG: movq (%rdi), [[REG1:%r.+]]
- ; CHECK-DAG: movq [[REG1]], %rdi
; 2nd arg (%rsi)
- ; CHECK-DAG: movq 8(%rdi), %rsi
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memcpy_element_unordered_atomic_4
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 1024, i32 4)
ret void
}
define i8* @test_memmove1(i8* %P, i8* %Q) {
- ; CHECK: test_memmove
+; CHECK-LABEL: test_memmove1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memmove_element_unordered_atomic_1
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 1)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memmove_element_unordered_atomic_1
}
define i8* @test_memmove2(i8* %P, i8* %Q) {
- ; CHECK: test_memmove2
+; CHECK-LABEL: test_memmove2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memmove_element_unordered_atomic_2
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 2)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memmove_element_unordered_atomic_2
}
define i8* @test_memmove4(i8* %P, i8* %Q) {
- ; CHECK: test_memmove4
+; CHECK-LABEL: test_memmove4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memmove_element_unordered_atomic_4
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %P, i8* align 4 %Q, i32 1024, i32 4)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memmove_element_unordered_atomic_4
}
define i8* @test_memmove8(i8* %P, i8* %Q) {
- ; CHECK: test_memmove8
+; CHECK-LABEL: test_memmove8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memmove_element_unordered_atomic_8
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %P, i8* align 8 %Q, i32 1024, i32 8)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memmove_element_unordered_atomic_8
}
define i8* @test_memmove16(i8* %P, i8* %Q) {
- ; CHECK: test_memmove16
+; CHECK-LABEL: test_memmove16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memmove_element_unordered_atomic_16
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %P, i8* align 16 %Q, i32 1024, i32 16)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memmove_element_unordered_atomic_16
}
define void @test_memmove_args(i8** %Storage) {
- ; CHECK: test_memmove_args
+; CHECK-LABEL: test_memmove_args:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: movq 8(%rdi), %rsi
+; CHECK-NEXT: movq %rax, %rdi
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memmove_element_unordered_atomic_4
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
%Dst = load i8*, i8** %Storage
%Src.addr = getelementptr i8*, i8** %Storage, i64 1
%Src = load i8*, i8** %Src.addr
; 1st arg (%rdi)
- ; CHECK-DAG: movq (%rdi), [[REG1:%r.+]]
- ; CHECK-DAG: movq [[REG1]], %rdi
; 2nd arg (%rsi)
- ; CHECK-DAG: movq 8(%rdi), %rsi
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memmove_element_unordered_atomic_4
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %Dst, i8* align 4 %Src, i32 1024, i32 4)
ret void
}
define i8* @test_memset1(i8* %P, i8 %V) {
- ; CHECK: test_memset
+; CHECK-LABEL: test_memset1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memset_element_unordered_atomic_1
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %P, i8 %V, i32 1024, i32 1)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memset_element_unordered_atomic_1
}
define i8* @test_memset2(i8* %P, i8 %V) {
- ; CHECK: test_memset2
+; CHECK-LABEL: test_memset2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memset_element_unordered_atomic_2
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %P, i8 %V, i32 1024, i32 2)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memset_element_unordered_atomic_2
}
define i8* @test_memset4(i8* %P, i8 %V) {
- ; CHECK: test_memset4
+; CHECK-LABEL: test_memset4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memset_element_unordered_atomic_4
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %P, i8 %V, i32 1024, i32 4)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memset_element_unordered_atomic_4
}
define i8* @test_memset8(i8* %P, i8 %V) {
- ; CHECK: test_memset8
+; CHECK-LABEL: test_memset8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memset_element_unordered_atomic_8
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %P, i8 %V, i32 1024, i32 8)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memset_element_unordered_atomic_8
}
define i8* @test_memset16(i8* %P, i8 %V) {
- ; CHECK: test_memset16
+; CHECK-LABEL: test_memset16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memset_element_unordered_atomic_16
+; CHECK-NEXT: movq %rbx, %rax
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %P, i8 %V, i32 1024, i32 16)
ret i8* %P
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memset_element_unordered_atomic_16
}
define void @test_memset_args(i8** %Storage, i8* %V) {
- ; CHECK: test_memset_args
+; CHECK-LABEL: test_memset_args:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movq (%rdi), %rdi
+; CHECK-NEXT: movzbl (%rsi), %esi
+; CHECK-NEXT: movl $1024, %edx # imm = 0x400
+; CHECK-NEXT: callq __llvm_memset_element_unordered_atomic_4
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
%Dst = load i8*, i8** %Storage
%Val = load i8, i8* %V
; 1st arg (%rdi)
- ; CHECK-DAG: movq (%rdi), %rdi
; 2nd arg (%rsi)
- ; CHECK-DAG: movzbl (%rsi), %esi
; 3rd arg (%edx) -- length
- ; CHECK-DAG: movl $1024, %edx
- ; CHECK: __llvm_memset_element_unordered_atomic_4
call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %Dst, i8 %Val, i32 1024, i32 4)
ret void
}