Diffstat (limited to 'test/CodeGen/RISCV')
-rw-r--r--  test/CodeGen/RISCV/alu32.ll    |   1 -
-rw-r--r--  test/CodeGen/RISCV/branch.ll   | 121 ++++++++++++
-rw-r--r--  test/CodeGen/RISCV/calls.ll    |  83 ++++++++
-rw-r--r--  test/CodeGen/RISCV/imm.ll      |  47 ++++
-rw-r--r--  test/CodeGen/RISCV/mem.ll      | 202 ++++++++++++++++++++
-rw-r--r--  test/CodeGen/RISCV/wide-mem.ll |  34 +++
6 files changed, 487 insertions(+), 1 deletion(-)
diff --git a/test/CodeGen/RISCV/alu32.ll b/test/CodeGen/RISCV/alu32.ll
index 32242d2e40d3..9db6bb9dd434 100644
--- a/test/CodeGen/RISCV/alu32.ll
+++ b/test/CodeGen/RISCV/alu32.ll
@@ -7,7 +7,6 @@ define i32 @addi(i32 %a) nounwind {
; RV32I-LABEL: addi:
; RV32I: addi a0, a0, 1
; RV32I: jalr zero, ra, 0
-; TODO: check support for materialising larger constants
%1 = add i32 %a, 1
ret i32 %1
}
diff --git a/test/CodeGen/RISCV/branch.ll b/test/CodeGen/RISCV/branch.ll
new file mode 100644
index 000000000000..194083b07c71
--- /dev/null
+++ b/test/CodeGen/RISCV/branch.ll
@@ -0,0 +1,121 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+
+define void @foo(i32 %a, i32 *%b, i1 %c) {
+; RV32I-LABEL: foo:
+; RV32I: # BB#0:
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: beq a3, a0, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_1
+; RV32I-NEXT: .LBB0_1: # %test2
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: bne a3, a0, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_2
+; RV32I-NEXT: .LBB0_2: # %test3
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: blt a3, a0, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_3
+; RV32I-NEXT: .LBB0_3: # %test4
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: bge a3, a0, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_4
+; RV32I-NEXT: .LBB0_4: # %test5
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: bltu a3, a0, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_5
+; RV32I-NEXT: .LBB0_5: # %test6
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: bgeu a3, a0, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_6
+; RV32I-NEXT: .LBB0_6: # %test7
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: blt a0, a3, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_7
+; RV32I-NEXT: .LBB0_7: # %test8
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: bge a0, a3, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_8
+; RV32I-NEXT: .LBB0_8: # %test9
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: bltu a0, a3, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_9
+; RV32I-NEXT: .LBB0_9: # %test10
+; RV32I-NEXT: lw a3, 0(a1)
+; RV32I-NEXT: bgeu a0, a3, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_10
+; RV32I-NEXT: .LBB0_10: # %test11
+; RV32I-NEXT: lw a0, 0(a1)
+; RV32I-NEXT: andi a0, a2, 1
+; RV32I-NEXT: bne a0, zero, .LBB0_12
+; RV32I-NEXT: jal zero, .LBB0_11
+; RV32I-NEXT: .LBB0_11: # %test12
+; RV32I-NEXT: lw a0, 0(a1)
+; RV32I-NEXT: .LBB0_12: # %end
+; RV32I-NEXT: jalr zero, ra, 0
+
+ %val1 = load volatile i32, i32* %b
+ %tst1 = icmp eq i32 %val1, %a
+ br i1 %tst1, label %end, label %test2
+
+test2:
+ %val2 = load volatile i32, i32* %b
+ %tst2 = icmp ne i32 %val2, %a
+ br i1 %tst2, label %end, label %test3
+
+test3:
+ %val3 = load volatile i32, i32* %b
+ %tst3 = icmp slt i32 %val3, %a
+ br i1 %tst3, label %end, label %test4
+
+test4:
+ %val4 = load volatile i32, i32* %b
+ %tst4 = icmp sge i32 %val4, %a
+ br i1 %tst4, label %end, label %test5
+
+test5:
+ %val5 = load volatile i32, i32* %b
+ %tst5 = icmp ult i32 %val5, %a
+ br i1 %tst5, label %end, label %test6
+
+test6:
+ %val6 = load volatile i32, i32* %b
+ %tst6 = icmp uge i32 %val6, %a
+ br i1 %tst6, label %end, label %test7
+
+; Check for condition codes that don't have a matching instruction
+
+test7:
+ %val7 = load volatile i32, i32* %b
+ %tst7 = icmp sgt i32 %val7, %a
+ br i1 %tst7, label %end, label %test8
+
+test8:
+ %val8 = load volatile i32, i32* %b
+ %tst8 = icmp sle i32 %val8, %a
+ br i1 %tst8, label %end, label %test9
+
+test9:
+ %val9 = load volatile i32, i32* %b
+ %tst9 = icmp ugt i32 %val9, %a
+ br i1 %tst9, label %end, label %test10
+
+test10:
+ %val10 = load volatile i32, i32* %b
+ %tst10 = icmp ule i32 %val10, %a
+ br i1 %tst10, label %end, label %test11
+
+; Check the case of a branch where the condition was generated in another
+; function
+
+test11:
+ %val11 = load volatile i32, i32* %b
+ br i1 %c, label %end, label %test12
+
+test12:
+ %val12 = load volatile i32, i32* %b
+ br label %end
+
+end:
+ ret void
+}
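
The swapped-operand checks in test7-test10 follow from the ISA: RV32I provides only beq, bne, blt, bge, bltu and bgeu, so sgt/sle/ugt/ule must be lowered by reversing the operands of the complementary comparison. A minimal C sketch of that mapping (illustrative only, not the backend's actual code; the enum and function names are made up):

    #include <stdio.h>

    enum Pred { SGT, SLE, UGT, ULE };  /* predicates with no direct branch */

    /* Evaluate "a <pred> b" using only the comparisons RV32I can branch on,
       by swapping the operands; mirrors e.g. "blt a0, a3" for sgt above. */
    static int lower_swapped(enum Pred p, int a, int b) {
      switch (p) {
      case SGT: return b < a;                      /* blt  b, a */
      case SLE: return b >= a;                     /* bge  b, a */
      case UGT: return (unsigned)b < (unsigned)a;  /* bltu b, a */
      case ULE: return (unsigned)b >= (unsigned)a; /* bgeu b, a */
      }
      return 0;
    }

    int main(void) {
      printf("%d\n", lower_swapped(SGT, 3, 2)); /* prints 1: 3 > 2 */
      return 0;
    }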
diff --git a/test/CodeGen/RISCV/calls.ll b/test/CodeGen/RISCV/calls.ll
new file mode 100644
index 000000000000..8abe5e92a8e0
--- /dev/null
+++ b/test/CodeGen/RISCV/calls.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+
+declare i32 @external_function(i32)
+
+define i32 @test_call_external(i32 %a) nounwind {
+; RV32I-LABEL: test_call_external:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a1, %hi(external_function)
+; RV32I-NEXT: addi a1, a1, %lo(external_function)
+; RV32I-NEXT: jalr ra, a1, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 @external_function(i32 %a)
+ ret i32 %1
+}
+
+define i32 @defined_function(i32 %a) nounwind {
+; RV32I-LABEL: defined_function:
+; RV32I: # BB#0:
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = add i32 %a, 1
+ ret i32 %1
+}
+
+define i32 @test_call_defined(i32 %a) nounwind {
+; RV32I-LABEL: test_call_defined:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: lui a1, %hi(defined_function)
+; RV32I-NEXT: addi a1, a1, %lo(defined_function)
+; RV32I-NEXT: jalr ra, a1, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 @defined_function(i32 %a) nounwind
+ ret i32 %1
+}
+
+define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind {
+; RV32I-LABEL: test_call_indirect:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: addi a2, a0, 0
+; RV32I-NEXT: addi a0, a1, 0
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 %a(i32 %b)
+ ret i32 %1
+}
+
+; Ensure that calls to fastcc functions aren't rejected. Such calls may be
+; introduced when compiling with optimisation.
+
+define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: fastcc_function:
+; RV32I: # BB#0:
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = add i32 %a, %b
+ ret i32 %1
+}
+
+define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: test_call_fastcc:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw ra, 12(s0)
+; RV32I-NEXT: sw s1, 8(s0)
+; RV32I-NEXT: addi s1, a0, 0
+; RV32I-NEXT: lui a0, %hi(fastcc_function)
+; RV32I-NEXT: addi a2, a0, %lo(fastcc_function)
+; RV32I-NEXT: addi a0, s1, 0
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: addi a0, s1, 0
+; RV32I-NEXT: lw s1, 8(s0)
+; RV32I-NEXT: lw ra, 12(s0)
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call fastcc i32 @fastcc_function(i32 %a, i32 %b)
+ ret i32 %a
+}
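
A note on the jalr idiom that recurs in these checks. Per the RISC-V ISA, jalr rd, rs1, imm writes pc+4 to rd and jumps to rs1+imm with the low bit cleared, so:

    /* jalr ra, a1, 0    => indirect call through a1, return address in ra  */
    /* jalr zero, ra, 0  => jump to the saved return address, link write
                            discarded: a plain return                       */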
diff --git a/test/CodeGen/RISCV/imm.ll b/test/CodeGen/RISCV/imm.ll
new file mode 100644
index 000000000000..c52638da02eb
--- /dev/null
+++ b/test/CodeGen/RISCV/imm.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32I
+
+; Materializing constants
+
+define i32 @zero() nounwind {
+; RV32I-LABEL: zero:
+; RV32I: # BB#0:
+; RV32I-NEXT: addi a0, zero, 0
+; RV32I-NEXT: jalr zero, ra, 0
+ ret i32 0
+}
+
+define i32 @pos_small() nounwind {
+; RV32I-LABEL: pos_small:
+; RV32I: # BB#0:
+; RV32I-NEXT: addi a0, zero, 2047
+; RV32I-NEXT: jalr zero, ra, 0
+ ret i32 2047
+}
+
+define i32 @neg_small() nounwind {
+; RV32I-LABEL: neg_small:
+; RV32I: # BB#0:
+; RV32I-NEXT: addi a0, zero, -2048
+; RV32I-NEXT: jalr zero, ra, 0
+ ret i32 -2048
+}
+
+define i32 @pos_i32() nounwind {
+; RV32I-LABEL: pos_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a0, 423811
+; RV32I-NEXT: addi a0, a0, -1297
+; RV32I-NEXT: jalr zero, ra, 0
+ ret i32 1735928559
+}
+
+define i32 @neg_i32() nounwind {
+; RV32I-LABEL: neg_i32:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a0, 912092
+; RV32I-NEXT: addi a0, a0, -273
+; RV32I-NEXT: jalr zero, ra, 0
+ ret i32 -559038737
+}
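
The lui/addi pairs above split a 32-bit constant into a 20-bit high part and a sign-extended 12-bit low part; because addi sign-extends its immediate, a set bit 11 in the low part forces the high part to be rounded up by one. A minimal C sketch of the split (an illustration of the rule, not the backend's code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint32_t c = 1735928559;                 /* 0x67782AEF, from pos_i32 */
      int32_t lo = (int32_t)(c << 20) >> 20;   /* sign-extended low 12 bits */
      uint32_t hi = (c - (uint32_t)lo) >> 12;  /* same as (c + 0x800) >> 12 */
      printf("lui %u; addi %d\n", hi, lo);     /* lui 423811; addi -1297 */
      return 0;
    }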
diff --git a/test/CodeGen/RISCV/mem.ll b/test/CodeGen/RISCV/mem.ll
new file mode 100644
index 000000000000..b06382f8742a
--- /dev/null
+++ b/test/CodeGen/RISCV/mem.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32I
+
+; Check indexed and unindexed, sext, zext and anyext loads
+
+define i32 @lb(i8 *%a) nounwind {
+; RV32I-LABEL: lb:
+; RV32I: # BB#0:
+; RV32I-NEXT: lb a1, 0(a0)
+; RV32I-NEXT: lb a0, 1(a0)
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = getelementptr i8, i8* %a, i32 1
+ %2 = load i8, i8* %1
+ %3 = sext i8 %2 to i32
+ ; the unused load will produce an anyext for selection
+ %4 = load volatile i8, i8* %a
+ ret i32 %3
+}
+
+define i32 @lh(i16 *%a) nounwind {
+; RV32I-LABEL: lh:
+; RV32I: # BB#0:
+; RV32I-NEXT: lh a1, 0(a0)
+; RV32I-NEXT: lh a0, 4(a0)
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = getelementptr i16, i16* %a, i32 2
+ %2 = load i16, i16* %1
+ %3 = sext i16 %2 to i32
+ ; the unused load will produce an anyext for selection
+ %4 = load volatile i16, i16* %a
+ ret i32 %3
+}
+
+define i32 @lw(i32 *%a) nounwind {
+; RV32I-LABEL: lw:
+; RV32I: # BB#0:
+; RV32I-NEXT: lw a1, 0(a0)
+; RV32I-NEXT: lw a0, 12(a0)
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = getelementptr i32, i32* %a, i32 3
+ %2 = load i32, i32* %1
+ %3 = load volatile i32, i32* %a
+ ret i32 %2
+}
+
+define i32 @lbu(i8 *%a) nounwind {
+; RV32I-LABEL: lbu:
+; RV32I: # BB#0:
+; RV32I-NEXT: lbu a1, 0(a0)
+; RV32I-NEXT: lbu a0, 4(a0)
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = getelementptr i8, i8* %a, i32 4
+ %2 = load i8, i8* %1
+ %3 = zext i8 %2 to i32
+ %4 = load volatile i8, i8* %a
+ %5 = zext i8 %4 to i32
+ %6 = add i32 %3, %5
+ ret i32 %6
+}
+
+define i32 @lhu(i16 *%a) nounwind {
+; RV32I-LABEL: lhu:
+; RV32I: # BB#0:
+; RV32I-NEXT: lhu a1, 0(a0)
+; RV32I-NEXT: lhu a0, 10(a0)
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = getelementptr i16, i16* %a, i32 5
+ %2 = load i16, i16* %1
+ %3 = zext i16 %2 to i32
+ %4 = load volatile i16, i16* %a
+ %5 = zext i16 %4 to i32
+ %6 = add i32 %3, %5
+ ret i32 %6
+}
+
+; Check indexed and unindexed stores
+
+define void @sb(i8 *%a, i8 %b) nounwind {
+; RV32I-LABEL: sb:
+; RV32I: # BB#0:
+; RV32I-NEXT: sb a1, 6(a0)
+; RV32I-NEXT: sb a1, 0(a0)
+; RV32I-NEXT: jalr zero, ra, 0
+ store i8 %b, i8* %a
+ %1 = getelementptr i8, i8* %a, i32 6
+ store i8 %b, i8* %1
+ ret void
+}
+
+define void @sh(i16 *%a, i16 %b) nounwind {
+; RV32I-LABEL: sh:
+; RV32I: # BB#0:
+; RV32I-NEXT: sh a1, 14(a0)
+; RV32I-NEXT: sh a1, 0(a0)
+; RV32I-NEXT: jalr zero, ra, 0
+ store i16 %b, i16* %a
+ %1 = getelementptr i16, i16* %a, i32 7
+ store i16 %b, i16* %1
+ ret void
+}
+
+define void @sw(i32 *%a, i32 %b) nounwind {
+; RV32I-LABEL: sw:
+; RV32I: # BB#0:
+; RV32I-NEXT: sw a1, 32(a0)
+; RV32I-NEXT: sw a1, 0(a0)
+; RV32I-NEXT: jalr zero, ra, 0
+ store i32 %b, i32* %a
+ %1 = getelementptr i32, i32* %a, i32 8
+ store i32 %b, i32* %1
+ ret void
+}
+
+; Check load and store to an i1 location
+define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
+; RV32I-LABEL: load_sext_zext_anyext_i1:
+; RV32I: # BB#0:
+; RV32I-NEXT: lb a1, 0(a0)
+; RV32I-NEXT: lbu a1, 1(a0)
+; RV32I-NEXT: lbu a0, 2(a0)
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+ ; sextload i1
+ %1 = getelementptr i1, i1* %a, i32 1
+ %2 = load i1, i1* %1
+ %3 = sext i1 %2 to i32
+ ; zextload i1
+ %4 = getelementptr i1, i1* %a, i32 2
+ %5 = load i1, i1* %4
+ %6 = zext i1 %5 to i32
+ %7 = add i32 %3, %6
+ ; extload i1 (anyext). Produced as the load is unused.
+ %8 = load volatile i1, i1* %a
+ ret i32 %7
+}
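+
+; Note how the sub implements the add above: an i1 loaded from memory is
+; 0 or 1, so its sext (0 or -1) is the negation of its zext, and
+; sext(%2) + zext(%5) folds to zext(%5) - zext(%2).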
+
+define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
+; RV32I-LABEL: load_sext_zext_anyext_i1_i16:
+; RV32I: # BB#0:
+; RV32I-NEXT: lb a1, 0(a0)
+; RV32I-NEXT: lbu a1, 1(a0)
+; RV32I-NEXT: lbu a0, 2(a0)
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: jalr zero, ra, 0
+ ; sextload i1
+ %1 = getelementptr i1, i1* %a, i32 1
+ %2 = load i1, i1* %1
+ %3 = sext i1 %2 to i16
+ ; zextload i1
+ %4 = getelementptr i1, i1* %a, i32 2
+ %5 = load i1, i1* %4
+ %6 = zext i1 %5 to i16
+ %7 = add i16 %3, %6
+ ; extload i1 (anyext). Produced as the load is unused.
+ %8 = load volatile i1, i1* %a
+ ret i16 %7
+}
+
+; Check load and store to a global
+@G = global i32 0
+
+define i32 @lw_sw_global(i32 %a) nounwind {
+; TODO: the addi should be folded into the lw/sw operations
+; RV32I-LABEL: lw_sw_global:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a1, %hi(G)
+; RV32I-NEXT: addi a2, a1, %lo(G)
+; RV32I-NEXT: lw a1, 0(a2)
+; RV32I-NEXT: sw a0, 0(a2)
+; RV32I-NEXT: lui a2, %hi(G+36)
+; RV32I-NEXT: addi a2, a2, %lo(G+36)
+; RV32I-NEXT: lw a3, 0(a2)
+; RV32I-NEXT: sw a0, 0(a2)
+; RV32I-NEXT: addi a0, a1, 0
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = load volatile i32, i32* @G
+ store i32 %a, i32* @G
+ %2 = getelementptr i32, i32* @G, i32 9
+ %3 = load volatile i32, i32* %2
+ store i32 %a, i32* %2
+ ret i32 %1
+}
+
+; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
+define i32 @lw_sw_constant(i32 %a) nounwind {
+; TODO: the addi should be folded into the lw/sw
+; RV32I-LABEL: lw_sw_constant:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a1, 912092
+; RV32I-NEXT: addi a2, a1, -273
+; RV32I-NEXT: lw a1, 0(a2)
+; RV32I-NEXT: sw a0, 0(a2)
+; RV32I-NEXT: addi a0, a1, 0
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = inttoptr i32 3735928559 to i32*
+ %2 = load volatile i32, i32* %1
+ store i32 %a, i32* %1
+ ret i32 %2
+}
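
Worked example for the constant above: 3735928559 is 0xDEADBEEF, whose low 12 bits (0xEEF) sign-extend to -273; the high 20 bits therefore round up to 0xDEADC (912092) rather than 0xDEADB, and indeed 0xDEADC000 - 273 = 0xDEADBEEF.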
diff --git a/test/CodeGen/RISCV/wide-mem.ll b/test/CodeGen/RISCV/wide-mem.ll
new file mode 100644
index 000000000000..18ab52aaf138
--- /dev/null
+++ b/test/CodeGen/RISCV/wide-mem.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32I
+
+; Check load/store operations on values wider than what is natively supported
+
+define i64 @load_i64(i64 *%a) nounwind {
+; RV32I-LABEL: load_i64:
+; RV32I: # BB#0:
+; RV32I-NEXT: lw a2, 0(a0)
+; RV32I-NEXT: lw a1, 4(a0)
+; RV32I-NEXT: addi a0, a2, 0
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = load i64, i64* %a
+ ret i64 %1
+}
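+
+; The two lw pick up the little-endian halves at offsets 0 and 4; under the
+; RV32 calling convention an i64 result travels in a0 (low) and a1 (high),
+; hence the final addi moving the low word into a0.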
+
+@val64 = local_unnamed_addr global i64 2863311530, align 8
+
+; TODO: codegen on this should be improved. It shouldn't be necessary to
+; generate two addi instructions
+define i64 @load_i64_global() nounwind {
+; RV32I-LABEL: load_i64_global:
+; RV32I: # BB#0:
+; RV32I-NEXT: lui a0, %hi(val64)
+; RV32I-NEXT: addi a0, a0, %lo(val64)
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: lui a1, %hi(val64+4)
+; RV32I-NEXT: addi a1, a1, %lo(val64+4)
+; RV32I-NEXT: lw a1, 0(a1)
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = load i64, i64* @val64
+ ret i64 %1
+}