summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@sifive.com>2021-12-14 08:58:16 -0800
committerCraig Topper <craig.topper@sifive.com>2021-12-14 09:14:03 -0800
commit7598ac5ec5a6bebc347ce95daf3be7a62ab2888e (patch)
tree8341fda021be73acd7a44f48be7d4c32a34da675
parentaedb328a4dc9cb48ee3cf3198281649ea2c4f532 (diff)
downloadllvm-7598ac5ec5a6bebc347ce95daf3be7a62ab2888e.tar.gz
[RISCV] Convert (splat_vector (load)) to vlse with 0 stride.
We already do this for splat nodes that carry a VL, but not for splats that use VLMAX.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D115483
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll19
2 files changed, 24 insertions, 1 deletion
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 516e765c9202..b24eb5f7bbf4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1515,6 +1515,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
ReplaceNode(Node, Extract.getNode());
return;
}
+ case ISD::SPLAT_VECTOR:
case RISCVISD::VMV_V_X_VL:
case RISCVISD::VFMV_V_F_VL: {
// Try to match splat of a scalar load to a strided load with stride of x0.
@@ -1531,7 +1532,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
break;
SDValue VL;
- selectVLOp(Node->getOperand(1), VL);
+ if (Node->getOpcode() == ISD::SPLAT_VECTOR)
+ VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
+ else
+ selectVLOp(Node->getOperand(1), VL);
unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
index d9b5583f5bb6..a58822bb3b7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
@@ -105,3 +105,22 @@ define <vscale x 8 x double> @vsplat_zero_nxv8f64() {
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x double> %splat
}
+
+; Test that we fold this to a vlse with 0 stride.
+define <vscale x 8 x float> @vsplat_load_nxv8f32(float* %ptr) {
+; RV32V-LABEL: vsplat_load_nxv8f32:
+; RV32V: # %bb.0:
+; RV32V-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; RV32V-NEXT: vlse32.v v8, (a0), zero
+; RV32V-NEXT: ret
+;
+; RV64V-LABEL: vsplat_load_nxv8f32:
+; RV64V: # %bb.0:
+; RV64V-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; RV64V-NEXT: vlse32.v v8, (a0), zero
+; RV64V-NEXT: ret
+ %f = load float, float* %ptr
+ %head = insertelement <vscale x 8 x float> undef, float %f, i32 0
+ %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+ ret <vscale x 8 x float> %splat
+}