Diffstat (limited to 'llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp')
-rw-r--r-- llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index b87a25278922..5afae3265f6c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -572,6 +572,37 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     break;
   }
+  case ISD::SRA: {
+    // Optimize (sra (sext_inreg X, i16), C) ->
+    //          (srai (slli X, (XLen-16)), (XLen-16) + C)
+    // And      (sra (sext_inreg X, i8), C) ->
+    //          (srai (slli X, (XLen-8)), (XLen-8) + C)
+    // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
+    // This transform matches the code we get without Zbb. The shifts are more
+    // compressible, and this can help expose CSE opportunities in the sdiv by
+    // constant optimization.
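+    // For example, with XLen=64, an i16 extend, and C=2:
+    //   (sra (sext_inreg X, i16), 2) -> (srai (slli X, 48), 50)
+    // since XLen-16 = 48 and 48 + 2 = 50.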
+    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
+    if (!N1C)
+      break;
+    SDValue N0 = Node->getOperand(0);
+    if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
+      break;
+    uint64_t ShAmt = N1C->getZExtValue();
+    unsigned ExtSize =
+        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
+    // ExtSize of 32 should use sraiw via tablegen pattern.
+    if (ExtSize >= 32 || ShAmt >= ExtSize)
+      break;
+    unsigned LShAmt = Subtarget->getXLen() - ExtSize;
+    SDNode *SLLI =
+        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
+                               CurDAG->getTargetConstant(LShAmt, DL, VT));
+    SDNode *SRAI = CurDAG->getMachineNode(
+        RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
+        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
+    ReplaceNode(Node, SRAI);
+    return;
+  }
   case ISD::AND: {
     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
     if (!N1C)