summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: Amara Emerson <amara@apple.com> 2020-08-22 23:28:07 -0700
committer: Amara Emerson <amara@apple.com> 2020-09-01 11:06:06 -0700
commit 5ded4442520d3dbb1aa72e6fe03cddef8828c618 (patch)
tree cc6ddb30af6d3aaede81c929cd0fa5fc537cb71d
parent b11c52781635bd871abd6d932cfd5dcd6f311903 (diff)
download llvm-5ded4442520d3dbb1aa72e6fe03cddef8828c618.tar.gz
[AArch64][GlobalISel] Optimize away a Not feeding a brcond by using tbz instead of tbnz.
Usually brconds are fed by compares, but not always, in which case we would miss this fold. Differential Revision: https://reviews.llvm.org/D86413
-rw-r--r-- llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp | 17
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-not.mir | 76
2 files changed, 91 insertions, 2 deletions
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 5e5f902e1107..a8d68180bb76 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -41,6 +41,7 @@
#define DEBUG_TYPE "aarch64-isel"
using namespace llvm;
+using namespace MIPatternMatch;
namespace {
@@ -1883,7 +1884,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return false;
}
- const Register CondReg = I.getOperand(0).getReg();
+ Register CondReg = I.getOperand(0).getReg();
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
// Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
@@ -1893,7 +1894,19 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return true;
if (ProduceNonFlagSettingCondBr) {
- auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
+ unsigned BOpc = AArch64::TBNZW;
+ // Try to fold a not, i.e. a xor, cond, 1.
+ Register XorSrc;
+ int64_t Cst;
+ if (mi_match(CondReg, MRI,
+ m_GTrunc(m_GXor(m_Reg(XorSrc), m_ICst(Cst)))) &&
+ Cst == 1) {
+ CondReg = XorSrc;
+ BOpc = AArch64::TBZW;
+ if (MRI.getType(XorSrc).getSizeInBits() > 32)
+ BOpc = AArch64::TBZX;
+ }
+ auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(BOpc))
.addUse(CondReg)
.addImm(/*bit offset=*/0)
.addMBB(DestMBB);
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-not.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-not.mir
new file mode 100644
index 000000000000..41fe50d9bb7d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-not.mir
@@ -0,0 +1,76 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+---
+name: condbr_of_not
+legalized: true
+regBankSelected: true
+liveins:
+ - { reg: '$x0' }
+body: |
+ ; CHECK-LABEL: name: condbr_of_not
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load 1)
+ ; CHECK: TBZW [[LDRBBui]], 0, %bb.2
+ ; CHECK: bb.1:
+ ; CHECK: RET_ReallyLR
+ ; CHECK: bb.2:
+ ; CHECK: RET_ReallyLR
+ bb.1:
+ successors: %bb.2, %bb.3
+ liveins: $x0
+
+ %0:gpr(p0) = COPY $x0
+ %8:gpr(s8) = G_LOAD %0(p0) :: (load 1)
+ %4:gpr(s32) = G_ANYEXT %8(s8)
+ %5:gpr(s32) = G_CONSTANT i32 1
+ %6:gpr(s32) = G_XOR %4, %5
+ %3:gpr(s1) = G_TRUNC %6(s32)
+ G_BRCOND %3(s1), %bb.3
+
+ bb.2:
+ RET_ReallyLR
+
+ bb.3:
+ RET_ReallyLR
+
+...
+---
+name: condbr_of_not_64
+legalized: true
+regBankSelected: true
+liveins:
+ - { reg: '$x0' }
+body: |
+ ; CHECK-LABEL: name: condbr_of_not_64
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load 1)
+ ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDRBBui]], %subreg.sub_32
+ ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]
+ ; CHECK: TBZX [[COPY1]], 0, %bb.2
+ ; CHECK: bb.1:
+ ; CHECK: RET_ReallyLR
+ ; CHECK: bb.2:
+ ; CHECK: RET_ReallyLR
+ bb.1:
+ successors: %bb.2, %bb.3
+ liveins: $x0
+
+ %0:gpr(p0) = COPY $x0
+ %8:gpr(s8) = G_LOAD %0(p0) :: (load 1)
+ %4:gpr(s64) = G_ANYEXT %8(s8)
+ %5:gpr(s64) = G_CONSTANT i64 1
+ %6:gpr(s64) = G_XOR %4, %5
+ %3:gpr(s1) = G_TRUNC %6(s64)
+ G_BRCOND %3(s1), %bb.3
+
+ bb.2:
+ RET_ReallyLR
+
+ bb.3:
+ RET_ReallyLR
+
+...