| author | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2021-10-15 13:07:32 -0700 |
|---|---|---|
| committer | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2021-10-18 01:44:25 -0700 |
| commit | 7cdb1df8c70425b30905418636f9008cf8d3a844 (patch) | |
| tree | caa75c8fff034fcfcf0a8c30db5a70cf56d0681f | |
| parent | 605efd5dd5bf5f174df7cbd6be9d4e06d6e6249d (diff) | |
| download | llvm-7cdb1df8c70425b30905418636f9008cf8d3a844.tar.gz | |
[AMDGPU] Divergence driven selection for fused bitlogic
The change adds divergence predicates for fused logical operations.
The problem with selecting a scalar fused op such as S_NOR_B32 is
that it has no VALU counterpart and will be split by moveToVALU.
At the same time, selecting it prevents selection of a better
opcode on the VALU side (such as V_OR3_B32), which has no
counterpart on the SALU side.
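
For illustration, a minimal IR sketch (hypothetical; not taken from the patch) of the divergent case this addresses:

```llvm
; Hypothetical example: a negated OR chain over divergent values. Before
; this change the (not (or ...)) could be selected as S_NOR_B32 and later
; split by moveToVALU; with the divergence predicate, selection goes to
; the VALU, where the ORs fuse into v_or3_b32 followed by v_not_b32_e32.
define amdgpu_kernel void @nor3_divergent(i32 addrspace(1)* %out,
                                          i32 addrspace(1)* %in) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()   ; divergent per-lane value
  %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %a   = load i32, i32 addrspace(1)* %gep
  %or1 = or i32 %a, %tid
  %or2 = or i32 %or1, 42
  %nor = xor i32 %or2, -1                        ; not(or(or(a, tid), 42))
  store i32 %nor, i32 addrspace(1)* %out
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x()
```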
XNOR opcodes are left as-is and selected as scalar to take advantage
of SIInstrInfo::lowerScalarXnor(), which can commute operations to
keep one of the two resulting opcodes on the SALU when possible. See
the xnor.ll test for this.
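
A sketch of the commute (hypothetical example, not from the patch): with one uniform and one divergent operand, keeping the scalar XNOR lets the expansion place the NOT on the SALU and only the XOR on the VALU:

```llvm
; Hypothetical example: xnor of a uniform kernel argument %s with a
; divergent lane id. Selecting the scalar XNOR first lets
; SIInstrInfo::lowerScalarXnor() rewrite not(xor(s, tid)) as
; xor(not(s), tid), so the NOT stays scalar (s_not_b32) and only the
; XOR needs a VALU instruction (v_xor_b32_e32).
define amdgpu_kernel void @xnor_mixed(i32 addrspace(1)* %out, i32 %s) {
  %tid  = call i32 @llvm.amdgcn.workitem.id.x()  ; divergent
  %xor  = xor i32 %s, %tid
  %xnor = xor i32 %xor, -1                       ; xnor(%s, %tid)
  store i32 %xnor, i32 addrspace(1)* %out
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x()
```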
Differential Revision: https://reviews.llvm.org/D111907
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SOPInstructions.td | 8 |
| -rw-r--r-- | llvm/lib/Target/AMDGPU/VOP3Instructions.td | 8 |
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll | 11 |

3 files changed, 16 insertions, 11 deletions
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index ff7c0a87a7f7..5f63970c6697 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -558,19 +558,19 @@ def S_XNOR_B64 : SOP2_64 <"s_xnor_b64",
 >;

 def S_NAND_B32 : SOP2_32 <"s_nand_b32",
-  [(set i32:$sdst, (not (and_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (and_oneuse i32:$src0, i32:$src1)))]
 >;

 def S_NAND_B64 : SOP2_64 <"s_nand_b64",
-  [(set i64:$sdst, (not (and_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (and_oneuse i64:$src0, i64:$src1)))]
 >;

 def S_NOR_B32 : SOP2_32 <"s_nor_b32",
-  [(set i32:$sdst, (not (or_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (or_oneuse i32:$src0, i32:$src1)))]
 >;

 def S_NOR_B64 : SOP2_64 <"s_nor_b64",
-  [(set i64:$sdst, (not (or_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (or_oneuse i64:$src0, i64:$src1)))]
 >;
 } // End isCommutable = 1

diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 4b216a4ec157..8c6d1884c5c5 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -667,6 +667,14 @@ def : ThreeOp_i32_Pats<xor, add, V_XAD_U32_e64>;
 def : VOPBinOpClampPat<saddsat, V_ADD_I32_e64, i32>;
 def : VOPBinOpClampPat<ssubsat, V_SUB_I32_e64, i32>;

+def : GCNPat<(getDivergentFrag<or>.ret (or_oneuse i64:$src0, i64:$src1), i64:$src2),
+  (REG_SEQUENCE VReg_64,
+    (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub0)),
+                   (i32 (EXTRACT_SUBREG $src1, sub0)),
+                   (i32 (EXTRACT_SUBREG $src2, sub0))), sub0,
+    (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub1)),
+                   (i32 (EXTRACT_SUBREG $src1, sub1)),
+                   (i32 (EXTRACT_SUBREG $src2, sub1))), sub1)>;
+
 // FIXME: Probably should hardcode clamp bit in pseudo and avoid this.
 class OpSelBinOpClampPat<SDPatternOperator node,

diff --git a/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll b/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll
index 654ed0faa11d..933f73c0fe5d 100644
--- a/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll
+++ b/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll
@@ -9,8 +9,7 @@ define amdgpu_kernel void @divergent_or3_b32(<3 x i32> addrspace(1)* %arg) {
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_or3_b32 v0, v1, v0, v2
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
 ; GCN-NEXT:    s_endpgm
@@ -39,10 +38,8 @@ define amdgpu_kernel void @divergent_or3_b64(<3 x i64> addrspace(1)* %arg) {
 ; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
 ; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v4
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_or3_b32 v1, v3, v1, v5
+; GCN-NEXT:    v_or3_b32 v0, v2, v0, v4
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    v_not_b32_e32 v1, v1
 ; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
@@ -103,8 +100,8 @@ define amdgpu_kernel void @divergent_and3_b64(<3 x i64> addrspace(1)* %arg) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, v3, v1
 ; GCN-NEXT:    v_and_b32_e32 v0, v2, v0
-; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
 ; GCN-NEXT:    v_and_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    v_not_b32_e32 v1, v1
 ; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]