author    Sanjay Patel <spatel@rotateright.com>    2019-09-25 15:08:33 +0000
committer Tom Stellard <tstellar@redhat.com>       2019-11-12 13:57:03 -0800
commit    7a140f4a293e05a1047bc39c805fba5878b6fa3a (patch)
tree      995863eeceb04e75d2401e8561d00e217e73898e
parent    07c142228696bba02c339563858f2da8f3376fd3 (diff)
download  llvm-7a140f4a293e05a1047bc39c805fba5878b6fa3a.tar.gz
Merging r372886:
------------------------------------------------------------------------
r372886 | spatel | 2019-09-25 08:08:33 -0700 (Wed, 25 Sep 2019) | 7 lines

[DAGCombiner] add one-use restriction to vector transform with cheap extract

We might be able to do better on the example in the test, but in general,
we should not scalarize a splatted vector binop if there are other uses of
the binop. Otherwise, we can end up with code as we had - a scalar op that
is redundant with a vector op.
------------------------------------------------------------------------
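For context, a minimal LLVM IR sketch (hypothetical function name, not part of this patch) of the multi-use pattern the new one-use check leaves alone: because %mul has a second use, extracting lane 0 and redoing the multiply as a scalar op would only duplicate work next to the vector fmul, which is the redundancy the commit message describes.

define <4 x float> @splat_binop_two_uses(<4 x float> %x, <4 x float> %y) {
  %mul = fmul <4 x float> %x, %y
  ; splat of the binop result (lane 0 broadcast to all lanes)
  %splat = shufflevector <4 x float> %mul, <4 x float> undef, <4 x i32> zeroinitializer
  ; second use of %mul: the binop is not one-use, so the combine now bails out
  %r = fadd <4 x float> %splat, %mul
  ret <4 x float> %r
}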
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp   2
-rw-r--r--  llvm/test/CodeGen/X86/scalarize-fp.ll          17
2 files changed, 8 insertions, 11 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 49c922f560fa..2530beabac81 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18878,7 +18878,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// build_vector.
if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
int SplatIndex = SVN->getSplatIndex();
- if (TLI.isExtractVecEltCheap(VT, SplatIndex) &&
+ if (N0.hasOneUse() && TLI.isExtractVecEltCheap(VT, SplatIndex) &&
TLI.isBinOp(N0.getOpcode()) && N0.getNode()->getNumValues() == 1) {
// splat (vector_bo L, R), Index -->
// splat (scalar_bo (extelt L, Index), (extelt R, Index))
diff --git a/llvm/test/CodeGen/X86/scalarize-fp.ll b/llvm/test/CodeGen/X86/scalarize-fp.ll
index 5c72d4ba3c07..5b98272fdce5 100644
--- a/llvm/test/CodeGen/X86/scalarize-fp.ll
+++ b/llvm/test/CodeGen/X86/scalarize-fp.ll
@@ -777,21 +777,18 @@ define <8 x float> @splat0_fdiv_const_op0_v8f32(<8 x float> %vx) {
define <4 x float> @multi_use_binop(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: multi_use_binop:
; SSE: # %bb.0:
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: mulps %xmm1, %xmm2
-; SSE-NEXT: mulss %xmm1, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: multi_use_binop:
; AVX: # %bb.0:
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm2[0,0]
-; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,2,0]
+; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%mul = fmul <4 x float> %x, %y
%mul0 = shufflevector <4 x float> %mul, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 0>