summaryrefslogtreecommitdiff
path: root/gcc/tree-vect-loop.c
diff options
context:
space:
mode:
authormatz <matz@138bc75d-0d04-0410-961f-82ee72b054a4>2010-05-10 14:31:37 +0000
committermatz <matz@138bc75d-0d04-0410-961f-82ee72b054a4>2010-05-10 14:31:37 +0000
commitf4a50267b9196603596fb82bb914e9000848cc1a (patch)
tree23003a727a703aa73b4c09b5ba60eb77c600ebdf /gcc/tree-vect-loop.c
parent7ec7a88e8e1f3fa1d8934a3e4e0516f0196495e6 (diff)
downloadgcc-f4a50267b9196603596fb82bb914e9000848cc1a.tar.gz
	* tree-ssa-reassoc.c (undistribute_ops_list): Use create_tmp_reg.
	(can_reassociate_p): Use FLOAT_TYPE_P.
	* tree-vectorizer.h (vect_is_simple_reduction): Rename to ...
	(vect_force_simple_reduction): ... this.
	* tree-parloops.c (gather_scalar_reductions): Use
	vect_force_simple_reduction.
	* tree-vect-loop.c (vect_is_simple_reduction_1): Rename from
	vect_is_simple_reduction, add modify argument, if true rewrite
	"a-b" into "a+(-b)".
	(vect_is_simple_reduction, vect_force_simple_reduction): New
	functions.
	(vect_analyze_scalar_cycles_1): Use vect_force_simple_reduction.

testsuite/
	* gcc.dg/vect/fast-math-vect-reduc-8.c: New test.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@159226 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/tree-vect-loop.c')
-rw-r--r--gcc/tree-vect-loop.c68
1 file changed, 60 insertions, 8 deletions
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 83b823d84bf..8acc99289bc 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -513,8 +513,8 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
- reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi, !nested_cycle,
- &double_reduc);
+ reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
+ &double_reduc);
if (reduc_stmt)
{
if (double_reduc)
@@ -1584,7 +1584,7 @@ report_vect_op (gimple stmt, const char *msg)
}
-/* Function vect_is_simple_reduction
+/* Function vect_is_simple_reduction_1
(1) Detect a cross-iteration def-use cycle that represents a simple
reduction computation. We look for the following pattern:
@@ -1612,18 +1612,23 @@ report_vect_op (gimple stmt, const char *msg)
a1 = phi < a0, a2 >
inner loop (def of a3)
a2 = phi < a3 >
+
+   If MODIFY is true it tries also to rework the code in-place to enable
+   detection of more reduction patterns.  For the time being we rewrite
+   "res -= RHS" into "res += -RHS" when it seems worthwhile.
+*/
-gimple
-vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
- bool check_reduction, bool *double_reduc)
+static gimple
+vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
+ bool check_reduction, bool *double_reduc,
+ bool modify)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
edge latch_e = loop_latch_edge (loop);
tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
gimple def_stmt, def1 = NULL, def2 = NULL;
- enum tree_code code;
+ enum tree_code orig_code, code;
tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
tree type;
int nloop_uses;
@@ -1743,7 +1748,14 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
return NULL;
}
- code = gimple_assign_rhs_code (def_stmt);
+ code = orig_code = gimple_assign_rhs_code (def_stmt);
+
+ /* We can handle "res -= x[i]", which is non-associative by
+ simply rewriting this into "res += -x[i]". Avoid changing
+ gimple instruction for the first simple tests and only do this
+ if we're allowed to change code at all. */
+ if (code == MINUS_EXPR && modify)
+ code = PLUS_EXPR;
if (check_reduction
&& (!commutative_tree_code (code) || !associative_tree_code (code)))
@@ -1863,6 +1875,24 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
return NULL;
}
+ /* If we detected "res -= x[i]" earlier, rewrite it into
+ "res += -x[i]" now. If this turns out to be useless reassoc
+ will clean it up again. */
+ if (orig_code == MINUS_EXPR)
+ {
+ tree rhs = gimple_assign_rhs2 (def_stmt);
+ tree negrhs = make_ssa_name (SSA_NAME_VAR (rhs), NULL);
+ gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
+ rhs, NULL);
+ gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
+ set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
+ loop_info, NULL));
+ gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
+ gimple_assign_set_rhs2 (def_stmt, negrhs);
+ gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
+ update_stmt (def_stmt);
+ }
+
/* Reduction is safe. We're dealing with one of the following:
1) integer arithmetic and no trapv
2) floating point arithmetic, and special flags permit this optimization
@@ -1940,6 +1970,28 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
}
}
+/* Wrapper around vect_is_simple_reduction_1, that won't modify code
+ in-place. Arguments as there. */
+
+static gimple
+vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
+ bool check_reduction, bool *double_reduc)
+{
+ return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
+ double_reduc, false);
+}
+
+/* Wrapper around vect_is_simple_reduction_1, which will modify code
+ in-place if it enables detection of more reductions. Arguments
+ as there. */
+
+gimple
+vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
+ bool check_reduction, bool *double_reduc)
+{
+ return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
+ double_reduc, true);
+}
/* Function vect_estimate_min_profitable_iters