author     rguenth <rguenth@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-04-15 14:08:41 +0000
committer  rguenth <rguenth@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-04-15 14:08:41 +0000
commit     4d52578396953614d8a30df76c1b3699829f5cb2 (patch)
tree       52afb1966c9cc2b68285ff2632e2f574ef6e4c9d  /gcc/tree-vect-data-refs.c
parent     7cdd84a2f95dbd2bbee1fba5930214332fed2421 (diff)
download   gcc-4d52578396953614d8a30df76c1b3699829f5cb2.tar.gz
2013-04-15 Richard Biener <rguenther@suse.de>
        PR tree-optimization/56933
        * tree-vectorizer.h (struct _stmt_vec_info): Remove read_write_dep
        member.
        (GROUP_READ_WRITE_DEPENDENCE): Remove.
        (STMT_VINFO_GROUP_READ_WRITE_DEPENDENCE): Likewise.
        * tree-vect-data-refs.c (vect_analyze_group_access): Move dependence
        check ...
        (vect_analyze_data_ref_dependence): ... here.
        * tree-vect-stmts.c (new_stmt_vec_info): Do not initialize
        GROUP_READ_WRITE_DEPENDENCE.

        * gcc.dg/vect/pr56933.c: New testcase.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@197972 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/tree-vect-data-refs.c')
-rw-r--r--  gcc/tree-vect-data-refs.c  |  47
1 files changed, 28 insertions, 19 deletions
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 64f2a8b6d9e..1fe5047c70b 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -341,14 +341,34 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
               dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
             }

-          /* For interleaving, mark that there is a read-write dependency if
-             necessary. We check before that one of the data-refs is store.  */
-          if (DR_IS_READ (dra))
-            GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
-          else
-            {
-              if (DR_IS_READ (drb))
-                GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
+          /* When we perform grouped accesses and perform implicit CSE
+             by detecting equal accesses and doing disambiguation with
+             runtime alias tests like for
+                .. = a[i];
+                .. = a[i+1];
+                a[i] = ..;
+                a[i+1] = ..;
+                *p = ..;
+                .. = a[i];
+                .. = a[i+1];
+             where we will end up loading { a[i], a[i+1] } once, make
+             sure that inserting group loads before the first load and
+             stores after the last store will do the right thing.  */
+          if ((STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
+               && GROUP_SAME_DR_STMT (stmtinfo_a))
+              || (STMT_VINFO_GROUPED_ACCESS (stmtinfo_b)
+                  && GROUP_SAME_DR_STMT (stmtinfo_b)))
+            {
+              gimple earlier_stmt;
+              earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
+              if (DR_IS_WRITE
+                    (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
+                {
+                  if (dump_enabled_p ())
+                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+                                     "READ_WRITE dependence in interleaving.");
+                  return true;
+                }
             }

           continue;
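The situation described in the new comment above, written out as a compilable loop (an illustrative sketch only, not the committed gcc.dg/vect/pr56933.c; the function name foo, the result array b and the stride of 2 are assumptions):

/* Sketch of the grouped-access pattern: the pair { a[2*i], a[2*i+1] } is
   loaded, stored to, followed by a store through a possibly aliasing
   pointer, and loaded again.  CSEing the two equal load groups is only
   safe if the read-write dependence in between is detected.  */
void
foo (double *a, double *p, double *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      double x = a[2 * i] + a[2 * i + 1];  /* grouped loads */
      a[2 * i] = x;                        /* grouped stores to the same slots */
      a[2 * i + 1] = x + 1.0;
      *p = x;                              /* store needing a runtime alias test */
      b[i] = a[2 * i] + a[2 * i + 1];      /* the same pair is loaded again */
    }
}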
@@ -2097,17 +2117,6 @@ vect_analyze_group_access (struct data_reference *dr)
               return false;
             }

-          /* Check that there is no load-store dependencies for this loads
-             to prevent a case of load-store-load to the same location.  */
-          if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
-              || GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
-            {
-              if (dump_enabled_p ())
-                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                 "READ_WRITE dependence in interleaving.");
-              return false;
-            }
-
           /* For load use the same data-ref load.  */
           GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;