author     rakdver <rakdver@138bc75d-0d04-0410-961f-82ee72b054a4>  2007-09-07 02:40:14 +0000
committer  rakdver <rakdver@138bc75d-0d04-0410-961f-82ee72b054a4>  2007-09-07 02:40:14 +0000
commit     79acaae17e126f58016355519d445e2766a34771 (patch)
tree       7c5c590c4bfd0b7c4f0dda268de7e85ce217d91f /gcc
parent     0c234f81517b076084b84ce17198cff0522e9a9e (diff)
* cgraphbuild.c (rebuild_cgraph_edges): Export.
* cgraph.h (rebuild_cgraph_edges): Declare.
* tree-pass.h (pass_expand_omp_ssa): New.
* omp-low.c (find_omp_clause): Export.
(copy_var_decl): Split from omp_copy_decl_2.
(build_omp_barrier): Return the call to emit instead of emitting
it directly.
(lower_rec_input_clauses, expand_omp_single): Gimplify the result of
build_omp_barrier.
(extract_omp_for_data, expand_parallel_call, expand_omp_parallel,
expand_omp_for_generic, expand_omp_for_static_nochunk,
expand_omp_for_static_chunk, expand_omp_for, expand_omp_sections):
Adapted to work on SSA form.
(execute_expand_omp): Do not invalidate dominance information.
(gate_expand_omp): Do not run with -fopenmp-ssa flag.
(gate_expand_omp_ssa, pass_expand_omp_ssa): New.
* gimplify.c (gimplify_omp_for): Ensure that the control variable is
a gimple_reg.
(force_gimple_operand): Allow gimplifying code expressions without
value.
* tree-predcom.c (mark_virtual_ops_for_renaming): Handle phi nodes.
* common.opt (fopenmp-ssa): New.
* tree-flow.h (find_omp_clause, copy_var_decl): Declare.
* Makefile.in (tree-cfg.o): Add TREE_INLINE_H dependency.
* tree-cfg.c: Include tree-inline.h.
(struct move_stmt_d): Replace vars_to_remove by vars_map field.
(replace_by_duplicate_decl, replace_ssa_name,
mark_virtual_ops_in_region): New functions.
(move_stmt_r, move_block_to_fn, move_sese_region_to_fn): Adapted
to work on SSA form.
* passes.c (init_optimization_passes): Add pass_expand_omp_ssa pass.
* tree-ssa-operands.c (get_expr_operands): Handle operands of OMP
constructs.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@128223 138bc75d-0d04-0410-961f-82ee72b054a4
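For orientation only (this sketch is not part of the patch): with this change, an OpenMP loop like the one below, compiled with optimization and the new -fopenmp-ssa flag in addition to -fopenmp, is expanded by the new pass_expand_omp_ssa after SSA form has been built, instead of by the pre-SSA pass_expand_omp (whose gate now declines to run in that case). The function and variable names are arbitrary.

    /* Illustrative example; any parallel loop works.  Built with
       "gcc -O2 -fopenmp -fopenmp-ssa", the OMP constructs below are
       expanded while the enclosing function is already in SSA form;
       without -fopenmp-ssa (or without optimization) the pre-SSA
       expansion pass handles them as before.  */
    void
    scale (int n, double *a, double c)
    {
      int i;

    #pragma omp parallel for
      for (i = 0; i < n; i++)
        a[i] = a[i] * c;
    }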
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog            |  36
-rw-r--r--  gcc/Makefile.in          |   2
-rw-r--r--  gcc/cgraph.h             |   3
-rw-r--r--  gcc/cgraphbuild.c        |   2
-rw-r--r--  gcc/common.opt           |   4
-rw-r--r--  gcc/gimplify.c           |  56
-rw-r--r--  gcc/omp-low.c            | 787
-rw-r--r--  gcc/passes.c             |   1
-rw-r--r--  gcc/tree-cfg.c           | 258
-rw-r--r--  gcc/tree-flow.h          |   2
-rw-r--r--  gcc/tree-pass.h          |   1
-rw-r--r--  gcc/tree-predcom.c       |  11
-rw-r--r--  gcc/tree-ssa-operands.c  |  59
13 files changed, 854 insertions, 368 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7b0c48390f2..76a81d88ab3 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,39 @@
+2007-09-06 Zdenek Dvorak <ook@ucw.cz>
+
+ * cgraphbuild.c (rebuild_cgraph_edges): Export.
+ * cgraph.h (rebuild_cgraph_edges): Declare.
+ * tree-pass.h (pass_expand_omp_ssa): New.
+ * omp-low.c (find_omp_clause): Export.
+ (copy_var_decl): Split from omp_copy_decl_2.
+ (build_omp_barrier): Return the call to emit instead of emitting
+ it directly.
+ (lower_rec_input_clauses, expand_omp_single): Gimplify the result of
+ build_omp_barrier.
+ (extract_omp_for_data, expand_parallel_call, expand_omp_parallel,
+ expand_omp_for_generic, expand_omp_for_static_nochunk,
+ expand_omp_for_static_chunk, expand_omp_for, expand_omp_sections):
+ Adapted to work on SSA form.
+ (execute_expand_omp): Do not invalidate dominance information.
+ (gate_expand_omp): Do not run with -fopenmp-ssa flag.
+ (gate_expand_omp_ssa, pass_expand_omp_ssa): New.
+ * gimplify.c (gimplify_omp_for): Ensure that the control variable is
+ a gimple_reg.
+ (force_gimple_operand): Allow gimplifying code expressions without
+ value.
+ * tree-predcom.c (mark_virtual_ops_for_renaming): Handle phi nodes.
+ * common.opt (fopenmp-ssa): New.
+ * tree-flow.h (find_omp_clause, copy_var_decl): Declare.
+ * Makefile.in (tree-cfg.o): Add TREE_INLINE_H dependency.
+ * tree-cfg.c: Include tree-inline.h.
+ (struct move_stmt_d): Replace vars_to_remove by vars_map field.
+ (replace_by_duplicate_decl, replace_ssa_name,
+ mark_virtual_ops_in_region): New functions.
+ (move_stmt_r, move_block_to_fn, move_sese_region_to_fn): Adapted
+ to work on SSA form.
+ * passes.c (init_optimization_passes): Add pass_expand_omp_ssa pass.
+ * tree-ssa-operands.c (get_expr_operands): Handle operands of OMP
+ constructs.
+
2007-09-06 Laurynas Biveinis <laurynas.biveinis@gmail.com>
* tree-loop-linear.c: Include obstack.h.
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 93f277a3ac0..fee5d25cab7 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -2079,7 +2079,7 @@ tree-cfg.o : tree-cfg.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
$(DIAGNOSTIC_H) $(FUNCTION_H) $(TIMEVAR_H) $(TM_H) coretypes.h \
$(TREE_DUMP_H) except.h langhooks.h $(CFGLOOP_H) tree-pass.h \
$(CFGLAYOUT_H) $(BASIC_BLOCK_H) hard-reg-set.h toplev.h \
- tree-ssa-propagate.h
+ tree-ssa-propagate.h $(TREE_INLINE_H)
tree-cfgcleanup.o : tree-cfgcleanup.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
$(RTL_H) $(TREE_H) $(TM_P_H) $(EXPR_H) $(GGC_H) $(FLAGS_H) output.h \
$(DIAGNOSTIC_H) toplev.h $(FUNCTION_H) $(TIMEVAR_H) $(TM_H) coretypes.h \
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index cb9e729bbf5..2d6d7c94c9a 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -344,6 +344,9 @@ struct cgraph_node *save_inline_function_body (struct cgraph_node *);
void record_references_in_initializer (tree);
bool cgraph_process_new_functions (void);
+/* In cgraphbuild.c */
+unsigned int rebuild_cgraph_edges (void);
+
/* In ipa.c */
bool cgraph_remove_unreachable_nodes (bool, FILE *);
int cgraph_postorder (struct cgraph_node **);
diff --git a/gcc/cgraphbuild.c b/gcc/cgraphbuild.c
index 0f4303a20ff..1e3e5da1910 100644
--- a/gcc/cgraphbuild.c
+++ b/gcc/cgraphbuild.c
@@ -202,7 +202,7 @@ record_references_in_initializer (tree decl)
/* Rebuild cgraph edges for current function node. This needs to be run after
passes that don't update the cgraph. */
-static unsigned int
+unsigned int
rebuild_cgraph_edges (void)
{
basic_block bb;
diff --git a/gcc/common.opt b/gcc/common.opt
index 3f44ba8f1b8..c53a93e0483 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -709,6 +709,10 @@ fomit-frame-pointer
Common Report Var(flag_omit_frame_pointer) Optimization
When possible do not generate stack frames
+fopenmp-ssa
+Common Report Var(flag_openmp_ssa)
+Expand OpenMP operations on SSA form
+
foptimize-register-move
Common Report Var(flag_regmove) Optimization
Do the full register move optimization pass
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index c4d4f629255..d10c8486df8 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -5114,8 +5114,9 @@ gimplify_omp_parallel (tree *expr_p, tree *pre_p)
static enum gimplify_status
gimplify_omp_for (tree *expr_p, tree *pre_p)
{
- tree for_stmt, decl, t;
+ tree for_stmt, decl, var, t;
enum gimplify_status ret = GS_OK;
+ tree body, init_decl = NULL_TREE;
for_stmt = *expr_p;
@@ -5134,6 +5135,20 @@ gimplify_omp_for (tree *expr_p, tree *pre_p)
else
omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);
+ /* If DECL is not a gimple register, create a temporary variable to act as an
+ iteration counter. This is valid, since DECL cannot be modified in the
+ body of the loop. */
+ if (!is_gimple_reg (decl))
+ {
+ var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
+ GENERIC_TREE_OPERAND (t, 0) = var;
+
+ init_decl = build_gimple_modify_stmt (decl, var);
+ omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
+ }
+ else
+ var = decl;
+
ret |= gimplify_expr (&GENERIC_TREE_OPERAND (t, 1),
&OMP_FOR_PRE_BODY (for_stmt),
NULL, is_gimple_val, fb_rvalue);
@@ -5143,6 +5158,7 @@ gimplify_omp_for (tree *expr_p, tree *pre_p)
t = OMP_FOR_COND (for_stmt);
gcc_assert (COMPARISON_CLASS_P (t));
gcc_assert (GENERIC_TREE_OPERAND (t, 0) == decl);
+ TREE_OPERAND (t, 0) = var;
ret |= gimplify_expr (&GENERIC_TREE_OPERAND (t, 1),
&OMP_FOR_PRE_BODY (for_stmt),
@@ -5155,21 +5171,23 @@ gimplify_omp_for (tree *expr_p, tree *pre_p)
case PREINCREMENT_EXPR:
case POSTINCREMENT_EXPR:
t = build_int_cst (TREE_TYPE (decl), 1);
- t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
- t = build_gimple_modify_stmt (decl, t);
+ t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
+ t = build_gimple_modify_stmt (var, t);
OMP_FOR_INCR (for_stmt) = t;
break;
case PREDECREMENT_EXPR:
case POSTDECREMENT_EXPR:
t = build_int_cst (TREE_TYPE (decl), -1);
- t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
- t = build_gimple_modify_stmt (decl, t);
+ t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
+ t = build_gimple_modify_stmt (var, t);
OMP_FOR_INCR (for_stmt) = t;
break;
case GIMPLE_MODIFY_STMT:
gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == decl);
+ GIMPLE_STMT_OPERAND (t, 0) = var;
+
t = GIMPLE_STMT_OPERAND (t, 1);
switch (TREE_CODE (t))
{
@@ -5177,11 +5195,14 @@ gimplify_omp_for (tree *expr_p, tree *pre_p)
if (TREE_OPERAND (t, 1) == decl)
{
TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
- TREE_OPERAND (t, 0) = decl;
+ TREE_OPERAND (t, 0) = var;
break;
}
+
+ /* Fallthru. */
case MINUS_EXPR:
gcc_assert (TREE_OPERAND (t, 0) == decl);
+ TREE_OPERAND (t, 0) = var;
break;
default:
gcc_unreachable ();
@@ -5195,7 +5216,13 @@ gimplify_omp_for (tree *expr_p, tree *pre_p)
gcc_unreachable ();
}
- gimplify_to_stmt_list (&OMP_FOR_BODY (for_stmt));
+ body = OMP_FOR_BODY (for_stmt);
+ gimplify_to_stmt_list (&body);
+ t = alloc_stmt_list ();
+ if (init_decl)
+ append_to_statement_list (init_decl, &t);
+ append_to_statement_list (body, &t);
+ OMP_FOR_BODY (for_stmt) = t;
gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));
return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
@@ -6591,9 +6618,18 @@ force_gimple_operand (tree expr, tree *stmts, bool simple, tree var)
if (var)
expr = build_gimple_modify_stmt (var, expr);
- ret = gimplify_expr (&expr, stmts, NULL,
- gimple_test_f, fb_rvalue);
- gcc_assert (ret != GS_ERROR);
+ if (TREE_CODE (expr) != GIMPLE_MODIFY_STMT
+ && TREE_TYPE (expr) == void_type_node)
+ {
+ gimplify_and_add (expr, stmts);
+ expr = NULL_TREE;
+ }
+ else
+ {
+ ret = gimplify_expr (&expr, stmts, NULL,
+ gimple_test_f, fb_rvalue);
+ gcc_assert (ret != GS_ERROR);
+ }
if (gimple_referenced_vars (cfun))
{
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index c1ab3f31193..57f36509c67 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -117,7 +117,7 @@ static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES. */
-static tree
+tree
find_omp_clause (tree clauses, enum tree_code kind)
{
for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
@@ -151,7 +151,7 @@ is_combined_parallel (struct omp_region *region)
static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
- tree t;
+ tree t, var;
fd->for_stmt = for_stmt;
fd->pre = NULL;
@@ -159,13 +159,14 @@ extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
t = OMP_FOR_INIT (for_stmt);
gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
fd->v = GIMPLE_STMT_OPERAND (t, 0);
- gcc_assert (DECL_P (fd->v));
+ gcc_assert (SSA_VAR_P (fd->v));
gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
+ var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
fd->n1 = GIMPLE_STMT_OPERAND (t, 1);
t = OMP_FOR_COND (for_stmt);
fd->cond_code = TREE_CODE (t);
- gcc_assert (TREE_OPERAND (t, 0) == fd->v);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
fd->n2 = TREE_OPERAND (t, 1);
switch (fd->cond_code)
{
@@ -188,9 +189,9 @@ extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
t = OMP_FOR_INCR (fd->for_stmt);
gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
- gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == fd->v);
+ gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
t = GIMPLE_STMT_OPERAND (t, 1);
- gcc_assert (TREE_OPERAND (t, 0) == fd->v);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
switch (TREE_CODE (t))
{
case PLUS_EXPR:
@@ -513,22 +514,34 @@ use_pointer_for_field (const_tree decl, bool shared_p)
return false;
}
-/* Construct a new automatic decl similar to VAR. */
+/* Create a new VAR_DECL and copy information from VAR to it. */
-static tree
-omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
+tree
+copy_var_decl (tree var, tree name, tree type)
{
tree copy = build_decl (VAR_DECL, name, type);
TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
+ TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
+ DECL_CONTEXT (copy) = DECL_CONTEXT (var);
TREE_USED (copy) = 1;
- DECL_CONTEXT (copy) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
+ return copy;
+}
+
+/* Construct a new automatic decl similar to VAR. */
+
+static tree
+omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
+{
+ tree copy = copy_var_decl (var, name, type);
+
+ DECL_CONTEXT (copy) = current_function_decl;
TREE_CHAIN (copy) = ctx->block_vars;
ctx->block_vars = copy;
@@ -1432,11 +1445,10 @@ scan_omp (tree *stmt_p, omp_context *ctx)
/* Build a call to GOMP_barrier. */
-static void
-build_omp_barrier (tree *stmt_list)
+static tree
+build_omp_barrier (void)
{
- tree t = build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
- gimplify_and_add (t, stmt_list);
+ return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}
/* If a context was created for STMT when it was scanned, return it. */
@@ -1829,7 +1841,7 @@ lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
lastprivate clauses we need to ensure the lastprivate copying
happens after firstprivate copying in all threads. */
if (copyin_by_ref || lastprivate_firstprivate)
- build_omp_barrier (ilist);
+ gimplify_and_add (build_omp_barrier (), ilist);
}
@@ -2153,12 +2165,11 @@ static void
expand_parallel_call (struct omp_region *region, basic_block bb,
tree entry_stmt, tree ws_args)
{
- tree t, t1, t2, val, cond, c, list, clauses;
+ tree t, t1, t2, val, cond, c, clauses;
block_stmt_iterator si;
int start_ix;
clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
- push_gimplify_context ();
/* Determine what flavor of GOMP_parallel_start we will be
emitting. */
@@ -2204,15 +2215,28 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
cond = gimple_boolify (cond);
if (integer_zerop (val))
- val = build2 (EQ_EXPR, unsigned_type_node, cond,
- build_int_cst (TREE_TYPE (cond), 0));
+ val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
+ build_int_cst (TREE_TYPE (cond), 0));
else
{
basic_block cond_bb, then_bb, else_bb;
- edge e;
- tree t, tmp;
+ edge e, e_then, e_else;
+ tree t, tmp_then, tmp_else, tmp_join, tmp_var;
+
+ tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
+ if (gimple_in_ssa_p (cfun))
+ {
+ tmp_then = make_ssa_name (tmp_var, NULL_TREE);
+ tmp_else = make_ssa_name (tmp_var, NULL_TREE);
+ tmp_join = make_ssa_name (tmp_var, NULL_TREE);
+ }
+ else
+ {
+ tmp_then = tmp_var;
+ tmp_else = tmp_var;
+ tmp_join = tmp_var;
+ }
- tmp = create_tmp_var (TREE_TYPE (val), NULL);
e = split_block (bb, NULL);
cond_bb = e->src;
bb = e->dest;
@@ -2220,6 +2244,8 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
then_bb = create_empty_bb (cond_bb);
else_bb = create_empty_bb (then_bb);
+ set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
+ set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
t = build3 (COND_EXPR, void_type_node,
cond, NULL_TREE, NULL_TREE);
@@ -2228,29 +2254,40 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
si = bsi_start (then_bb);
- t = build_gimple_modify_stmt (tmp, val);
+ t = build_gimple_modify_stmt (tmp_then, val);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (tmp_then) = t;
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
si = bsi_start (else_bb);
- t = build_gimple_modify_stmt (tmp,
+ t = build_gimple_modify_stmt (tmp_else,
build_int_cst (unsigned_type_node, 1));
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (tmp_else) = t;
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
- make_edge (then_bb, bb, EDGE_FALLTHRU);
- make_edge (else_bb, bb, EDGE_FALLTHRU);
+ e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
+ e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
- val = tmp;
+ if (gimple_in_ssa_p (cfun))
+ {
+ tree phi = create_phi_node (tmp_join, bb);
+ SSA_NAME_DEF_STMT (tmp_join) = phi;
+ add_phi_arg (phi, tmp_then, e_then);
+ add_phi_arg (phi, tmp_else, e_else);
+ }
+
+ val = tmp_join;
}
- list = NULL_TREE;
- val = get_formal_tmp_var (val, &list);
si = bsi_start (bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
}
- list = NULL_TREE;
+ si = bsi_last (bb);
t = OMP_PARALLEL_DATA_ARG (entry_stmt);
if (t == NULL)
t1 = null_pointer_node;
@@ -2268,7 +2305,8 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
else
t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
- gimplify_and_add (t, &list);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = OMP_PARALLEL_DATA_ARG (entry_stmt);
if (t == NULL)
@@ -2276,15 +2314,12 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
else
t = build_fold_addr_expr (t);
t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
- gimplify_and_add (t, &list);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
- gimplify_and_add (t, &list);
-
- si = bsi_last (bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
-
- pop_gimplify_context (NULL_TREE);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
}
@@ -2408,7 +2443,6 @@ expand_omp_parallel (struct omp_region *region)
block_stmt_iterator si;
tree entry_stmt;
edge e;
- bool do_cleanup_cfg = false;
entry_stmt = last_stmt (region->entry);
child_fn = OMP_PARALLEL_FN (entry_stmt);
@@ -2437,13 +2471,12 @@ expand_omp_parallel (struct omp_region *region)
bsi_remove (&si, true);
new_bb = entry_bb;
- remove_edge (entry_succ_e);
if (exit_bb)
{
exit_succ_e = single_succ_edge (exit_bb);
make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
}
- do_cleanup_cfg = true;
+ remove_edge_and_dominated_blocks (entry_succ_e);
}
else
{
@@ -2464,6 +2497,7 @@ expand_omp_parallel (struct omp_region *region)
{
basic_block entry_succ_bb = single_succ (entry_bb);
block_stmt_iterator si;
+ tree parcopy_stmt = NULL_TREE, arg, narg;
for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
{
@@ -2480,14 +2514,32 @@ expand_omp_parallel (struct omp_region *region)
&& TREE_OPERAND (arg, 0)
== OMP_PARALLEL_DATA_ARG (entry_stmt))
{
- if (GIMPLE_STMT_OPERAND (stmt, 0)
- == DECL_ARGUMENTS (child_fn))
- bsi_remove (&si, true);
- else
- GIMPLE_STMT_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn);
+ parcopy_stmt = stmt;
break;
}
}
+
+ gcc_assert (parcopy_stmt != NULL_TREE);
+ arg = DECL_ARGUMENTS (child_fn);
+
+ if (!gimple_in_ssa_p (cfun))
+ {
+ if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
+ bsi_remove (&si, true);
+ else
+ GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
+ }
+ else
+ {
+ /* If we are in ssa form, we must load the value from the default
+ definition of the argument. That should not be defined now,
+ since the argument is not used uninitialized. */
+ gcc_assert (gimple_default_def (cfun, arg) == NULL);
+ narg = make_ssa_name (arg, build_empty_stmt ());
+ set_default_def (arg, narg);
+ GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
+ update_stmt (parcopy_stmt);
+ }
}
/* Declare local variables needed in CHILD_CFUN. */
@@ -2495,10 +2547,7 @@ expand_omp_parallel (struct omp_region *region)
BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
- /* Reset DECL_CONTEXT on locals and function arguments. */
- for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
- DECL_CONTEXT (t) = child_fn;
-
+ /* Reset DECL_CONTEXT on function arguments. */
for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
@@ -2512,17 +2561,6 @@ expand_omp_parallel (struct omp_region *region)
entry_bb = e->dest;
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
- /* Move the parallel region into CHILD_CFUN. We need to reset
- dominance information because the expansion of the inner
- regions has invalidated it. */
- free_dominance_info (CDI_DOMINATORS);
- new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
- if (exit_bb)
- single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
- DECL_STRUCT_FUNCTION (child_fn)->curr_properties
- = cfun->curr_properties;
- cgraph_add_new_function (child_fn, true);
-
/* Convert OMP_RETURN into a RETURN_EXPR. */
if (exit_bb)
{
@@ -2533,18 +2571,35 @@ expand_omp_parallel (struct omp_region *region)
bsi_insert_after (&si, t, BSI_SAME_STMT);
bsi_remove (&si, true);
}
+
+ /* Move the parallel region into CHILD_CFUN. */
+
+ if (gimple_in_ssa_p (cfun))
+ {
+ push_cfun (child_cfun);
+ init_tree_ssa ();
+ init_ssa_operands ();
+ cfun->gimple_df->in_ssa_p = true;
+ pop_cfun ();
+ }
+ new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
+ if (exit_bb)
+ single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
+
+ /* Inform the callgraph about the new function. */
+ DECL_STRUCT_FUNCTION (child_fn)->curr_properties
+ = cfun->curr_properties;
+ cgraph_add_new_function (child_fn, true);
+
+ /* Fix the callgraph edges for child_cfun. Those for cfun will be
+ fixed in a following pass. */
+ push_cfun (child_cfun);
+ rebuild_cgraph_edges ();
+ pop_cfun ();
}
/* Emit a library call to launch the children threads. */
expand_parallel_call (region, new_bb, entry_stmt, ws_args);
-
- if (do_cleanup_cfg)
- {
- /* Clean up the unreachable sub-graph we created above. */
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
- cleanup_tree_cfg ();
- }
}
@@ -2569,7 +2624,7 @@ expand_omp_parallel (struct omp_region *region)
L3:
If this is a combined omp parallel loop, instead of the call to
- GOMP_loop_foo_start, we emit 'goto L2'. */
+ GOMP_loop_foo_start, we call GOMP_loop_foo_next. */
static void
expand_omp_for_generic (struct omp_region *region,
@@ -2577,13 +2632,14 @@ expand_omp_for_generic (struct omp_region *region,
enum built_in_function start_fn,
enum built_in_function next_fn)
{
- tree type, istart0, iend0, iend;
- tree t, list;
+ tree type, istart0, iend0, iend, phi;
+ tree t, vmain, vback;
basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
basic_block l2_bb = NULL, l3_bb = NULL;
block_stmt_iterator si;
bool in_combined_parallel = is_combined_parallel (region);
bool broken_loop = region->cont == NULL;
+ edge e, ne;
gcc_assert (!broken_loop || !in_combined_parallel);
@@ -2591,9 +2647,13 @@ expand_omp_for_generic (struct omp_region *region,
istart0 = create_tmp_var (long_integer_type_node, ".istart0");
iend0 = create_tmp_var (long_integer_type_node, ".iend0");
- iend = create_tmp_var (type, NULL);
TREE_ADDRESSABLE (istart0) = 1;
TREE_ADDRESSABLE (iend0) = 1;
+ if (gimple_in_ssa_p (cfun))
+ {
+ add_referenced_var (istart0);
+ add_referenced_var (iend0);
+ }
entry_bb = region->entry;
cont_bb = region->cont;
@@ -2615,12 +2675,19 @@ expand_omp_for_generic (struct omp_region *region,
si = bsi_last (entry_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- if (!in_combined_parallel)
+ if (in_combined_parallel)
+ {
+ /* In a combined parallel loop, emit a call to
+ GOMP_loop_foo_next. */
+ t = build_call_expr (built_in_decls[next_fn], 2,
+ build_fold_addr_expr (istart0),
+ build_fold_addr_expr (iend0));
+ }
+ else
{
tree t0, t1, t2, t3, t4;
/* If this is not a combined parallel loop, emit a call to
GOMP_loop_foo_start in ENTRY_BB. */
- list = alloc_stmt_list ();
t4 = build_fold_addr_expr (iend0);
t3 = build_fold_addr_expr (istart0);
t2 = fold_convert (long_integer_type_node, fd->step);
@@ -2635,58 +2702,80 @@ expand_omp_for_generic (struct omp_region *region,
else
t = build_call_expr (built_in_decls[start_fn], 5,
t0, t1, t2, t3, t4);
- t = get_formal_tmp_var (t, &list);
- t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
}
+ t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+
+ /* V may be used outside of the loop (e.g., to handle lastprivate clause).
+ If this is the case, its value is undefined if the loop is not entered
+ at all. To handle this case, set its initial value to N1. */
+ if (gimple_in_ssa_p (cfun))
+ {
+ e = find_edge (entry_bb, l3_bb);
+ for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
+ if (PHI_ARG_DEF_FROM_EDGE (phi, e) == fd->v)
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), fd->n1);
+ }
+ else
+ {
+ t = build_gimple_modify_stmt (fd->v, fd->n1);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ }
+
+ /* Remove the OMP_FOR statement. */
bsi_remove (&si, true);
/* Iteration setup for sequential loop goes in L0_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (l0_bb);
t = fold_convert (type, istart0);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = fold_convert (type, iend0);
- t = build_gimple_modify_stmt (iend, t);
- gimplify_and_add (t, &list);
-
- si = bsi_start (l0_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
if (!broken_loop)
{
/* Code to control the increment and predicate for the sequential
loop goes in the CONT_BB. */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ si = bsi_last (cont_bb);
+ t = bsi_stmt (si);
+ gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (t, 1);
+ vback = TREE_OPERAND (t, 0);
+
+ t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build_gimple_modify_stmt (vback, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vback) = t;
- t = build2 (fd->cond_code, boolean_type_node, fd->v, iend);
- t = get_formal_tmp_var (t, &list);
+ t = build2 (fd->cond_code, boolean_type_node, vback, iend);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (cont_bb);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
+ /* Remove OMP_CONTINUE. */
bsi_remove (&si, true);
/* Emit code to get the next parallel iteration in L2_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (l2_bb);
t = build_call_expr (built_in_decls[next_fn], 2,
build_fold_addr_expr (istart0),
build_fold_addr_expr (iend0));
- t = get_formal_tmp_var (t, &list);
+ t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
-
- si = bsi_start (l2_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
}
/* Add the loop cleanup function. */
@@ -2700,25 +2789,31 @@ expand_omp_for_generic (struct omp_region *region,
bsi_remove (&si, true);
/* Connect the new blocks. */
- if (in_combined_parallel)
- {
- remove_edge (BRANCH_EDGE (entry_bb));
- redirect_edge_and_branch (single_succ_edge (entry_bb), l2_bb);
- }
- else
- {
- find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
- find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
- }
+ find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
+ find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
if (!broken_loop)
{
+ e = find_edge (cont_bb, l3_bb);
+ ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
+
+ for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
+ PHI_ARG_DEF_FROM_EDGE (phi, e));
+ remove_edge (e);
+
find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
- remove_edge (find_edge (cont_bb, l3_bb));
make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
-
make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
- make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
+
+ set_immediate_dominator (CDI_DOMINATORS, l2_bb,
+ recompute_dominator (CDI_DOMINATORS, l2_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l3_bb,
+ recompute_dominator (CDI_DOMINATORS, l3_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l0_bb,
+ recompute_dominator (CDI_DOMINATORS, l0_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l1_bb,
+ recompute_dominator (CDI_DOMINATORS, l1_bb));
}
}
@@ -2740,9 +2835,9 @@ expand_omp_for_generic (struct omp_region *region,
q += (q * nthreads != n);
s0 = q * threadid;
e0 = min(s0 + q, n);
+ V = s0 * STEP + N1;
if (s0 >= e0) goto L2; else goto L0;
L0:
- V = s0 * STEP + N1;
e = e0 * STEP + N1;
L1:
BODY;
@@ -2756,7 +2851,7 @@ expand_omp_for_static_nochunk (struct omp_region *region,
struct omp_for_data *fd)
{
tree n, q, s0, e0, e, t, nthreads, threadid;
- tree type, list;
+ tree type, vmain, vback;
basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
basic_block fin_bb;
block_stmt_iterator si;
@@ -2775,27 +2870,33 @@ expand_omp_for_static_nochunk (struct omp_region *region,
exit_bb = region->exit;
/* Iteration space partitioning goes in ENTRY_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (entry_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
t = fold_convert (type, t);
- nthreads = get_formal_tmp_var (t, &list);
+ nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (type, t);
- threadid = get_formal_tmp_var (t, &list);
+ threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->n1 = fold_convert (type, fd->n1);
- if (!is_gimple_val (fd->n1))
- fd->n1 = get_formal_tmp_var (fd->n1, &list);
+ fd->n1 = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->n1),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->n2 = fold_convert (type, fd->n2);
- if (!is_gimple_val (fd->n2))
- fd->n2 = get_formal_tmp_var (fd->n2, &list);
+ fd->n2 = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->n2),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->step = fold_convert (type, fd->step);
- if (!is_gimple_val (fd->step))
- fd->step = get_formal_tmp_var (fd->step, &list);
+ fd->step = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->step),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, type, fd->step, t);
@@ -2803,85 +2904,90 @@ expand_omp_for_static_nochunk (struct omp_region *region,
t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
t = fold_convert (type, t);
- if (is_gimple_val (t))
- n = t;
- else
- n = get_formal_tmp_var (t, &list);
+ n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (TRUNC_DIV_EXPR, type, n, nthreads);
- q = get_formal_tmp_var (t, &list);
+ t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
+ q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (MULT_EXPR, type, q, nthreads);
- t = build2 (NE_EXPR, type, t, n);
- t = build2 (PLUS_EXPR, type, q, t);
- q = get_formal_tmp_var (t, &list);
+ t = fold_build2 (MULT_EXPR, type, q, nthreads);
+ t = fold_build2 (NE_EXPR, type, t, n);
+ t = fold_build2 (PLUS_EXPR, type, q, t);
+ q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
t = build2 (MULT_EXPR, type, q, threadid);
- s0 = get_formal_tmp_var (t, &list);
+ s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
+
+ t = fold_build2 (PLUS_EXPR, type, s0, q);
+ t = fold_build2 (MIN_EXPR, type, t, n);
+ e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (PLUS_EXPR, type, s0, q);
- t = build2 (MIN_EXPR, type, t, n);
- e0 = get_formal_tmp_var (t, &list);
+ t = fold_convert (type, s0);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build_gimple_modify_stmt (fd->v, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = build2 (GE_EXPR, boolean_type_node, s0, e0);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (entry_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ /* Remove the OMP_FOR statement. */
bsi_remove (&si, true);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
- list = alloc_stmt_list ();
-
- t = fold_convert (type, s0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ si = bsi_start (seq_start_bb);
t = fold_convert (type, e0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
- e = get_formal_tmp_var (t, &list);
-
- si = bsi_start (seq_start_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
/* The code controlling the sequential loop replaces the OMP_CONTINUE. */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
-
- t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
- t = get_formal_tmp_var (t, &list);
+ si = bsi_last (cont_bb);
+ t = bsi_stmt (si);
+ gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (t, 1);
+ vback = TREE_OPERAND (t, 0);
+
+ t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build_gimple_modify_stmt (vback, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vback) = t;
+
+ t = build2 (fd->cond_code, boolean_type_node, vback, e);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (cont_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ /* Remove the OMP_CONTINUE statement. */
bsi_remove (&si, true);
/* Replace the OMP_RETURN with a barrier, or nothing. */
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
- {
- list = alloc_stmt_list ();
- build_omp_barrier (&list);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
/* Connect all the blocks. */
find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
-
+
find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
+
+ set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
+ set_immediate_dominator (CDI_DOMINATORS, body_bb,
+ recompute_dominator (CDI_DOMINATORS, body_bb));
+ set_immediate_dominator (CDI_DOMINATORS, fin_bb,
+ recompute_dominator (CDI_DOMINATORS, fin_bb));
}
@@ -2899,6 +3005,9 @@ expand_omp_for_static_nochunk (struct omp_region *region,
adj = STEP + 1;
n = (adj + N2 - N1) / STEP;
trip = 0;
+ V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
+ here so that V is defined
+ if the loop is not entered
L0:
s0 = (trip * nthreads + threadid) * CHUNK;
e0 = min(s0 + CHUNK, n);
@@ -2919,14 +3028,13 @@ expand_omp_for_static_nochunk (struct omp_region *region,
static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
- tree n, s0, e0, e, t;
- tree trip, nthreads, threadid;
- tree type;
+ tree n, s0, e0, e, t, phi, nphi, args;
+ tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
+ tree type, cont, v_main, v_back, v_extra;
basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
basic_block trip_update_bb, cont_bb, fin_bb;
- tree list;
block_stmt_iterator si;
- edge se;
+ edge se, re, ene;
type = TREE_TYPE (fd->v);
@@ -2947,31 +3055,33 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
exit_bb = region->exit;
/* Trip and adjustment setup goes in ENTRY_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (entry_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
t = fold_convert (type, t);
- nthreads = get_formal_tmp_var (t, &list);
+ nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (type, t);
- threadid = get_formal_tmp_var (t, &list);
-
- fd->n1 = fold_convert (type, fd->n1);
- if (!is_gimple_val (fd->n1))
- fd->n1 = get_formal_tmp_var (fd->n1, &list);
-
- fd->n2 = fold_convert (type, fd->n2);
- if (!is_gimple_val (fd->n2))
- fd->n2 = get_formal_tmp_var (fd->n2, &list);
-
- fd->step = fold_convert (type, fd->step);
- if (!is_gimple_val (fd->step))
- fd->step = get_formal_tmp_var (fd->step, &list);
-
- fd->chunk_size = fold_convert (type, fd->chunk_size);
- if (!is_gimple_val (fd->chunk_size))
- fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);
+ threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->chunk_size
+ = force_gimple_operand_bsi (&si, fold_convert (type,
+ fd->chunk_size),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, type, fd->step, t);
@@ -2979,102 +3089,170 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
t = fold_convert (type, t);
- if (is_gimple_val (t))
- n = t;
+ n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ trip_var = create_tmp_var (type, ".trip");
+ if (gimple_in_ssa_p (cfun))
+ {
+ add_referenced_var (trip_var);
+ trip_init = make_ssa_name (trip_var, NULL_TREE);
+ trip_main = make_ssa_name (trip_var, NULL_TREE);
+ trip_back = make_ssa_name (trip_var, NULL_TREE);
+ }
else
- n = get_formal_tmp_var (t, &list);
+ {
+ trip_init = trip_var;
+ trip_main = trip_var;
+ trip_back = trip_var;
+ }
- t = build_int_cst (type, 0);
- trip = get_initialized_tmp_var (t, &list, NULL);
+ t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (trip_init) = t;
- si = bsi_last (entry_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ /* Remove the OMP_FOR. */
bsi_remove (&si, true);
/* Iteration space partitioning goes in ITER_PART_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (iter_part_bb);
- t = build2 (MULT_EXPR, type, trip, nthreads);
- t = build2 (PLUS_EXPR, type, t, threadid);
- t = build2 (MULT_EXPR, type, t, fd->chunk_size);
- s0 = get_formal_tmp_var (t, &list);
+ t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
+ t = fold_build2 (PLUS_EXPR, type, t, threadid);
+ t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
+ s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
- t = build2 (PLUS_EXPR, type, s0, fd->chunk_size);
- t = build2 (MIN_EXPR, type, t, n);
- e0 = get_formal_tmp_var (t, &list);
+ t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
+ t = fold_build2 (MIN_EXPR, type, t, n);
+ e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build2 (LT_EXPR, boolean_type_node, s0, n);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
-
- si = bsi_start (iter_part_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (seq_start_bb);
t = fold_convert (type, s0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = fold_convert (type, e0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
- e = get_formal_tmp_var (t, &list);
-
- si = bsi_start (seq_start_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
/* The code controlling the sequential loop goes in CONT_BB,
replacing the OMP_CONTINUE. */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
-
- t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
- t = get_formal_tmp_var (t, &list);
+ si = bsi_last (cont_bb);
+ cont = bsi_stmt (si);
+ gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
+ v_main = TREE_OPERAND (cont, 1);
+ v_back = TREE_OPERAND (cont, 0);
+
+ t = build2 (PLUS_EXPR, type, v_main, fd->step);
+ t = build_gimple_modify_stmt (v_back, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (v_back) = t;
+
+ t = build2 (fd->cond_code, boolean_type_node, v_back, e);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (cont_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ /* Remove OMP_CONTINUE. */
bsi_remove (&si, true);
/* Trip update code goes into TRIP_UPDATE_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (trip_update_bb);
t = build_int_cst (type, 1);
- t = build2 (PLUS_EXPR, type, trip, t);
- t = build_gimple_modify_stmt (trip, t);
- gimplify_and_add (t, &list);
-
- si = bsi_start (trip_update_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = build2 (PLUS_EXPR, type, trip_main, t);
+ t = build_gimple_modify_stmt (trip_back, t);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (trip_back) = t;
/* Replace the OMP_RETURN with a barrier, or nothing. */
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
- {
- list = alloc_stmt_list ();
- build_omp_barrier (&list);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
/* Connect the new blocks. */
find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
-
+
find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
-
+
redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
+
+ if (gimple_in_ssa_p (cfun))
+ {
+ /* When we redirect the edge from trip_update_bb to iter_part_bb, we
+ remove arguments of the phi nodes in fin_bb. We need to create
+ appropriate phi nodes in iter_part_bb instead. */
+ se = single_pred_edge (fin_bb);
+ re = single_succ_edge (trip_update_bb);
+ ene = single_succ_edge (entry_bb);
+
+ args = PENDING_STMT (re);
+ PENDING_STMT (re) = NULL_TREE;
+ for (phi = phi_nodes (fin_bb);
+ phi && args;
+ phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
+ {
+ t = PHI_RESULT (phi);
+ gcc_assert (t == TREE_PURPOSE (args));
+ nphi = create_phi_node (t, iter_part_bb);
+ SSA_NAME_DEF_STMT (t) = nphi;
+
+ t = PHI_ARG_DEF_FROM_EDGE (phi, se);
+ /* A special case -- fd->v is not yet computed in iter_part_bb, we
+ need to use v_extra instead. */
+ if (t == fd->v)
+ t = v_extra;
+ add_phi_arg (nphi, t, ene);
+ add_phi_arg (nphi, TREE_VALUE (args), re);
+ }
+ gcc_assert (!phi && !args);
+ while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
+ remove_phi_node (phi, NULL_TREE, false);
+
+ /* Make phi node for trip. */
+ phi = create_phi_node (trip_main, iter_part_bb);
+ SSA_NAME_DEF_STMT (trip_main) = phi;
+ add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
+ add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
+ }
+
+ set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
+ set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
+ recompute_dominator (CDI_DOMINATORS, iter_part_bb));
+ set_immediate_dominator (CDI_DOMINATORS, fin_bb,
+ recompute_dominator (CDI_DOMINATORS, fin_bb));
+ set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
+ recompute_dominator (CDI_DOMINATORS, seq_start_bb));
+ set_immediate_dominator (CDI_DOMINATORS, body_bb,
+ recompute_dominator (CDI_DOMINATORS, body_bb));
}
@@ -3085,8 +3263,6 @@ expand_omp_for (struct omp_region *region)
{
struct omp_for_data fd;
- push_gimplify_context ();
-
extract_omp_for_data (last_stmt (region->entry), &fd);
region->sched_kind = fd.sched_kind;
@@ -3106,8 +3282,6 @@ expand_omp_for (struct omp_region *region)
int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
expand_omp_for_generic (region, &fd, start_ix, next_ix);
}
-
- pop_gimplify_context (NULL);
}
@@ -3136,12 +3310,12 @@ expand_omp_for (struct omp_region *region)
reduction;
If this is a combined parallel sections, replace the call to
- GOMP_sections_start with 'goto L1'. */
+ GOMP_sections_start with call to GOMP_sections_next. */
static void
expand_omp_sections (struct omp_region *region)
{
- tree label_vec, l1, l2, t, u, v, sections_stmt;
+ tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
unsigned i, casei, len;
basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
block_stmt_iterator si;
@@ -3178,7 +3352,7 @@ expand_omp_sections (struct omp_region *region)
si = bsi_last (entry_bb);
sections_stmt = bsi_stmt (si);
gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
- v = OMP_SECTIONS_CONTROL (sections_stmt);
+ vin = OMP_SECTIONS_CONTROL (sections_stmt);
if (!is_combined_parallel (region))
{
/* If we are not inside a combined parallel+sections region,
@@ -3187,16 +3361,36 @@ expand_omp_sections (struct omp_region *region)
exit_reachable ? len - 1 : len);
u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
t = build_call_expr (u, 1, t);
- t = build_gimple_modify_stmt (v, t);
- bsi_insert_after (&si, t, BSI_SAME_STMT);
}
+ else
+ {
+ /* Otherwise, call GOMP_sections_next. */
+ u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
+ t = build_call_expr (u, 0);
+ }
+ t = build_gimple_modify_stmt (vin, t);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vin) = t;
bsi_remove (&si, true);
/* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
si = bsi_last (l0_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
+ if (exit_reachable)
+ {
+ cont = last_stmt (l1_bb);
+ gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (cont, 1);
+ vnext = TREE_OPERAND (cont, 0);
+ }
+ else
+ {
+ vmain = vin;
+ vnext = NULL_TREE;
+ }
- t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec);
+ t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
bsi_insert_after (&si, t, BSI_SAME_STMT);
bsi_remove (&si, true);
@@ -3257,8 +3451,10 @@ expand_omp_sections (struct omp_region *region)
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
- t = build_gimple_modify_stmt (v, t);
+ t = build_gimple_modify_stmt (vnext, t);
bsi_insert_after (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vnext) = t;
bsi_remove (&si, true);
single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
@@ -3274,15 +3470,7 @@ expand_omp_sections (struct omp_region *region)
bsi_remove (&si, true);
}
- /* Connect the new blocks. */
- if (is_combined_parallel (region))
- {
- /* If this was a combined parallel+sections region, we did not
- emit a GOMP_sections_start in the entry block, so we just
- need to jump to L1_BB to get the next section. */
- gcc_assert (exit_reachable);
- redirect_edge_and_branch (single_succ_edge (entry_bb), l1_bb);
- }
+ set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
@@ -3312,11 +3500,8 @@ expand_omp_single (struct omp_region *region)
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
- {
- tree t = alloc_stmt_list ();
- build_omp_barrier (&t);
- bsi_insert_after (&si, t, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
@@ -3498,8 +3683,6 @@ execute_expand_omp (void)
expand_omp (root_omp_region);
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
cleanup_tree_cfg ();
free_omp_regions ();
@@ -3507,10 +3690,38 @@ execute_expand_omp (void)
return 0;
}
+/* OMP expansion in SSA form. For testing purposes only. */
+
+static bool
+gate_expand_omp_ssa (void)
+{
+ return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
+}
+
+struct tree_opt_pass pass_expand_omp_ssa =
+{
+ "ompexpssa", /* name */
+ gate_expand_omp_ssa, /* gate */
+ execute_expand_omp, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ 0, /* tv_id */
+ PROP_gimple_any, /* properties_required */
+ PROP_gimple_lomp, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func, /* todo_flags_finish */
+ 0 /* letter */
+};
+
+/* OMP expansion -- the default pass, run before creation of SSA form. */
+
static bool
gate_expand_omp (void)
{
- return flag_openmp != 0 && errorcount == 0;
+ return ((!flag_openmp_ssa || !optimize)
+ && flag_openmp != 0 && errorcount == 0);
}
struct tree_opt_pass pass_expand_omp =
diff --git a/gcc/passes.c b/gcc/passes.c
index db6128e9b86..29ec8e282eb 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -515,6 +515,7 @@ init_optimization_passes (void)
NEXT_PASS (pass_referenced_vars);
NEXT_PASS (pass_reset_cc_flags);
NEXT_PASS (pass_build_ssa);
+ NEXT_PASS (pass_expand_omp_ssa);
NEXT_PASS (pass_early_warn_uninitialized);
NEXT_PASS (pass_rebuild_cgraph_edges);
NEXT_PASS (pass_early_inline);
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 461f3f2db25..84000537b2f 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "pointer-set.h"
+#include "tree-inline.h"
/* This file contains functions for building the Control Flow Graph (CFG)
for a function tree. */
@@ -5248,13 +5249,89 @@ gather_blocks_in_sese_region (basic_block entry, basic_block exit,
}
}
+/* Replaces *TP with a duplicate (belonging to function TO_CONTEXT).
+ The duplicates are recorded in VARS_MAP. */
+
+static void
+replace_by_duplicate_decl (tree *tp, struct pointer_map_t *vars_map,
+ tree to_context)
+{
+ tree t = *tp, new_t;
+ struct function *f = DECL_STRUCT_FUNCTION (to_context);
+ void **loc;
+
+ if (DECL_CONTEXT (t) == to_context)
+ return;
+
+ loc = pointer_map_contains (vars_map, t);
+
+ if (!loc)
+ {
+ loc = pointer_map_insert (vars_map, t);
+
+ if (SSA_VAR_P (t))
+ {
+ new_t = copy_var_decl (t, DECL_NAME (t), TREE_TYPE (t));
+ f->unexpanded_var_list
+ = tree_cons (NULL_TREE, new_t, f->unexpanded_var_list);
+ }
+ else
+ {
+ gcc_assert (TREE_CODE (t) == CONST_DECL);
+ new_t = copy_node (t);
+ }
+ DECL_CONTEXT (new_t) = to_context;
+
+ *loc = new_t;
+ }
+ else
+ new_t = *loc;
+
+ *tp = new_t;
+}
+
+/* Creates an ssa name in TO_CONTEXT equivalent to NAME.
+ VARS_MAP maps old ssa names and var_decls to the new ones. */
+
+static tree
+replace_ssa_name (tree name, struct pointer_map_t *vars_map,
+ tree to_context)
+{
+ void **loc;
+ tree new_name, decl = SSA_NAME_VAR (name);
+
+ gcc_assert (is_gimple_reg (name));
+
+ loc = pointer_map_contains (vars_map, name);
+
+ if (!loc)
+ {
+ replace_by_duplicate_decl (&decl, vars_map, to_context);
+
+ push_cfun (DECL_STRUCT_FUNCTION (to_context));
+ if (gimple_in_ssa_p (cfun))
+ add_referenced_var (decl);
+
+ new_name = make_ssa_name (decl, SSA_NAME_DEF_STMT (name));
+ if (SSA_NAME_IS_DEFAULT_DEF (name))
+ set_default_def (decl, new_name);
+ pop_cfun ();
+
+ loc = pointer_map_insert (vars_map, name);
+ *loc = new_name;
+ }
+ else
+ new_name = *loc;
+
+ return new_name;
+}
struct move_stmt_d
{
tree block;
tree from_context;
tree to_context;
- bitmap vars_to_remove;
+ struct pointer_map_t *vars_map;
htab_t new_label_map;
bool remap_decls_p;
};
@@ -5289,9 +5366,11 @@ move_stmt_r (tree *tp, int *walk_subtrees, void *data)
p->remap_decls_p = save_remap_decls_p;
}
- else if (DECL_P (t) && DECL_CONTEXT (t) == p->from_context)
+ else if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
{
- if (TREE_CODE (t) == LABEL_DECL)
+ if (TREE_CODE (t) == SSA_NAME)
+ *tp = replace_ssa_name (t, p->vars_map, p->to_context);
+ else if (TREE_CODE (t) == LABEL_DECL)
{
if (p->new_label_map)
{
@@ -5306,20 +5385,26 @@ move_stmt_r (tree *tp, int *walk_subtrees, void *data)
}
else if (p->remap_decls_p)
{
- DECL_CONTEXT (t) = p->to_context;
-
- if (TREE_CODE (t) == VAR_DECL)
+ /* Replace T with its duplicate. T should no longer appear in the
+ parent function, so this looks wasteful; however, it may appear
+ in referenced_vars, and more importantly, as virtual operands of
+ statements, and in alias lists of other variables. It would be
+ quite difficult to expunge it from all those places. ??? It might
+ suffice to do this for addressable variables. */
+ if ((TREE_CODE (t) == VAR_DECL
+ && !is_global_var (t))
+ || TREE_CODE (t) == CONST_DECL)
+ replace_by_duplicate_decl (tp, p->vars_map, p->to_context);
+
+ if (SSA_VAR_P (t)
+ && gimple_in_ssa_p (cfun))
{
- struct function *f = DECL_STRUCT_FUNCTION (p->to_context);
- f->unexpanded_var_list
- = tree_cons (0, t, f->unexpanded_var_list);
-
- /* Mark T to be removed from the original function,
- otherwise it will be given a DECL_RTL when the
- original function is expanded. */
- bitmap_set_bit (p->vars_to_remove, DECL_UID (t));
+ push_cfun (DECL_STRUCT_FUNCTION (p->to_context));
+ add_referenced_var (*tp);
+ pop_cfun ();
}
}
+ *walk_subtrees = 0;
}
else if (TYPE_P (t))
*walk_subtrees = 0;
@@ -5327,6 +5412,26 @@ move_stmt_r (tree *tp, int *walk_subtrees, void *data)
return NULL_TREE;
}
+/* Marks virtual operands of all statements in basic blocks BBS for
+ renaming. */
+
+static void
+mark_virtual_ops_in_region (VEC (basic_block,heap) *bbs)
+{
+ tree phi;
+ block_stmt_iterator bsi;
+ basic_block bb;
+ unsigned i;
+
+ for (i = 0; VEC_iterate (basic_block, bbs, i, bb); i++)
+ {
+ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
+ mark_virtual_ops_for_renaming (phi);
+
+ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ mark_virtual_ops_for_renaming (bsi_stmt (bsi));
+ }
+}
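mark_virtual_ops_in_region only queues the underlying symbols of virtual operands for renaming (via mark_virtual_ops_for_renaming, which this patch also teaches about PHI nodes); something later still has to rewrite them back into SSA form in the source function. A hedged sketch of what that repair step typically looks like; illustrative only, since nothing in the hunks shown here performs this call:

      /* Symbols were queued with mark_sym_for_renaming; rebuild just the
         virtual operand web of the current function.  */
      if (need_ssa_update_p ())
        update_ssa (TODO_update_ssa_only_virtuals);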
/* Move basic block BB from function CFUN to function DEST_FN. The
block is moved out of the original linked list and placed after
@@ -5335,13 +5440,14 @@ move_stmt_r (tree *tp, int *walk_subtrees, void *data)
If UPDATE_EDGE_COUNT_P is true, the edge counts on both CFGs are
updated to reflect the moved edges.
- On exit, local variables that need to be removed from
- CFUN->UNEXPANDED_VAR_LIST will have been added to VARS_TO_REMOVE. */
+ The local variables are remapped to new instances; VARS_MAP is used
+ to record the mapping. */
static void
move_block_to_fn (struct function *dest_cfun, basic_block bb,
basic_block after, bool update_edge_count_p,
- bitmap vars_to_remove, htab_t new_label_map, int eh_offset)
+ struct pointer_map_t *vars_map, htab_t new_label_map,
+ int eh_offset)
{
struct control_flow_graph *cfg;
edge_iterator ei;
@@ -5349,6 +5455,7 @@ move_block_to_fn (struct function *dest_cfun, basic_block bb,
block_stmt_iterator si;
struct move_stmt_d d;
unsigned old_len, new_len;
+ tree phi;
/* Remove BB from dominance structures. */
delete_from_dominance_info (CDI_DOMINATORS, bb);
@@ -5385,20 +5492,39 @@ move_block_to_fn (struct function *dest_cfun, basic_block bb,
VEC_replace (basic_block, cfg->x_basic_block_info,
bb->index, bb);
+ /* Remap the variables in phi nodes. */
+ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
+ {
+ use_operand_p use;
+ tree op = PHI_RESULT (phi);
+ ssa_op_iter oi;
+
+ if (!is_gimple_reg (op))
+ continue;
+
+ SET_PHI_RESULT (phi, replace_ssa_name (op, vars_map, dest_cfun->decl));
+ FOR_EACH_PHI_ARG (use, phi, oi, SSA_OP_USE)
+ {
+ op = USE_FROM_PTR (use);
+ if (TREE_CODE (op) == SSA_NAME)
+ SET_USE (use, replace_ssa_name (op, vars_map, dest_cfun->decl));
+ }
+ }
+
/* The statements in BB need to be associated with a new TREE_BLOCK.
Labels need to be associated with a new label-to-block map. */
memset (&d, 0, sizeof (d));
- d.vars_to_remove = vars_to_remove;
+ d.vars_map = vars_map;
+ d.from_context = cfun->decl;
+ d.to_context = dest_cfun->decl;
+ d.new_label_map = new_label_map;
for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si))
{
tree stmt = bsi_stmt (si);
int region;
- d.from_context = cfun->decl;
- d.to_context = dest_cfun->decl;
d.remap_decls_p = true;
- d.new_label_map = new_label_map;
if (TREE_BLOCK (stmt))
d.block = DECL_INITIAL (dest_cfun->decl);
@@ -5441,6 +5567,8 @@ move_block_to_fn (struct function *dest_cfun, basic_block bb,
gimple_duplicate_stmt_histograms (dest_cfun, stmt, cfun, stmt);
gimple_remove_stmt_histograms (cfun, stmt);
}
+
+ update_stmt (stmt);
}
}
@@ -5518,21 +5646,17 @@ basic_block
move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
basic_block exit_bb)
{
- VEC(basic_block,heap) *bbs;
- basic_block after, bb, *entry_pred, *exit_succ;
- struct function *saved_cfun;
+ VEC(basic_block,heap) *bbs, *dom_bbs;
+ basic_block dom_entry = get_immediate_dominator (CDI_DOMINATORS, entry_bb);
+ basic_block after, bb, *entry_pred, *exit_succ, abb;
+ struct function *saved_cfun = cfun;
int *entry_flag, *exit_flag, eh_offset;
+ unsigned *entry_prob, *exit_prob;
unsigned i, num_entry_edges, num_exit_edges;
edge e;
edge_iterator ei;
- bitmap vars_to_remove;
htab_t new_label_map;
-
- saved_cfun = cfun;
-
- /* Collect all the blocks in the region. Manually add ENTRY_BB
- because it won't be added by dfs_enumerate_from. */
- calculate_dominance_info (CDI_DOMINATORS);
+ struct pointer_map_t *vars_map;
/* If ENTRY does not strictly dominate EXIT, this cannot be an SESE
region. */
@@ -5540,10 +5664,18 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
&& (!exit_bb
|| dominated_by_p (CDI_DOMINATORS, exit_bb, entry_bb)));
+ /* Collect all the blocks in the region. Manually add ENTRY_BB
+ because it won't be added by dfs_enumerate_from. */
bbs = NULL;
VEC_safe_push (basic_block, heap, bbs, entry_bb);
gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
+ /* The blocks that used to be dominated by something in BBS will now be
+ dominated by the new block. */
+ dom_bbs = get_dominated_by_region (CDI_DOMINATORS,
+ VEC_address (basic_block, bbs),
+ VEC_length (basic_block, bbs));
+
/* Detach ENTRY_BB and EXIT_BB from CFUN->CFG. We need to remember
the predecessor edges to ENTRY_BB and the successor edges to
EXIT_BB so that we can re-attach them to the new basic block that
@@ -5551,9 +5683,11 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
num_entry_edges = EDGE_COUNT (entry_bb->preds);
entry_pred = (basic_block *) xcalloc (num_entry_edges, sizeof (basic_block));
entry_flag = (int *) xcalloc (num_entry_edges, sizeof (int));
+ entry_prob = XNEWVEC (unsigned, num_entry_edges);
i = 0;
for (ei = ei_start (entry_bb->preds); (e = ei_safe_edge (ei)) != NULL;)
{
+ entry_prob[i] = e->probability;
entry_flag[i] = e->flags;
entry_pred[i++] = e->src;
remove_edge (e);
@@ -5565,9 +5699,11 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
exit_succ = (basic_block *) xcalloc (num_exit_edges,
sizeof (basic_block));
exit_flag = (int *) xcalloc (num_exit_edges, sizeof (int));
+ exit_prob = XNEWVEC (unsigned, num_exit_edges);
i = 0;
for (ei = ei_start (exit_bb->succs); (e = ei_safe_edge (ei)) != NULL;)
{
+ exit_prob[i] = e->probability;
exit_flag[i] = e->flags;
exit_succ[i++] = e->dest;
remove_edge (e);
@@ -5578,11 +5714,12 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
num_exit_edges = 0;
exit_succ = NULL;
exit_flag = NULL;
+ exit_prob = NULL;
}
/* Switch context to the child function to initialize DEST_FN's CFG. */
gcc_assert (dest_cfun->cfg == NULL);
- set_cfun (dest_cfun);
+ push_cfun (dest_cfun);
init_empty_tree_cfg ();
@@ -5605,46 +5742,30 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
}
}
- set_cfun (saved_cfun);
+ pop_cfun ();
+
+ /* The SSA form for virtual operands in the source function will have to
+ be repaired. We do not care about the real operands -- the SESE region
+ must be closed with respect to those. */
+ mark_virtual_ops_in_region (bbs);
/* Move blocks from BBS into DEST_CFUN. */
gcc_assert (VEC_length (basic_block, bbs) >= 2);
after = dest_cfun->cfg->x_entry_block_ptr;
- vars_to_remove = BITMAP_ALLOC (NULL);
+ vars_map = pointer_map_create ();
for (i = 0; VEC_iterate (basic_block, bbs, i, bb); i++)
{
/* No need to update edge counts on the last block. It has
already been updated earlier when we detached the region from
the original CFG. */
- move_block_to_fn (dest_cfun, bb, after, bb != exit_bb, vars_to_remove,
+ move_block_to_fn (dest_cfun, bb, after, bb != exit_bb, vars_map,
new_label_map, eh_offset);
after = bb;
}
if (new_label_map)
htab_delete (new_label_map);
-
- /* Remove the variables marked in VARS_TO_REMOVE from
- CFUN->UNEXPANDED_VAR_LIST. Otherwise, they will be given a
- DECL_RTL in the context of CFUN. */
- if (!bitmap_empty_p (vars_to_remove))
- {
- tree *p;
-
- for (p = &cfun->unexpanded_var_list; *p; )
- {
- tree var = TREE_VALUE (*p);
- if (bitmap_bit_p (vars_to_remove, DECL_UID (var)))
- {
- *p = TREE_CHAIN (*p);
- continue;
- }
-
- p = &TREE_CHAIN (*p);
- }
- }
-
- BITMAP_FREE (vars_to_remove);
+ pointer_map_destroy (vars_map);
/* Rewire the entry and exit blocks. The successor to the entry
block turns into the successor of DEST_FN's ENTRY_BLOCK_PTR in
@@ -5655,30 +5776,41 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
FIXME, this is silly. The CFG ought to become a parameter to
these helpers. */
- set_cfun (dest_cfun);
+ push_cfun (dest_cfun);
make_edge (ENTRY_BLOCK_PTR, entry_bb, EDGE_FALLTHRU);
if (exit_bb)
make_edge (exit_bb, EXIT_BLOCK_PTR, 0);
- set_cfun (saved_cfun);
+ pop_cfun ();
/* Back in the original function, the SESE region has disappeared;
create a new basic block in its place. */
bb = create_empty_bb (entry_pred[0]);
for (i = 0; i < num_entry_edges; i++)
- make_edge (entry_pred[i], bb, entry_flag[i]);
+ {
+ e = make_edge (entry_pred[i], bb, entry_flag[i]);
+ e->probability = entry_prob[i];
+ }
for (i = 0; i < num_exit_edges; i++)
- make_edge (bb, exit_succ[i], exit_flag[i]);
+ {
+ e = make_edge (bb, exit_succ[i], exit_flag[i]);
+ e->probability = exit_prob[i];
+ }
+
+ set_immediate_dominator (CDI_DOMINATORS, bb, dom_entry);
+ for (i = 0; VEC_iterate (basic_block, dom_bbs, i, abb); i++)
+ set_immediate_dominator (CDI_DOMINATORS, abb, bb);
+ VEC_free (basic_block, heap, dom_bbs);
if (exit_bb)
{
+ free (exit_prob);
free (exit_flag);
free (exit_succ);
}
+ free (entry_prob);
free (entry_flag);
free (entry_pred);
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
VEC_free (basic_block, heap, bbs);
return bb;
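move_sese_region_to_fn is the interface the OMP expansion uses to outline a parallel region into its child function. A rough sketch of the calling side; child_fn, region_entry and region_exit are illustrative placeholder names, not the exact omp-low.c code:

      /* DEST_CFUN is the freshly allocated struct function of the outlined
         child; REGION_ENTRY/REGION_EXIT delimit the single-entry
         single-exit region being outlined.  */
      struct function *dest_cfun = DECL_STRUCT_FUNCTION (child_fn);
      basic_block placeholder;

      /* Carve the region out: its blocks, local variables and SSA names are
         remapped into DEST_CFUN, while dominance and edge probabilities in
         the parent are fixed up as shown above.  */
      placeholder = move_sese_region_to_fn (dest_cfun, region_entry,
                                            region_exit);

      /* PLACEHOLDER now stands where the region used to be; the caller
         emits the runtime invocation (for a parallel region, the call to
         GOMP_parallel_start and friends) around it.  */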
diff --git a/gcc/tree-flow.h b/gcc/tree-flow.h
index 755b51f929f..83956a1a355 100644
--- a/gcc/tree-flow.h
+++ b/gcc/tree-flow.h
@@ -711,6 +711,8 @@ extern struct omp_region *root_omp_region;
extern struct omp_region *new_omp_region (basic_block, enum tree_code,
struct omp_region *);
extern void free_omp_regions (void);
+extern tree find_omp_clause (tree, enum tree_code);
+tree copy_var_decl (tree, tree, tree);
/*---------------------------------------------------------------------------
Function prototypes
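The two declarations added above export omp-low.c helpers to the rest of the middle end; within this patch, tree-cfg.c calls copy_var_decl when duplicating region-local variables and tree-ssa-operands.c calls find_omp_clause when scanning clause chains. A brief reminder of the calling convention, with var and clauses standing in for whatever trees the caller has at hand:

      /* Duplicate VAR, keeping its name and type.  */
      tree copy = copy_var_decl (var, DECL_NAME (var), TREE_TYPE (var));

      /* Fetch the first clause of the requested kind, or NULL_TREE.  */
      tree c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
      tree nthreads = c ? OMP_CLAUSE_NUM_THREADS_EXPR (c) : NULL_TREE;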
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 45ea307df59..a7de71700b2 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -290,6 +290,7 @@ extern struct tree_opt_pass pass_lower_vector;
extern struct tree_opt_pass pass_lower_vector_ssa;
extern struct tree_opt_pass pass_lower_omp;
extern struct tree_opt_pass pass_expand_omp;
+extern struct tree_opt_pass pass_expand_omp_ssa;
extern struct tree_opt_pass pass_object_sizes;
extern struct tree_opt_pass pass_fold_builtins;
extern struct tree_opt_pass pass_stdarg;
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index fcf42911d28..6fa80ee6133 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -1393,7 +1393,16 @@ mark_virtual_ops_for_renaming (tree stmt)
tree var;
if (TREE_CODE (stmt) == PHI_NODE)
- return;
+ {
+ var = PHI_RESULT (stmt);
+ if (is_gimple_reg (var))
+ return;
+
+ if (TREE_CODE (var) == SSA_NAME)
+ var = SSA_NAME_VAR (var);
+ mark_sym_for_renaming (var);
+ return;
+ }
update_stmt (stmt);
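The change above lets mark_virtual_ops_for_renaming be called on PHI nodes as well: PHIs over real registers are skipped, while a PHI over a memory symbol gets that symbol queued for renaming. Purely as an illustration of the same test applied per block (mark_virtual_phis_in_bb is a hypothetical name, not a function in this patch):

      static void
      mark_virtual_phis_in_bb (basic_block bb)
      {
        tree phi;

        for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
          if (!is_gimple_reg (PHI_RESULT (phi)))
            /* Virtual PHI: queue the symbol behind its result.  */
            mark_sym_for_renaming (SSA_NAME_VAR (PHI_RESULT (phi)));
      }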
diff --git a/gcc/tree-ssa-operands.c b/gcc/tree-ssa-operands.c
index 4996e09e854..c18f97d4960 100644
--- a/gcc/tree-ssa-operands.c
+++ b/gcc/tree-ssa-operands.c
@@ -2247,21 +2247,72 @@ get_expr_operands (tree stmt, tree *expr_p, int flags)
get_expr_operands (stmt, &CHANGE_DYNAMIC_TYPE_LOCATION (expr), opf_use);
return;
+ case OMP_FOR:
+ {
+ tree init = OMP_FOR_INIT (expr);
+ tree cond = OMP_FOR_COND (expr);
+ tree incr = OMP_FOR_INCR (expr);
+ tree c, clauses = OMP_FOR_CLAUSES (stmt);
+
+ get_expr_operands (stmt, &GIMPLE_STMT_OPERAND (init, 0), opf_def);
+ get_expr_operands (stmt, &GIMPLE_STMT_OPERAND (init, 1), opf_use);
+ get_expr_operands (stmt, &TREE_OPERAND (cond, 1), opf_use);
+ get_expr_operands (stmt, &TREE_OPERAND (GIMPLE_STMT_OPERAND (incr, 1), 1),
+ opf_use);
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
+ if (c)
+ get_expr_operands (stmt, &OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c),
+ opf_use);
+ return;
+ }
+
+ case OMP_CONTINUE:
+ {
+ get_expr_operands (stmt, &TREE_OPERAND (expr, 0), opf_def);
+ get_expr_operands (stmt, &TREE_OPERAND (expr, 1), opf_use);
+ return;
+ }
+
+ case OMP_PARALLEL:
+ {
+ tree c, clauses = OMP_PARALLEL_CLAUSES (stmt);
+
+ if (OMP_PARALLEL_DATA_ARG (stmt))
+ {
+ get_expr_operands (stmt, &OMP_PARALLEL_DATA_ARG (stmt), opf_use);
+ add_to_addressable_set (OMP_PARALLEL_DATA_ARG (stmt),
+ &s_ann->addresses_taken);
+ }
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_IF);
+ if (c)
+ get_expr_operands (stmt, &OMP_CLAUSE_IF_EXPR (c), opf_use);
+ c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
+ if (c)
+ get_expr_operands (stmt, &OMP_CLAUSE_NUM_THREADS_EXPR (c), opf_use);
+ return;
+ }
+
+ case OMP_SECTIONS:
+ {
+ get_expr_operands (stmt, &OMP_SECTIONS_CONTROL (expr), opf_def);
+ return;
+ }
+
case BLOCK:
case FUNCTION_DECL:
case EXC_PTR_EXPR:
case FILTER_EXPR:
case LABEL_DECL:
case CONST_DECL:
- case OMP_PARALLEL:
- case OMP_SECTIONS:
- case OMP_FOR:
case OMP_SINGLE:
case OMP_MASTER:
case OMP_ORDERED:
case OMP_CRITICAL:
case OMP_RETURN:
- case OMP_CONTINUE:
+ case OMP_SECTION:
+ case OMP_SECTIONS_SWITCH:
/* Expressions that make no memory references. */
return;
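The OMP_FOR case added above assumes the canonical, already-gimplified shape of the loop header: OMP_FOR_INIT is an assignment VAR = N1, OMP_FOR_COND compares VAR (operand 0) against N2 (operand 1), and OMP_FOR_INCR assigns VAR +/- STEP back to VAR, so the init's left-hand side is the only def while N1, N2, STEP and the optional schedule chunk are plain uses. A small sketch that pulls those pieces apart with the same accessors; omp_for_header_parts is an illustrative name, not a function in this patch:

      static void
      omp_for_header_parts (tree omp_for, tree *var, tree *n1, tree *n2,
                            tree *step)
      {
        tree init = OMP_FOR_INIT (omp_for);   /* VAR = N1  */
        tree cond = OMP_FOR_COND (omp_for);   /* VAR <, <=, >, >= N2  */
        tree incr = OMP_FOR_INCR (omp_for);   /* VAR = VAR +/- STEP  */

        *var = GIMPLE_STMT_OPERAND (init, 0);
        *n1 = GIMPLE_STMT_OPERAND (init, 1);
        *n2 = TREE_OPERAND (cond, 1);
        *step = TREE_OPERAND (GIMPLE_STMT_OPERAND (incr, 1), 1);
      }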