summary | refs | log | tree | commit | diff
path: root/gcc/omp-low.c
diff options
context:
space:
mode:
author    rakdver <rakdver@138bc75d-0d04-0410-961f-82ee72b054a4>    2007-09-07 02:40:14 +0000
committer rakdver <rakdver@138bc75d-0d04-0410-961f-82ee72b054a4>    2007-09-07 02:40:14 +0000
commit    79acaae17e126f58016355519d445e2766a34771 (patch)
tree      7c5c590c4bfd0b7c4f0dda268de7e85ce217d91f /gcc/omp-low.c
parent    0c234f81517b076084b84ce17198cff0522e9a9e (diff)
download  gcc-79acaae17e126f58016355519d445e2766a34771.tar.gz
* cgraphbuild.c (rebuild_cgraph_edges): Export.
	* cgraph.h (rebuild_cgraph_edges): Declare.
	* tree-pass.h (pass_expand_omp_ssa): New.
	* omp-low.c (find_omp_clause): Export.
	(copy_var_decl): Split from omp_copy_decl_2.
	(build_omp_barrier): Return the call to emit instead of emitting
	it directly.
	(lower_rec_input_clauses, expand_omp_single): Gimplify the result of
	build_omp_barrier.
	(extract_omp_for_data, expand_parallel_call, expand_omp_parallel,
	expand_omp_for_generic, expand_omp_for_static_nochunk,
	expand_omp_for_static_chunk, expand_omp_for, expand_omp_sections):
	Adapted to work on SSA form.
	(execute_expand_omp): Do not invalidate dominance information.
	(gate_expand_omp): Do not run with -fopenmp-ssa flag.
	(gate_expand_omp_ssa, pass_expand_omp_ssa): New.
	* gimplify.c (gimplify_omp_for): Ensure that the control variable is
	a gimple_reg.
	(force_gimple_operand): Allow gimplifying code expressions without
	value.
	* tree-predcom.c (mark_virtual_ops_for_renaming): Handle phi nodes.
	* common.opt (fopenmp-ssa): New.
	* tree-flow.h (find_omp_clause, copy_var_decl): Declare.
	* Makefile.in (tree-cfg.o): Add TREE_INLINE_H dependency.
	* tree-cfg.c: Include tree-inline.h.
	(struct move_stmt_d): Replace vars_to_remove by vars_map field.
	(replace_by_duplicate_decl, replace_ssa_name,
	mark_virtual_ops_in_region): New functions.
	(move_stmt_r, move_block_to_fn, move_sese_region_to_fn): Adapted
	to work on SSA form.
	* passes.c (init_optimization_passes): Add pass_expand_omp_ssa pass.
	* tree-ssa-operands.c (get_expr_operands): Handle operands of OMP
	constructs.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@128223 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/omp-low.c')
-rw-r--r--    gcc/omp-low.c    787
1 file changed, 499 insertions, 288 deletions
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index c1ab3f31193..57f36509c67 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -117,7 +117,7 @@ static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES. */
-static tree
+tree
find_omp_clause (tree clauses, enum tree_code kind)
{
for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
@@ -151,7 +151,7 @@ is_combined_parallel (struct omp_region *region)
static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
- tree t;
+ tree t, var;
fd->for_stmt = for_stmt;
fd->pre = NULL;
@@ -159,13 +159,14 @@ extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
t = OMP_FOR_INIT (for_stmt);
gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
fd->v = GIMPLE_STMT_OPERAND (t, 0);
- gcc_assert (DECL_P (fd->v));
+ gcc_assert (SSA_VAR_P (fd->v));
gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
+ var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
fd->n1 = GIMPLE_STMT_OPERAND (t, 1);
t = OMP_FOR_COND (for_stmt);
fd->cond_code = TREE_CODE (t);
- gcc_assert (TREE_OPERAND (t, 0) == fd->v);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
fd->n2 = TREE_OPERAND (t, 1);
switch (fd->cond_code)
{
@@ -188,9 +189,9 @@ extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
t = OMP_FOR_INCR (fd->for_stmt);
gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
- gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == fd->v);
+ gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
t = GIMPLE_STMT_OPERAND (t, 1);
- gcc_assert (TREE_OPERAND (t, 0) == fd->v);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
switch (TREE_CODE (t))
{
case PLUS_EXPR:
@@ -513,22 +514,34 @@ use_pointer_for_field (const_tree decl, bool shared_p)
return false;
}
-/* Construct a new automatic decl similar to VAR. */
+/* Create a new VAR_DECL and copy information from VAR to it. */
-static tree
-omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
+tree
+copy_var_decl (tree var, tree name, tree type)
{
tree copy = build_decl (VAR_DECL, name, type);
TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
+ TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
+ DECL_CONTEXT (copy) = DECL_CONTEXT (var);
TREE_USED (copy) = 1;
- DECL_CONTEXT (copy) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
+ return copy;
+}
+
+/* Construct a new automatic decl similar to VAR. */
+
+static tree
+omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
+{
+ tree copy = copy_var_decl (var, name, type);
+
+ DECL_CONTEXT (copy) = current_function_decl;
TREE_CHAIN (copy) = ctx->block_vars;
ctx->block_vars = copy;
@@ -1432,11 +1445,10 @@ scan_omp (tree *stmt_p, omp_context *ctx)
/* Build a call to GOMP_barrier. */
-static void
-build_omp_barrier (tree *stmt_list)
+static tree
+build_omp_barrier (void)
{
- tree t = build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
- gimplify_and_add (t, stmt_list);
+ return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}
/* If a context was created for STMT when it was scanned, return it. */
@@ -1829,7 +1841,7 @@ lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
lastprivate clauses we need to ensure the lastprivate copying
happens after firstprivate copying in all threads. */
if (copyin_by_ref || lastprivate_firstprivate)
- build_omp_barrier (ilist);
+ gimplify_and_add (build_omp_barrier (), ilist);
}
@@ -2153,12 +2165,11 @@ static void
expand_parallel_call (struct omp_region *region, basic_block bb,
tree entry_stmt, tree ws_args)
{
- tree t, t1, t2, val, cond, c, list, clauses;
+ tree t, t1, t2, val, cond, c, clauses;
block_stmt_iterator si;
int start_ix;
clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
- push_gimplify_context ();
/* Determine what flavor of GOMP_parallel_start we will be
emitting. */
@@ -2204,15 +2215,28 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
cond = gimple_boolify (cond);
if (integer_zerop (val))
- val = build2 (EQ_EXPR, unsigned_type_node, cond,
- build_int_cst (TREE_TYPE (cond), 0));
+ val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
+ build_int_cst (TREE_TYPE (cond), 0));
else
{
basic_block cond_bb, then_bb, else_bb;
- edge e;
- tree t, tmp;
+ edge e, e_then, e_else;
+ tree t, tmp_then, tmp_else, tmp_join, tmp_var;
+
+ tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
+ if (gimple_in_ssa_p (cfun))
+ {
+ tmp_then = make_ssa_name (tmp_var, NULL_TREE);
+ tmp_else = make_ssa_name (tmp_var, NULL_TREE);
+ tmp_join = make_ssa_name (tmp_var, NULL_TREE);
+ }
+ else
+ {
+ tmp_then = tmp_var;
+ tmp_else = tmp_var;
+ tmp_join = tmp_var;
+ }
- tmp = create_tmp_var (TREE_TYPE (val), NULL);
e = split_block (bb, NULL);
cond_bb = e->src;
bb = e->dest;
@@ -2220,6 +2244,8 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
then_bb = create_empty_bb (cond_bb);
else_bb = create_empty_bb (then_bb);
+ set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
+ set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
t = build3 (COND_EXPR, void_type_node,
cond, NULL_TREE, NULL_TREE);
@@ -2228,29 +2254,40 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
si = bsi_start (then_bb);
- t = build_gimple_modify_stmt (tmp, val);
+ t = build_gimple_modify_stmt (tmp_then, val);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (tmp_then) = t;
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
si = bsi_start (else_bb);
- t = build_gimple_modify_stmt (tmp,
+ t = build_gimple_modify_stmt (tmp_else,
build_int_cst (unsigned_type_node, 1));
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (tmp_else) = t;
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
- make_edge (then_bb, bb, EDGE_FALLTHRU);
- make_edge (else_bb, bb, EDGE_FALLTHRU);
+ e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
+ e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
- val = tmp;
+ if (gimple_in_ssa_p (cfun))
+ {
+ tree phi = create_phi_node (tmp_join, bb);
+ SSA_NAME_DEF_STMT (tmp_join) = phi;
+ add_phi_arg (phi, tmp_then, e_then);
+ add_phi_arg (phi, tmp_else, e_else);
+ }
+
+ val = tmp_join;
}
- list = NULL_TREE;
- val = get_formal_tmp_var (val, &list);
si = bsi_start (bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
}
- list = NULL_TREE;
+ si = bsi_last (bb);
t = OMP_PARALLEL_DATA_ARG (entry_stmt);
if (t == NULL)
t1 = null_pointer_node;
@@ -2268,7 +2305,8 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
else
t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
- gimplify_and_add (t, &list);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = OMP_PARALLEL_DATA_ARG (entry_stmt);
if (t == NULL)
@@ -2276,15 +2314,12 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
else
t = build_fold_addr_expr (t);
t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
- gimplify_and_add (t, &list);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
- gimplify_and_add (t, &list);
-
- si = bsi_last (bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
-
- pop_gimplify_context (NULL_TREE);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
}
@@ -2408,7 +2443,6 @@ expand_omp_parallel (struct omp_region *region)
block_stmt_iterator si;
tree entry_stmt;
edge e;
- bool do_cleanup_cfg = false;
entry_stmt = last_stmt (region->entry);
child_fn = OMP_PARALLEL_FN (entry_stmt);
@@ -2437,13 +2471,12 @@ expand_omp_parallel (struct omp_region *region)
bsi_remove (&si, true);
new_bb = entry_bb;
- remove_edge (entry_succ_e);
if (exit_bb)
{
exit_succ_e = single_succ_edge (exit_bb);
make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
}
- do_cleanup_cfg = true;
+ remove_edge_and_dominated_blocks (entry_succ_e);
}
else
{
@@ -2464,6 +2497,7 @@ expand_omp_parallel (struct omp_region *region)
{
basic_block entry_succ_bb = single_succ (entry_bb);
block_stmt_iterator si;
+ tree parcopy_stmt = NULL_TREE, arg, narg;
for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
{
@@ -2480,14 +2514,32 @@ expand_omp_parallel (struct omp_region *region)
&& TREE_OPERAND (arg, 0)
== OMP_PARALLEL_DATA_ARG (entry_stmt))
{
- if (GIMPLE_STMT_OPERAND (stmt, 0)
- == DECL_ARGUMENTS (child_fn))
- bsi_remove (&si, true);
- else
- GIMPLE_STMT_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn);
+ parcopy_stmt = stmt;
break;
}
}
+
+ gcc_assert (parcopy_stmt != NULL_TREE);
+ arg = DECL_ARGUMENTS (child_fn);
+
+ if (!gimple_in_ssa_p (cfun))
+ {
+ if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
+ bsi_remove (&si, true);
+ else
+ GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
+ }
+ else
+ {
+ /* If we are in ssa form, we must load the value from the default
+ definition of the argument. That should not be defined now,
+ since the argument is not used uninitialized. */
+ gcc_assert (gimple_default_def (cfun, arg) == NULL);
+ narg = make_ssa_name (arg, build_empty_stmt ());
+ set_default_def (arg, narg);
+ GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
+ update_stmt (parcopy_stmt);
+ }
}
/* Declare local variables needed in CHILD_CFUN. */
@@ -2495,10 +2547,7 @@ expand_omp_parallel (struct omp_region *region)
BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
- /* Reset DECL_CONTEXT on locals and function arguments. */
- for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
- DECL_CONTEXT (t) = child_fn;
-
+ /* Reset DECL_CONTEXT on function arguments. */
for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
@@ -2512,17 +2561,6 @@ expand_omp_parallel (struct omp_region *region)
entry_bb = e->dest;
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
- /* Move the parallel region into CHILD_CFUN. We need to reset
- dominance information because the expansion of the inner
- regions has invalidated it. */
- free_dominance_info (CDI_DOMINATORS);
- new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
- if (exit_bb)
- single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
- DECL_STRUCT_FUNCTION (child_fn)->curr_properties
- = cfun->curr_properties;
- cgraph_add_new_function (child_fn, true);
-
/* Convert OMP_RETURN into a RETURN_EXPR. */
if (exit_bb)
{
@@ -2533,18 +2571,35 @@ expand_omp_parallel (struct omp_region *region)
bsi_insert_after (&si, t, BSI_SAME_STMT);
bsi_remove (&si, true);
}
+
+ /* Move the parallel region into CHILD_CFUN. */
+
+ if (gimple_in_ssa_p (cfun))
+ {
+ push_cfun (child_cfun);
+ init_tree_ssa ();
+ init_ssa_operands ();
+ cfun->gimple_df->in_ssa_p = true;
+ pop_cfun ();
+ }
+ new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
+ if (exit_bb)
+ single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
+
+ /* Inform the callgraph about the new function. */
+ DECL_STRUCT_FUNCTION (child_fn)->curr_properties
+ = cfun->curr_properties;
+ cgraph_add_new_function (child_fn, true);
+
+ /* Fix the callgraph edges for child_cfun. Those for cfun will be
+ fixed in a following pass. */
+ push_cfun (child_cfun);
+ rebuild_cgraph_edges ();
+ pop_cfun ();
}
/* Emit a library call to launch the children threads. */
expand_parallel_call (region, new_bb, entry_stmt, ws_args);
-
- if (do_cleanup_cfg)
- {
- /* Clean up the unreachable sub-graph we created above. */
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
- cleanup_tree_cfg ();
- }
}
@@ -2569,7 +2624,7 @@ expand_omp_parallel (struct omp_region *region)
L3:
If this is a combined omp parallel loop, instead of the call to
- GOMP_loop_foo_start, we emit 'goto L2'. */
+ GOMP_loop_foo_start, we call GOMP_loop_foo_next. */
static void
expand_omp_for_generic (struct omp_region *region,
@@ -2577,13 +2632,14 @@ expand_omp_for_generic (struct omp_region *region,
enum built_in_function start_fn,
enum built_in_function next_fn)
{
- tree type, istart0, iend0, iend;
- tree t, list;
+ tree type, istart0, iend0, iend, phi;
+ tree t, vmain, vback;
basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
basic_block l2_bb = NULL, l3_bb = NULL;
block_stmt_iterator si;
bool in_combined_parallel = is_combined_parallel (region);
bool broken_loop = region->cont == NULL;
+ edge e, ne;
gcc_assert (!broken_loop || !in_combined_parallel);
@@ -2591,9 +2647,13 @@ expand_omp_for_generic (struct omp_region *region,
istart0 = create_tmp_var (long_integer_type_node, ".istart0");
iend0 = create_tmp_var (long_integer_type_node, ".iend0");
- iend = create_tmp_var (type, NULL);
TREE_ADDRESSABLE (istart0) = 1;
TREE_ADDRESSABLE (iend0) = 1;
+ if (gimple_in_ssa_p (cfun))
+ {
+ add_referenced_var (istart0);
+ add_referenced_var (iend0);
+ }
entry_bb = region->entry;
cont_bb = region->cont;
@@ -2615,12 +2675,19 @@ expand_omp_for_generic (struct omp_region *region,
si = bsi_last (entry_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- if (!in_combined_parallel)
+ if (in_combined_parallel)
+ {
+ /* In a combined parallel loop, emit a call to
+ GOMP_loop_foo_next. */
+ t = build_call_expr (built_in_decls[next_fn], 2,
+ build_fold_addr_expr (istart0),
+ build_fold_addr_expr (iend0));
+ }
+ else
{
tree t0, t1, t2, t3, t4;
/* If this is not a combined parallel loop, emit a call to
GOMP_loop_foo_start in ENTRY_BB. */
- list = alloc_stmt_list ();
t4 = build_fold_addr_expr (iend0);
t3 = build_fold_addr_expr (istart0);
t2 = fold_convert (long_integer_type_node, fd->step);
@@ -2635,58 +2702,80 @@ expand_omp_for_generic (struct omp_region *region,
else
t = build_call_expr (built_in_decls[start_fn], 5,
t0, t1, t2, t3, t4);
- t = get_formal_tmp_var (t, &list);
- t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
}
+ t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+
+ /* V may be used outside of the loop (e.g., to handle lastprivate clause).
+ If this is the case, its value is undefined if the loop is not entered
+ at all. To handle this case, set its initial value to N1. */
+ if (gimple_in_ssa_p (cfun))
+ {
+ e = find_edge (entry_bb, l3_bb);
+ for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
+ if (PHI_ARG_DEF_FROM_EDGE (phi, e) == fd->v)
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), fd->n1);
+ }
+ else
+ {
+ t = build_gimple_modify_stmt (fd->v, fd->n1);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ }
+
+ /* Remove the OMP_FOR statement. */
bsi_remove (&si, true);
/* Iteration setup for sequential loop goes in L0_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (l0_bb);
t = fold_convert (type, istart0);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = fold_convert (type, iend0);
- t = build_gimple_modify_stmt (iend, t);
- gimplify_and_add (t, &list);
-
- si = bsi_start (l0_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
if (!broken_loop)
{
/* Code to control the increment and predicate for the sequential
loop goes in the CONT_BB. */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ si = bsi_last (cont_bb);
+ t = bsi_stmt (si);
+ gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (t, 1);
+ vback = TREE_OPERAND (t, 0);
+
+ t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build_gimple_modify_stmt (vback, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vback) = t;
- t = build2 (fd->cond_code, boolean_type_node, fd->v, iend);
- t = get_formal_tmp_var (t, &list);
+ t = build2 (fd->cond_code, boolean_type_node, vback, iend);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (cont_bb);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
+ /* Remove OMP_CONTINUE. */
bsi_remove (&si, true);
/* Emit code to get the next parallel iteration in L2_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (l2_bb);
t = build_call_expr (built_in_decls[next_fn], 2,
build_fold_addr_expr (istart0),
build_fold_addr_expr (iend0));
- t = get_formal_tmp_var (t, &list);
+ t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
-
- si = bsi_start (l2_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
}
/* Add the loop cleanup function. */
@@ -2700,25 +2789,31 @@ expand_omp_for_generic (struct omp_region *region,
bsi_remove (&si, true);
/* Connect the new blocks. */
- if (in_combined_parallel)
- {
- remove_edge (BRANCH_EDGE (entry_bb));
- redirect_edge_and_branch (single_succ_edge (entry_bb), l2_bb);
- }
- else
- {
- find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
- find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
- }
+ find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
+ find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
if (!broken_loop)
{
+ e = find_edge (cont_bb, l3_bb);
+ ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
+
+ for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
+ SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
+ PHI_ARG_DEF_FROM_EDGE (phi, e));
+ remove_edge (e);
+
find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
- remove_edge (find_edge (cont_bb, l3_bb));
make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
-
make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
- make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
+
+ set_immediate_dominator (CDI_DOMINATORS, l2_bb,
+ recompute_dominator (CDI_DOMINATORS, l2_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l3_bb,
+ recompute_dominator (CDI_DOMINATORS, l3_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l0_bb,
+ recompute_dominator (CDI_DOMINATORS, l0_bb));
+ set_immediate_dominator (CDI_DOMINATORS, l1_bb,
+ recompute_dominator (CDI_DOMINATORS, l1_bb));
}
}
@@ -2740,9 +2835,9 @@ expand_omp_for_generic (struct omp_region *region,
q += (q * nthreads != n);
s0 = q * threadid;
e0 = min(s0 + q, n);
+ V = s0 * STEP + N1;
if (s0 >= e0) goto L2; else goto L0;
L0:
- V = s0 * STEP + N1;
e = e0 * STEP + N1;
L1:
BODY;
@@ -2756,7 +2851,7 @@ expand_omp_for_static_nochunk (struct omp_region *region,
struct omp_for_data *fd)
{
tree n, q, s0, e0, e, t, nthreads, threadid;
- tree type, list;
+ tree type, vmain, vback;
basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
basic_block fin_bb;
block_stmt_iterator si;
@@ -2775,27 +2870,33 @@ expand_omp_for_static_nochunk (struct omp_region *region,
exit_bb = region->exit;
/* Iteration space partitioning goes in ENTRY_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (entry_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
t = fold_convert (type, t);
- nthreads = get_formal_tmp_var (t, &list);
+ nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (type, t);
- threadid = get_formal_tmp_var (t, &list);
+ threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->n1 = fold_convert (type, fd->n1);
- if (!is_gimple_val (fd->n1))
- fd->n1 = get_formal_tmp_var (fd->n1, &list);
+ fd->n1 = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->n1),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->n2 = fold_convert (type, fd->n2);
- if (!is_gimple_val (fd->n2))
- fd->n2 = get_formal_tmp_var (fd->n2, &list);
+ fd->n2 = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->n2),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
- fd->step = fold_convert (type, fd->step);
- if (!is_gimple_val (fd->step))
- fd->step = get_formal_tmp_var (fd->step, &list);
+ fd->step = force_gimple_operand_bsi (&si,
+ fold_convert (type, fd->step),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, type, fd->step, t);
@@ -2803,85 +2904,90 @@ expand_omp_for_static_nochunk (struct omp_region *region,
t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
t = fold_convert (type, t);
- if (is_gimple_val (t))
- n = t;
- else
- n = get_formal_tmp_var (t, &list);
+ n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (TRUNC_DIV_EXPR, type, n, nthreads);
- q = get_formal_tmp_var (t, &list);
+ t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
+ q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (MULT_EXPR, type, q, nthreads);
- t = build2 (NE_EXPR, type, t, n);
- t = build2 (PLUS_EXPR, type, q, t);
- q = get_formal_tmp_var (t, &list);
+ t = fold_build2 (MULT_EXPR, type, q, nthreads);
+ t = fold_build2 (NE_EXPR, type, t, n);
+ t = fold_build2 (PLUS_EXPR, type, q, t);
+ q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
t = build2 (MULT_EXPR, type, q, threadid);
- s0 = get_formal_tmp_var (t, &list);
+ s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
+
+ t = fold_build2 (PLUS_EXPR, type, s0, q);
+ t = fold_build2 (MIN_EXPR, type, t, n);
+ e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (PLUS_EXPR, type, s0, q);
- t = build2 (MIN_EXPR, type, t, n);
- e0 = get_formal_tmp_var (t, &list);
+ t = fold_convert (type, s0);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build_gimple_modify_stmt (fd->v, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = build2 (GE_EXPR, boolean_type_node, s0, e0);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (entry_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ /* Remove the OMP_FOR statement. */
bsi_remove (&si, true);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
- list = alloc_stmt_list ();
-
- t = fold_convert (type, s0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ si = bsi_start (seq_start_bb);
t = fold_convert (type, e0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
- e = get_formal_tmp_var (t, &list);
-
- si = bsi_start (seq_start_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
/* The code controlling the sequential loop replaces the OMP_CONTINUE. */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
-
- t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
- t = get_formal_tmp_var (t, &list);
+ si = bsi_last (cont_bb);
+ t = bsi_stmt (si);
+ gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (t, 1);
+ vback = TREE_OPERAND (t, 0);
+
+ t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ true, BSI_SAME_STMT);
+ t = build_gimple_modify_stmt (vback, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vback) = t;
+
+ t = build2 (fd->cond_code, boolean_type_node, vback, e);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (cont_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ /* Remove the OMP_CONTINUE statement. */
bsi_remove (&si, true);
/* Replace the OMP_RETURN with a barrier, or nothing. */
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
- {
- list = alloc_stmt_list ();
- build_omp_barrier (&list);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
/* Connect all the blocks. */
find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
-
+
find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
+
+ set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
+ set_immediate_dominator (CDI_DOMINATORS, body_bb,
+ recompute_dominator (CDI_DOMINATORS, body_bb));
+ set_immediate_dominator (CDI_DOMINATORS, fin_bb,
+ recompute_dominator (CDI_DOMINATORS, fin_bb));
}
@@ -2899,6 +3005,9 @@ expand_omp_for_static_nochunk (struct omp_region *region,
adj = STEP + 1;
n = (adj + N2 - N1) / STEP;
trip = 0;
+ V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
+ here so that V is defined
+ if the loop is not entered
L0:
s0 = (trip * nthreads + threadid) * CHUNK;
e0 = min(s0 + CHUNK, n);
@@ -2919,14 +3028,13 @@ expand_omp_for_static_nochunk (struct omp_region *region,
static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
- tree n, s0, e0, e, t;
- tree trip, nthreads, threadid;
- tree type;
+ tree n, s0, e0, e, t, phi, nphi, args;
+ tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
+ tree type, cont, v_main, v_back, v_extra;
basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
basic_block trip_update_bb, cont_bb, fin_bb;
- tree list;
block_stmt_iterator si;
- edge se;
+ edge se, re, ene;
type = TREE_TYPE (fd->v);
@@ -2947,31 +3055,33 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
exit_bb = region->exit;
/* Trip and adjustment setup goes in ENTRY_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (entry_bb);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
t = fold_convert (type, t);
- nthreads = get_formal_tmp_var (t, &list);
+ nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
t = fold_convert (type, t);
- threadid = get_formal_tmp_var (t, &list);
-
- fd->n1 = fold_convert (type, fd->n1);
- if (!is_gimple_val (fd->n1))
- fd->n1 = get_formal_tmp_var (fd->n1, &list);
-
- fd->n2 = fold_convert (type, fd->n2);
- if (!is_gimple_val (fd->n2))
- fd->n2 = get_formal_tmp_var (fd->n2, &list);
-
- fd->step = fold_convert (type, fd->step);
- if (!is_gimple_val (fd->step))
- fd->step = get_formal_tmp_var (fd->step, &list);
-
- fd->chunk_size = fold_convert (type, fd->chunk_size);
- if (!is_gimple_val (fd->chunk_size))
- fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);
+ threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ fd->chunk_size
+ = force_gimple_operand_bsi (&si, fold_convert (type,
+ fd->chunk_size),
+ true, NULL_TREE,
+ true, BSI_SAME_STMT);
t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, type, fd->step, t);
@@ -2979,102 +3089,170 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
t = fold_convert (type, t);
- if (is_gimple_val (t))
- n = t;
+ n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ trip_var = create_tmp_var (type, ".trip");
+ if (gimple_in_ssa_p (cfun))
+ {
+ add_referenced_var (trip_var);
+ trip_init = make_ssa_name (trip_var, NULL_TREE);
+ trip_main = make_ssa_name (trip_var, NULL_TREE);
+ trip_back = make_ssa_name (trip_var, NULL_TREE);
+ }
else
- n = get_formal_tmp_var (t, &list);
+ {
+ trip_init = trip_var;
+ trip_main = trip_var;
+ trip_back = trip_var;
+ }
- t = build_int_cst (type, 0);
- trip = get_initialized_tmp_var (t, &list, NULL);
+ t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (trip_init) = t;
- si = bsi_last (entry_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+
+ /* Remove the OMP_FOR. */
bsi_remove (&si, true);
/* Iteration space partitioning goes in ITER_PART_BB. */
- list = alloc_stmt_list ();
+ si = bsi_last (iter_part_bb);
- t = build2 (MULT_EXPR, type, trip, nthreads);
- t = build2 (PLUS_EXPR, type, t, threadid);
- t = build2 (MULT_EXPR, type, t, fd->chunk_size);
- s0 = get_formal_tmp_var (t, &list);
+ t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
+ t = fold_build2 (PLUS_EXPR, type, t, threadid);
+ t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
+ s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
- t = build2 (PLUS_EXPR, type, s0, fd->chunk_size);
- t = build2 (MIN_EXPR, type, t, n);
- e0 = get_formal_tmp_var (t, &list);
+ t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
+ t = fold_build2 (MIN_EXPR, type, t, n);
+ e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build2 (LT_EXPR, boolean_type_node, s0, n);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
-
- si = bsi_start (iter_part_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (seq_start_bb);
t = fold_convert (type, s0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (fd->v) = t;
t = fold_convert (type, e0);
- t = build2 (MULT_EXPR, type, t, fd->step);
- t = build2 (PLUS_EXPR, type, t, fd->n1);
- e = get_formal_tmp_var (t, &list);
-
- si = bsi_start (seq_start_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = fold_build2 (MULT_EXPR, type, t, fd->step);
+ t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
/* The code controlling the sequential loop goes in CONT_BB,
replacing the OMP_CONTINUE. */
- list = alloc_stmt_list ();
-
- t = build2 (PLUS_EXPR, type, fd->v, fd->step);
- t = build_gimple_modify_stmt (fd->v, t);
- gimplify_and_add (t, &list);
-
- t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
- t = get_formal_tmp_var (t, &list);
+ si = bsi_last (cont_bb);
+ cont = bsi_stmt (si);
+ gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
+ v_main = TREE_OPERAND (cont, 1);
+ v_back = TREE_OPERAND (cont, 0);
+
+ t = build2 (PLUS_EXPR, type, v_main, fd->step);
+ t = build_gimple_modify_stmt (v_back, t);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (v_back) = t;
+
+ t = build2 (fd->cond_code, boolean_type_node, v_back, e);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
- append_to_statement_list (t, &list);
+ bsi_insert_before (&si, t, BSI_SAME_STMT);
- si = bsi_last (cont_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
+ /* Remove OMP_CONTINUE. */
bsi_remove (&si, true);
/* Trip update code goes into TRIP_UPDATE_BB. */
- list = alloc_stmt_list ();
+ si = bsi_start (trip_update_bb);
t = build_int_cst (type, 1);
- t = build2 (PLUS_EXPR, type, trip, t);
- t = build_gimple_modify_stmt (trip, t);
- gimplify_and_add (t, &list);
-
- si = bsi_start (trip_update_bb);
- bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
+ t = build2 (PLUS_EXPR, type, trip_main, t);
+ t = build_gimple_modify_stmt (trip_back, t);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (trip_back) = t;
/* Replace the OMP_RETURN with a barrier, or nothing. */
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
- {
- list = alloc_stmt_list ();
- build_omp_barrier (&list);
- bsi_insert_after (&si, list, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
/* Connect the new blocks. */
find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
-
+
find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
-
+
redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
+
+ if (gimple_in_ssa_p (cfun))
+ {
+ /* When we redirect the edge from trip_update_bb to iter_part_bb, we
+ remove arguments of the phi nodes in fin_bb. We need to create
+ appropriate phi nodes in iter_part_bb instead. */
+ se = single_pred_edge (fin_bb);
+ re = single_succ_edge (trip_update_bb);
+ ene = single_succ_edge (entry_bb);
+
+ args = PENDING_STMT (re);
+ PENDING_STMT (re) = NULL_TREE;
+ for (phi = phi_nodes (fin_bb);
+ phi && args;
+ phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
+ {
+ t = PHI_RESULT (phi);
+ gcc_assert (t == TREE_PURPOSE (args));
+ nphi = create_phi_node (t, iter_part_bb);
+ SSA_NAME_DEF_STMT (t) = nphi;
+
+ t = PHI_ARG_DEF_FROM_EDGE (phi, se);
+ /* A special case -- fd->v is not yet computed in iter_part_bb, we
+ need to use v_extra instead. */
+ if (t == fd->v)
+ t = v_extra;
+ add_phi_arg (nphi, t, ene);
+ add_phi_arg (nphi, TREE_VALUE (args), re);
+ }
+ gcc_assert (!phi && !args);
+ while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
+ remove_phi_node (phi, NULL_TREE, false);
+
+ /* Make phi node for trip. */
+ phi = create_phi_node (trip_main, iter_part_bb);
+ SSA_NAME_DEF_STMT (trip_main) = phi;
+ add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
+ add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
+ }
+
+ set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
+ set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
+ recompute_dominator (CDI_DOMINATORS, iter_part_bb));
+ set_immediate_dominator (CDI_DOMINATORS, fin_bb,
+ recompute_dominator (CDI_DOMINATORS, fin_bb));
+ set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
+ recompute_dominator (CDI_DOMINATORS, seq_start_bb));
+ set_immediate_dominator (CDI_DOMINATORS, body_bb,
+ recompute_dominator (CDI_DOMINATORS, body_bb));
}
@@ -3085,8 +3263,6 @@ expand_omp_for (struct omp_region *region)
{
struct omp_for_data fd;
- push_gimplify_context ();
-
extract_omp_for_data (last_stmt (region->entry), &fd);
region->sched_kind = fd.sched_kind;
@@ -3106,8 +3282,6 @@ expand_omp_for (struct omp_region *region)
int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
expand_omp_for_generic (region, &fd, start_ix, next_ix);
}
-
- pop_gimplify_context (NULL);
}
@@ -3136,12 +3310,12 @@ expand_omp_for (struct omp_region *region)
reduction;
If this is a combined parallel sections, replace the call to
- GOMP_sections_start with 'goto L1'. */
+ GOMP_sections_start with call to GOMP_sections_next. */
static void
expand_omp_sections (struct omp_region *region)
{
- tree label_vec, l1, l2, t, u, v, sections_stmt;
+ tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
unsigned i, casei, len;
basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
block_stmt_iterator si;
@@ -3178,7 +3352,7 @@ expand_omp_sections (struct omp_region *region)
si = bsi_last (entry_bb);
sections_stmt = bsi_stmt (si);
gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
- v = OMP_SECTIONS_CONTROL (sections_stmt);
+ vin = OMP_SECTIONS_CONTROL (sections_stmt);
if (!is_combined_parallel (region))
{
/* If we are not inside a combined parallel+sections region,
@@ -3187,16 +3361,36 @@ expand_omp_sections (struct omp_region *region)
exit_reachable ? len - 1 : len);
u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
t = build_call_expr (u, 1, t);
- t = build_gimple_modify_stmt (v, t);
- bsi_insert_after (&si, t, BSI_SAME_STMT);
}
+ else
+ {
+ /* Otherwise, call GOMP_sections_next. */
+ u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
+ t = build_call_expr (u, 0);
+ }
+ t = build_gimple_modify_stmt (vin, t);
+ bsi_insert_after (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vin) = t;
bsi_remove (&si, true);
/* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
si = bsi_last (l0_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
+ if (exit_reachable)
+ {
+ cont = last_stmt (l1_bb);
+ gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
+ vmain = TREE_OPERAND (cont, 1);
+ vnext = TREE_OPERAND (cont, 0);
+ }
+ else
+ {
+ vmain = vin;
+ vnext = NULL_TREE;
+ }
- t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec);
+ t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
bsi_insert_after (&si, t, BSI_SAME_STMT);
bsi_remove (&si, true);
@@ -3257,8 +3451,10 @@ expand_omp_sections (struct omp_region *region)
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
- t = build_gimple_modify_stmt (v, t);
+ t = build_gimple_modify_stmt (vnext, t);
bsi_insert_after (&si, t, BSI_SAME_STMT);
+ if (gimple_in_ssa_p (cfun))
+ SSA_NAME_DEF_STMT (vnext) = t;
bsi_remove (&si, true);
single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
@@ -3274,15 +3470,7 @@ expand_omp_sections (struct omp_region *region)
bsi_remove (&si, true);
}
- /* Connect the new blocks. */
- if (is_combined_parallel (region))
- {
- /* If this was a combined parallel+sections region, we did not
- emit a GOMP_sections_start in the entry block, so we just
- need to jump to L1_BB to get the next section. */
- gcc_assert (exit_reachable);
- redirect_edge_and_branch (single_succ_edge (entry_bb), l1_bb);
- }
+ set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
@@ -3312,11 +3500,8 @@ expand_omp_single (struct omp_region *region)
si = bsi_last (exit_bb);
if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
- {
- tree t = alloc_stmt_list ();
- build_omp_barrier (&t);
- bsi_insert_after (&si, t, BSI_SAME_STMT);
- }
+ force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
+ false, BSI_SAME_STMT);
bsi_remove (&si, true);
single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
@@ -3498,8 +3683,6 @@ execute_expand_omp (void)
expand_omp (root_omp_region);
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
cleanup_tree_cfg ();
free_omp_regions ();
@@ -3507,10 +3690,38 @@ execute_expand_omp (void)
return 0;
}
+/* OMP expansion in SSA form. For testing purposes only. */
+
+static bool
+gate_expand_omp_ssa (void)
+{
+ return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
+}
+
+struct tree_opt_pass pass_expand_omp_ssa =
+{
+ "ompexpssa", /* name */
+ gate_expand_omp_ssa, /* gate */
+ execute_expand_omp, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ 0, /* tv_id */
+ PROP_gimple_any, /* properties_required */
+ PROP_gimple_lomp, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func, /* todo_flags_finish */
+ 0 /* letter */
+};
+
+/* OMP expansion -- the default pass, run before creation of SSA form. */
+
static bool
gate_expand_omp (void)
{
- return flag_openmp != 0 && errorcount == 0;
+ return ((!flag_openmp_ssa || !optimize)
+ && flag_openmp != 0 && errorcount == 0);
}
struct tree_opt_pass pass_expand_omp =