-rw-r--r--  gcc/ChangeLog                21
-rw-r--r--  gcc/tree-flow.h               3
-rw-r--r--  gcc/tree-ssa-dom.c           17
-rw-r--r--  gcc/tree-ssa-threadupdate.c  76
4 files changed, 92 insertions, 25 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f361cc9cba8..b5fd19ccffb 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,24 @@
+2006-01-11 Jeff Law <law@redhat.com>
+
+ * tree-ssa-threadupdate.c (threaded_edges): New VEC to
+ hold edge pairs.
+ (mark_threaded_blocks, register_jump_thread): New functions.
+ (thread_through_all_blocks): Remove unwanted argument. No
+ longer rely on e->aux to communicate thread target info.
+ Call mark_threaded_blocks. Release the threaded_blocks
+ bitmap and threaded_edges vector when complete.
+ * tree-ssa-dom.c (struct edge_info): Remove redirection_target field.
+ (threaded_blocks): Remove.
+ (tree_ssa_dominator_optimize): Remove initialization and
+ finalization of threaded_blocks. Simplify call to
+ thread_through_all_blocks.
+ (thread_across_edge): Call register_jump_thread rather than
+ storing thread information into e->aux.
+ (free_all_edge_infos): Simplify now that e->aux is no longer
+ used to communicate with thread_through_all_blocks.
+ * tree-flow.h (thread_through_all_blocks): Update prototype.
+ (register_jump_thread): Prototype.
+
2006-01-11 Kazu Hirata <kazu@codesourcery.com>
* df-core.c (df_compact_blocks, df_bb_replace): Use
diff --git a/gcc/tree-flow.h b/gcc/tree-flow.h
index 0c685522bd9..92a6035c6a9 100644
--- a/gcc/tree-flow.h
+++ b/gcc/tree-flow.h
@@ -807,7 +807,8 @@ bool multiplier_allowed_in_address_p (HOST_WIDE_INT);
unsigned multiply_by_cost (HOST_WIDE_INT, enum machine_mode);
/* In tree-ssa-threadupdate.c. */
-extern bool thread_through_all_blocks (bitmap);
+extern bool thread_through_all_blocks (void);
+extern void register_jump_thread (edge, edge);
/* In gimplify.c */
tree force_gimple_operand (tree, tree *, bool, tree);
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index 3631a218b63..b522fac3e44 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -74,9 +74,6 @@ struct edge_info
and its maximum index rather than use a varray. */
tree *cond_equivalences;
unsigned int max_cond_equivalences;
-
- /* If we can thread this edge this field records the new target. */
- edge redirection_target;
};
@@ -141,10 +138,6 @@ static VEC(tree,heap) *const_and_copies_stack;
know their exact value. */
static bitmap nonzero_vars;
-/* Bitmap of blocks that are scheduled to be threaded through. This
- is used to communicate with thread_through_blocks. */
-static bitmap threaded_blocks;
-
/* Stack of SSA_NAMEs which need their NONZERO_VARS property cleared
when the current block is finalized.
@@ -326,10 +319,10 @@ free_all_edge_infos (void)
if (edge_info)
{
- e->aux = edge_info->redirection_target;
if (edge_info->cond_equivalences)
free (edge_info->cond_equivalences);
free (edge_info);
+ e->aux = NULL;
}
}
}
@@ -372,7 +365,6 @@ tree_ssa_dominator_optimize (void)
vrp_variables_stack = VEC_alloc (tree, heap, 20);
stmts_to_rescan = VEC_alloc (tree, heap, 20);
nonzero_vars = BITMAP_ALLOC (NULL);
- threaded_blocks = BITMAP_ALLOC (NULL);
need_eh_cleanup = BITMAP_ALLOC (NULL);
/* Setup callbacks for the generic dominator tree walker. */
@@ -448,7 +440,7 @@ tree_ssa_dominator_optimize (void)
free_all_edge_infos ();
/* Thread jumps, creating duplicate blocks as needed. */
- cfg_altered |= thread_through_all_blocks (threaded_blocks);
+ cfg_altered |= thread_through_all_blocks ();
/* Removal of statements may make some EH edges dead. Purge
such edges from the CFG as needed. */
@@ -487,7 +479,6 @@ tree_ssa_dominator_optimize (void)
/* Reinitialize the various tables. */
bitmap_clear (nonzero_vars);
- bitmap_clear (threaded_blocks);
htab_empty (avail_exprs);
htab_empty (vrp_data);
@@ -533,7 +524,6 @@ tree_ssa_dominator_optimize (void)
/* Free nonzero_vars. */
BITMAP_FREE (nonzero_vars);
- BITMAP_FREE (threaded_blocks);
BITMAP_FREE (need_eh_cleanup);
VEC_free (tree, heap, avail_exprs_stack);
@@ -924,8 +914,7 @@ thread_across_edge (struct dom_walk_data *walk_data, edge e)
edge_info = (struct edge_info *) e->aux;
else
edge_info = allocate_edge_info (e);
- edge_info->redirection_target = taken_edge;
- bitmap_set_bit (threaded_blocks, e->dest->index);
+ register_jump_thread (e, taken_edge);
}
}
}
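Editorial note (not part of the patch): taken together, the tree-ssa-dom.c hunks above reduce the caller side to a two-step protocol: queue each jump-threading opportunity as it is discovered, then let a single call at the end of the pass perform all of the CFG surgery. The sketch below shows that protocol against GCC's internal iteration macros; compute_taken_edge is a hypothetical stand-in for whatever analysis a pass uses, while register_jump_thread and thread_through_all_blocks are exactly the interfaces declared in tree-flow.h by this patch.

/* Sketch only: how a pass would drive the new jump-threading interface.
   compute_taken_edge is a hypothetical placeholder for the pass's own
   analysis; nothing is rewritten until thread_through_all_blocks runs.  */

static bool
example_thread_jumps (void)
{
  bool cfg_altered = false;
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB (bb)
    FOR_EACH_EDGE (e, ei, bb->preds)
      {
	/* If traversing E determines the outgoing edge of E->dest,
	   queue the pair.  The request just lands in threaded_edges;
	   no e->aux or per-pass bitmap bookkeeping is needed here.  */
	edge taken_edge = compute_taken_edge (e);	/* hypothetical */
	if (taken_edge)
	  register_jump_thread (e, taken_edge);
      }

  /* One call performs all queued CFG updates; the bitmap of affected
     blocks is now built internally by mark_threaded_blocks.  */
  cfg_altered |= thread_through_all_blocks ();
  return cfg_altered;
}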
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 8c21ac2aa16..b6264f5de54 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -147,6 +147,14 @@ struct local_info
bool jumps_threaded;
};
+/* Passes which use the jump threading code register jump threading
+ opportunities as they are discovered. We keep the registered
+ jump threading opportunities in this vector as edge pairs
+ (original_edge, target_edge). */
+DEF_VEC_ALLOC_P(edge,heap);
+static VEC(edge,heap) *threaded_edges;
+
+
/* Jump threading statistics. */
struct thread_stats_d
@@ -802,18 +810,37 @@ thread_block (basic_block bb)
return local_info.jumps_threaded;
}
-/* Walk through all blocks and thread incoming edges to the block's
- destinations as requested. This is the only entry point into this
- file.
+/* Walk through the registered jump threads and convert them into a
+ form convenient for this pass.
+
+ Any block which has incoming edges threaded to outgoing edges
+ will have its entry in THREADED_BLOCKS set.
- Blocks which have one or more incoming edges have INCOMING_EDGE_THREADED
- set in the block's annotation.
+ Any threaded edge will have its new outgoing edge stored in the
+ original edge's AUX field.
- Each edge that should be threaded has the new destination edge stored in
- the original edge's AUX field.
+ This form avoids the need to walk all the edges in the CFG to
+ discover blocks which need processing and avoids unnecessary
+ hash table lookups to map from threaded edge to new target. */
- This routine (or one of its callees) will clear INCOMING_EDGE_THREADED
- in the block annotations and the AUX field in the edges.
+static void
+mark_threaded_blocks (bitmap threaded_blocks)
+{
+ unsigned int i;
+
+ for (i = 0; i < VEC_length (edge, threaded_edges); i += 2)
+ {
+ edge e = VEC_index (edge, threaded_edges, i);
+ edge e2 = VEC_index (edge, threaded_edges, i + 1);
+
+ e->aux = e2;
+ bitmap_set_bit (threaded_blocks, e->dest->index);
+ }
+}
+
+
+/* Walk through all blocks and thread incoming edges to the appropriate
+ outgoing edge for each edge pair recorded in THREADED_EDGES.
It is the caller's responsibility to fix the dominance information
and rewrite duplicated SSA_NAMEs back into SSA form.
@@ -821,15 +848,22 @@ thread_block (basic_block bb)
Returns true if one or more edges were threaded, false otherwise. */
bool
-thread_through_all_blocks (bitmap threaded_blocks)
+thread_through_all_blocks (void)
{
bool retval = false;
unsigned int i;
bitmap_iterator bi;
+ bitmap threaded_blocks;
+
+ if (threaded_edges == NULL)
+ return false;
+ threaded_blocks = BITMAP_ALLOC (NULL);
rediscover_loops_after_threading = false;
memset (&thread_stats, 0, sizeof (thread_stats));
+ mark_threaded_blocks (threaded_blocks);
+
EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
{
basic_block bb = BASIC_BLOCK (i);
@@ -842,5 +876,27 @@ thread_through_all_blocks (bitmap threaded_blocks)
fprintf (dump_file, "\nJumps threaded: %lu\n",
thread_stats.num_threaded_edges);
+ BITMAP_FREE (threaded_blocks);
+ threaded_blocks = NULL;
+ VEC_free (edge, heap, threaded_edges);
+ threaded_edges = NULL;
return retval;
}
+
+/* Register a jump threading opportunity. We queue up all the jump
+ threading opportunities discovered by a pass and update the CFG
+ and SSA form all at once.
+
+ E is the edge we can thread, E2 is the new target edge.  I.e., we
+ are effectively recording that E->dest can be changed to E2->dest
+ after fixing the SSA graph. */
+
+void
+register_jump_thread (edge e, edge e2)
+{
+ if (threaded_edges == NULL)
+ threaded_edges = VEC_alloc (edge, heap, 10);
+
+ VEC_safe_push (edge, heap, threaded_edges, e);
+ VEC_safe_push (edge, heap, threaded_edges, e2);
+}
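
Editorial note (not part of the patch): for readers unfamiliar with GCC's VEC macros, threaded_edges is simply a flat, heap-allocated array in which elements 2i and 2i+1 form one (incoming edge, taken edge) pair; mark_threaded_blocks walks it in steps of two, stashes the taken edge in the incoming edge's aux field, and records the destination block's index. The self-contained C model below makes that layout concrete. It is illustrative only: the struct fields and names are simplified stand-ins, not GCC's real types.

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for GCC's basic_block and edge.  */
struct blk { int index; };
struct edge_s { struct blk *dest; void *aux; };
typedef struct edge_s *edge;

/* Flat vector of edges: elements 2i and 2i+1 form one
   (incoming edge, taken edge) pair, mirroring threaded_edges.  */
static edge *threaded_edges;
static size_t n_threaded, cap_threaded;

static void
register_jump_thread (edge e, edge e2)
{
  if (n_threaded + 2 > cap_threaded)
    {
      cap_threaded = cap_threaded ? cap_threaded * 2 : 10;
      threaded_edges = realloc (threaded_edges,
				cap_threaded * sizeof *threaded_edges);
    }
  threaded_edges[n_threaded++] = e;
  threaded_edges[n_threaded++] = e2;
}

/* Mirror of mark_threaded_blocks: stash the taken edge in E->aux and
   mark the destination block of every threaded incoming edge.  */
static void
mark_threaded_blocks (int *threaded_blocks)
{
  size_t i;
  for (i = 0; i < n_threaded; i += 2)
    {
      edge e = threaded_edges[i];
      edge e2 = threaded_edges[i + 1];
      e->aux = e2;
      threaded_blocks[e->dest->index] = 1;
    }
}

int
main (void)
{
  struct blk b3 = { 3 }, b7 = { 7 };
  struct edge_s in = { &b3, NULL }, out = { &b7, NULL };
  int threaded_blocks[16] = { 0 };

  register_jump_thread (&in, &out);
  mark_threaded_blocks (threaded_blocks);
  printf ("block 3 marked: %d, in.aux points at out: %d\n",
	  threaded_blocks[3], in.aux == (void *) &out);
  free (threaded_edges);
  return 0;
}

The practical consequence of this layout, as the patch's comments note, is that the set of blocks needing work and the edge-to-target mapping are both derived from the queued pairs inside thread_through_all_blocks, so callers no longer allocate, clear, or pass the threaded_blocks bitmap themselves.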