summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--gcc/ChangeLog11
-rw-r--r--gcc/tree-ssa-threadupdate.c77
2 files changed, 69 insertions, 19 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 93b086231a1..5db305fa730 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,14 @@
+2011-05-03 Jeff Law <law@redhat.com>
+
+ * tree-ssa-threadupdate.c (THREAD_TARGET): define.
+ (remove_ctrl_stmt_and_useless_edges): Clear AUX field of outgoing edges.
+ (create_edge_and_update_destination_phis): Use THREAD_TARGET rather
+ than accessing AUX field directly. Free the AUX field before clearing it.
+ (thread_block, thread_through_loop_header): Likewise.
+ (thread_single_edge, mark_threaded_blocks): Likewise.
+ (redirect_edges): Delay clearing the AUX field. Free the AUX field.
+ (register_jump_thread): Do not attempt to thread to a NULL edge.
+
2011-05-03 Bernd Schmidt <bernds@codesourcery.com>
* function.c (init_function_start): Call decide_function_section.
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 40669eb8101..841b710d19b 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -149,6 +149,10 @@ struct local_info
(original_edge, target_edge). */
static VEC(edge,heap) *threaded_edges;
+/* When we start updating the CFG for threading, data necessary for jump
+ threading is attached to the AUX field for the incoming edge. Use these
+ macros to access the underlying structure attached to the AUX field. */
+#define THREAD_TARGET(E) ((edge *)(E)->aux)[0]
/* Jump threading statistics. */
@@ -200,10 +204,16 @@ remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
static void
create_block_for_threading (basic_block bb, struct redirection_data *rd)
{
+ edge_iterator ei;
+ edge e;
+
/* We can use the generic block duplication code and simply remove
the stuff we do not need. */
rd->dup_block = duplicate_block (bb, NULL, NULL);
+ FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
+ e->aux = NULL;
+
/* Zero out the profile, since the block is unreachable for now. */
rd->dup_block->frequency = 0;
rd->dup_block->count = 0;
@@ -314,7 +324,16 @@ create_edge_and_update_destination_phis (struct redirection_data *rd,
rescan_loop_exit (e, true, false);
e->probability = REG_BR_PROB_BASE;
e->count = bb->count;
- e->aux = rd->outgoing_edge->aux;
+
+ if (rd->outgoing_edge->aux)
+ {
+ e->aux = (edge *) XNEWVEC (edge, 1);
+ THREAD_TARGET(e) = THREAD_TARGET (rd->outgoing_edge);
+ }
+ else
+ {
+ e->aux = NULL;
+ }
/* If there are any PHI nodes at the destination of the outgoing edge
from the duplicate block, then we will need to add a new argument
@@ -406,10 +425,6 @@ redirect_edges (void **slot, void *data)
next = el->next;
free (el);
- /* Go ahead and clear E->aux. It's not needed anymore and failure
- to clear it will cause all kinds of unpleasant problems later. */
- e->aux = NULL;
-
thread_stats.num_threaded_edges++;
if (rd->dup_block)
@@ -429,6 +444,12 @@ redirect_edges (void **slot, void *data)
gcc_assert (e == e2);
flush_pending_stmts (e2);
}
+
+ /* Go ahead and clear E->aux. It's not needed anymore and failure
+ to clear it will cause all kinds of unpleasant problems later. */
+ free (e->aux);
+ e->aux = NULL;
+
}
/* Indicate that we actually threaded one or more jumps. */
@@ -512,7 +533,11 @@ thread_block (basic_block bb, bool noloop_only)
if (loop->header == bb)
{
e = loop_latch_edge (loop);
- e2 = (edge) e->aux;
+
+ if (e->aux)
+ e2 = THREAD_TARGET (e);
+ else
+ e2 = NULL;
if (e2 && loop_exit_edge_p (loop, e2))
{
@@ -525,19 +550,22 @@ thread_block (basic_block bb, bool noloop_only)
efficient lookups. */
FOR_EACH_EDGE (e, ei, bb->preds)
{
- e2 = (edge) e->aux;
+ if (e->aux == NULL)
+ continue;
+
+ e2 = THREAD_TARGET (e);
if (!e2
/* If NOLOOP_ONLY is true, we only allow threading through the
header of a loop to exit edges. */
|| (noloop_only
&& bb == bb->loop_father->header
- && !loop_exit_edge_p (bb->loop_father, e2)))
+ && (!loop_exit_edge_p (bb->loop_father, e2))))
continue;
if (e->dest == e2->src)
update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
- e->count, (edge) e->aux);
+ e->count, THREAD_TARGET (e));
/* Insert the outgoing edge into the hash table if it is not
already in the hash table. */
@@ -582,17 +610,18 @@ thread_block (basic_block bb, bool noloop_only)
return local_info.jumps_threaded;
}
-/* Threads edge E through E->dest to the edge E->aux. Returns the copy
- of E->dest created during threading, or E->dest if it was not necessary
+/* Threads edge E through E->dest to the edge THREAD_TARGET (E). Returns the
+ copy of E->dest created during threading, or E->dest if it was not necessary
to copy it (E is its single predecessor). */
static basic_block
thread_single_edge (edge e)
{
basic_block bb = e->dest;
- edge eto = (edge) e->aux;
+ edge eto = THREAD_TARGET (e);
struct redirection_data rd;
+ free (e->aux);
e->aux = NULL;
thread_stats.num_threaded_edges++;
@@ -794,7 +823,7 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
if (latch->aux)
{
- tgt_edge = (edge) latch->aux;
+ tgt_edge = THREAD_TARGET (latch);
tgt_bb = tgt_edge->dest;
}
else if (!may_peel_loop_headers
@@ -817,7 +846,7 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
goto fail;
}
- tgt_edge = (edge) e->aux;
+ tgt_edge = THREAD_TARGET (e);
atgt_bb = tgt_edge->dest;
if (!tgt_bb)
tgt_bb = atgt_bb;
@@ -883,7 +912,7 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
/* Now consider the case entry edges are redirected to the new entry
block. Remember one entry edge, so that we can find the new
- preheader (its destination after threading). */
+ preheader (its destination after threading). */
FOR_EACH_EDGE (e, ei, header->preds)
{
if (e->aux)
@@ -915,6 +944,7 @@ fail:
/* We failed to thread anything. Cancel the requests. */
FOR_EACH_EDGE (e, ei, header->preds)
{
+ free (e->aux);
e->aux = NULL;
}
return false;
@@ -946,9 +976,10 @@ mark_threaded_blocks (bitmap threaded_blocks)
for (i = 0; i < VEC_length (edge, threaded_edges); i += 2)
{
edge e = VEC_index (edge, threaded_edges, i);
- edge e2 = VEC_index (edge, threaded_edges, i + 1);
+ edge *x = (edge *) XNEWVEC (edge, 1);
- e->aux = e2;
+ x[0] = VEC_index (edge, threaded_edges, i + 1);
+ e->aux = x;
bitmap_set_bit (tmp, e->dest->index);
}
@@ -963,7 +994,10 @@ mark_threaded_blocks (bitmap threaded_blocks)
&& !redirection_block_p (bb))
{
FOR_EACH_EDGE (e, ei, bb->preds)
- e->aux = NULL;
+ {
+ free (e->aux);
+ e->aux = NULL;
+ }
}
else
bitmap_set_bit (threaded_blocks, i);
@@ -1059,8 +1093,13 @@ thread_through_all_blocks (bool may_peel_loop_headers)
void
register_jump_thread (edge e, edge e2)
{
+ /* This can occur if we're jumping to a constant address
+ or something similar. Just get out now. */
+ if (e2 == NULL)
+ return;
+
if (threaded_edges == NULL)
- threaded_edges = VEC_alloc (edge, heap, 10);
+ threaded_edges = VEC_alloc (edge, heap, 15);
if (dump_file && (dump_flags & TDF_DETAILS)
&& e->dest != e2->src)