Diffstat (limited to 'gcc/sched-rgn.c')
-rw-r--r--   gcc/sched-rgn.c   26
1 file changed, 13 insertions, 13 deletions
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index a85ee5b033c..7fa9759f846 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -272,7 +272,7 @@ is_cfg_nonregular (void)
/* If we have insns which refer to labels as non-jumped-to operands,
then we consider the cfg not well structured. */
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
FOR_BB_INSNS (b, insn)
{
rtx note, next, set, dest;
@@ -317,7 +317,7 @@ is_cfg_nonregular (void)
Unreachable loops with a single block are detected here. This
test is redundant with the one in find_rgns, but it's much
cheaper to go ahead and catch the trivial case here. */
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
if (EDGE_COUNT (b->preds) == 0
|| (single_pred_p (b)
@@ -479,7 +479,7 @@ find_single_block_region (bool ebbs_p)
probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
- FOR_EACH_BB (ebb_start)
+ FOR_EACH_BB_FN (ebb_start, cfun)
{
RGN_NR_BLOCKS (nr_regions) = 0;
RGN_BLOCKS (nr_regions) = i;
@@ -512,7 +512,7 @@ find_single_block_region (bool ebbs_p)
}
}
else
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rgn_bb_table[nr_regions] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
@@ -762,7 +762,7 @@ haifa_find_rgns (void)
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (dfs_nr[bb->index] == 0)
{
unreachable = 1;
@@ -773,7 +773,7 @@ haifa_find_rgns (void)
to hold degree counts. */
degree = dfs_nr;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
degree[bb->index] = EDGE_COUNT (bb->preds);
/* Do not perform region scheduling if there are any unreachable
@@ -807,7 +807,7 @@ haifa_find_rgns (void)
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
{
@@ -826,7 +826,7 @@ haifa_find_rgns (void)
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
- FOR_EACH_BB (jbb)
+ FOR_EACH_BB_FN (jbb, cfun)
{
/* First identify blocks in the loop, except for the loop
entry block. */
@@ -874,7 +874,7 @@ haifa_find_rgns (void)
Place those blocks into the queue. */
if (no_loops)
{
- FOR_EACH_BB (jbb)
+ FOR_EACH_BB_FN (jbb, cfun)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
if (single_succ_p (jbb)
@@ -1052,7 +1052,7 @@ haifa_find_rgns (void)
/* Any block that did not end up in a region is placed into a region
by itself. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (degree[bb->index] >= 0)
{
rgn_bb_table[idx] = bb->index;
@@ -3281,7 +3281,7 @@ sched_rgn_local_init (int rgn)
/* Use ->aux to implement EDGE_TO_BIT mapping. */
rgn_nr_edges = 0;
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
@@ -3291,7 +3291,7 @@ sched_rgn_local_init (int rgn)
rgn_edges = XNEWVEC (edge, rgn_nr_edges);
rgn_nr_edges = 0;
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
@@ -3312,7 +3312,7 @@ sched_rgn_local_init (int rgn)
/* Cleanup ->aux used for EDGE_TO_BIT mapping. */
/* We don't need them anymore. But we want to avoid duplication of
aux fields in the newly created edges. */
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
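
Every hunk above applies the same mechanical substitution: the implicit-cfun iterator FOR_EACH_BB (bb) becomes FOR_EACH_BB_FN (bb, cfun), which names the function whose CFG is being walked as an explicit argument. A minimal sketch of the resulting usage pattern is shown below; it is illustrative only, not part of this patch, and the helper name count_insns_in_fn is hypothetical. It assumes the usual GCC internal headers (basic-block.h for the iteration macros, rtl.h for rtx) are in scope, as they are in sched-rgn.c.

/* Illustrative sketch: count the insns in every basic block of FN.
   FOR_EACH_BB_FN and FOR_BB_INSNS are the GCC basic-block iteration
   macros used in the hunks above; count_insns_in_fn is a hypothetical
   helper, not code from this patch.  */
static int
count_insns_in_fn (struct function *fn)
{
  basic_block bb;
  rtx insn;
  int n_insns = 0;

  FOR_EACH_BB_FN (bb, fn)      /* walk FN's blocks via the explicit argument */
    FOR_BB_INSNS (bb, insn)    /* walk the insns within each block */
      n_insns++;

  return n_insns;
}

Passing cfun as the second argument, as this patch does throughout, keeps the behaviour identical to the old FOR_EACH_BB while making the dependency on the current function visible at each call site.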