author    rakdver <rakdver@138bc75d-0d04-0410-961f-82ee72b054a4>  2002-05-23 19:23:51 +0000
committer rakdver <rakdver@138bc75d-0d04-0410-961f-82ee72b054a4>  2002-05-23 19:23:51 +0000
commit    4c26117afc55fbf0b998d0bf25f1ab56da4dd180 (patch)
tree      d9ef360c452a150ca3f25a23e593846ce26b64f0 /gcc/sched-rgn.c
parent    df64d85ee58f6385c06511417a9a8a2f60a17261 (diff)
* bb-reorder.c (make_reorder_chain, make_reorder_chain_1): Use FOR_EACH_BB macros to iterate over basic block chain.
* cfg.c (clear_edges, clear_bb_flags, dump_flow_info, alloc_aux_for_blocks, clear_aux_for_blocks, alloc_aux_for_edges): Likewise.
* cfganal.c (set_edge_can_fallthru_flag, flow_call_edges_add, find_unreachable_blocks, create_edge_list, verify_edge_list, remove_fake_edges, add_noreturn_fake_exit_edges, flow_preorder_transversal_compute, flow_dfs_compute_reverse_execute): Likewise.
* cfgbuild.c (make_edges, find_basic_blocks, find_many_sub_basic_blocks, find_sub_basic_blocks): Likewise.
* cfgcleanup.c (try_optimize_cfg, delete_unreachable_blocks): Likewise.
* cfglayout.c (record_effective_endpoints, cleanup_unconditional_jumps): Likewise.
* cfgloop.c (flow_loops_cfg_dump, flow_loops_find): Likewise.
* cfgrtl.c (compute_bb_for_insn, tidy_fallthru_edges, commit_edge_insertions, commit_edge_insertions_watch_calls, print_rtl_with_bb, verify_flow_info, purge_all_dead_edges): Likewise.
* combine.c (combine_instructions, reg_dead_at_p): Likewise.
* conflict.c (conflict_graph_compute): Likewise.
* df.c (df_bitmaps_alloc, df_bitmaps_free, df_alloc, df_analyse_1, df_modified_p, df_refs_unlink, df_dump): Likewise.
* dominance.c (calc_dfs_tree, calculate_dominance_info): Likewise.
* final.c (compute_alignments): Likewise.
* flow.c (update_life_info, update_life_info_in_dirty_blocks, delete_noop_moves, calculate_global_regs_live, allocate_bb_life_data, count_or_remove_death_notes): Likewise.
* gcse.c (oprs_unchanged_p, record_last_reg_set_info, compute_hash_table, compute_kill_rd, compute_rd, compute_ae_kill, classic_gcse, compute_transp, cprop, compute_pre_data, compute_transpout, invalidate_nonnull_info, delete_null_pointer_checks_1, delete_null_pointer_checks, compute_code_hoist_vbeinout, hoist_code, compute_ld_motion_mems, compute_store_table, build_store_vectors, store_motion): Likewise.
* global.c (global_conflicts, mark_elimination): Likewise.
* graph.c (print_rtl_graph_with_bb): Likewise.
* haifa-sched.c (sched_init): Likewise.
* ifcvt.c (if_convert): Likewise.
* lcm.c (compute_antinout_edge, compute_laterin, compute_insert_delete, compute_available, compute_nearerout, compute_rev_insert_delete, optimize_mode_switching): Likewise.
* local-alloc.c (local_alloc, update_equiv_regs): Likewise.
* predict.c (estimate_probability, note_prediction_to_br_prob, propagate_freq, counts_to_freqs, expensive_function_p, estimate_bb_frequencies): Likewise.
* profile.c (instrument_edges, get_exec_counts, compute_branch_probabilities, compute_checksum, branch_prob, find_spanning_tree): Likewise.
* recog.c (split_all_insns, peephole2_optimize): Likewise.
* reg-stack.c (reg_to_stack, convert_regs_entry, convert_regs): Likewise.
* regclass.c (scan_one_insn, regclass): Likewise.
* regmove.c (mark_flags_life_zones, regmove_optimize, record_stack_memrefs): Likewise.
* regrename.c (regrename_optimize, copyprop_hardreg_forward): Likewise.
* reload1.c (reload, reload_combine, fixup_abnormal_edges): Likewise.
* resource.c (find_basic_block): Likewise.
* sched-ebb.c (schedule_ebbs): Likewise.
* sched-rgn.c (is_cfg_nonregular, build_control_flow, find_single_block_region, find_rgns, schedule_insns)
* sibcall.c (optimize_sibling_and_tail_recursive_call)
* ssa-ccp.c (optimize_unexecutable_edges, ssa_ccp_df_delete_unreachable_insns): Likewise.
* ssa-dce.c (ssa_eliminate_dead_code): Likewise.
* ssa.c (find_evaluations, compute_dominance_frontiers_1, rename_block, convert_to_ssa, compute_conservative_reg_partition, compute_coalesced_reg_partition, rename_equivalent_regs, convert_from_ssa): Likewise.
* config/ia64/ia64.c (emit_predicate_relation_info, process_epilogue, process_for_unwind_directive): Likewise.
* df.c (FOR_ALL_BBS): Removed.
* gcse.c (struct null_pointer_info): Type of current_block field changed.
(struct reg_avail_info): Type of last_bb field changed.
* config/ia64/ia64.c (block_num): Removed.
(need_copy_state): Type changed.
(last_block): New.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@53804 138bc75d-0d04-0410-961f-82ee72b054a4
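The conversion is mechanical: an index loop over 0 .. n_basic_blocks - 1 using BASIC_BLOCK (i), BLOCK_HEAD (i) and BLOCK_END (i) becomes a walk over the basic block chain with FOR_EACH_BB, reading bb->index, bb->head and bb->end. A minimal standalone sketch of the idiom follows; the mock struct and MOCK_FOR_EACH_BB macro are illustrative stand-ins, not GCC's actual basic-block.h definitions.

/* Illustrative sketch only: the mock type and macro stand in for GCC's
   basic-block.h definitions of this period; the real struct layout and
   FOR_EACH_BB definition differ.  The point is the idiom the patch
   introduces: walk the block chain and use bb->index, bb->head and
   bb->end instead of indexing with BLOCK_HEAD (i) / BLOCK_END (i).  */
#include <stdio.h>

struct mock_basic_block
{
  int index;                      /* block number, as used by BLOCK_TO_BB etc.  */
  const char *head, *end;         /* stand-ins for the first and last insn  */
  struct mock_basic_block *next;  /* next block in the chain  */
};

#define MOCK_FOR_EACH_BB(BB, FIRST) \
  for ((BB) = (FIRST); (BB) != NULL; (BB) = (BB)->next)

int
main (void)
{
  struct mock_basic_block b1 = { 1, "insn_c", "insn_d", NULL };
  struct mock_basic_block b0 = { 0, "insn_a", "insn_b", &b1 };
  struct mock_basic_block *bb;

  /* New style: iterate the chain; nothing assumes block indices run
     densely from 0 to n_basic_blocks - 1.  */
  MOCK_FOR_EACH_BB (bb, &b0)
    printf ("bb %d: head=%s end=%s\n", bb->index, bb->head, bb->end);

  return 0;
}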
Diffstat (limited to 'gcc/sched-rgn.c')
-rw-r--r--  gcc/sched-rgn.c  105
1 file changed, 55 insertions(+), 50 deletions(-)
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index acc8477e3ab..9f88dcc459b 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -319,7 +319,7 @@ static void free_pending_lists PARAMS ((void));
static int
is_cfg_nonregular ()
{
- int b;
+ basic_block b;
rtx insn;
RTX_CODE code;
@@ -346,8 +346,8 @@ is_cfg_nonregular ()
/* If we have non-jumping insns which refer to labels, then we consider
the cfg not well structured. */
/* Check for labels referred to other thn by jumps. */
- for (b = 0; b < n_basic_blocks; b++)
- for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
+ FOR_EACH_BB (b)
+ for (insn = b->head;; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
@@ -361,7 +361,7 @@ is_cfg_nonregular ()
return 1;
}
- if (insn == BLOCK_END (b))
+ if (insn == b->end)
break;
}
@@ -382,6 +382,7 @@ build_control_flow (edge_list)
struct edge_list *edge_list;
{
int i, unreachable, num_edges;
+ basic_block b;
/* This already accounts for entry/exit edges. */
num_edges = NUM_EDGES (edge_list);
@@ -393,10 +394,8 @@ build_control_flow (edge_list)
test is redundant with the one in find_rgns, but it's much
cheaper to go ahead and catch the trivial case here. */
unreachable = 0;
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (b)
{
- basic_block b = BASIC_BLOCK (i);
-
if (b->pred == NULL
|| (b->pred->src == b
&& b->pred->pred_next == NULL))
@@ -544,17 +543,19 @@ debug_regions ()
static void
find_single_block_region ()
{
- int i;
+ basic_block bb;
- for (i = 0; i < n_basic_blocks; i++)
+ nr_regions = 0;
+
+ FOR_EACH_BB (bb)
{
- rgn_bb_table[i] = i;
- RGN_NR_BLOCKS (i) = 1;
- RGN_BLOCKS (i) = i;
- CONTAINING_RGN (i) = i;
- BLOCK_TO_BB (i) = 0;
+ rgn_bb_table[nr_regions] = bb->index;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_BLOCKS (nr_regions) = nr_regions;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = 0;
+ nr_regions++;
}
- nr_regions = n_basic_blocks;
}
/* Update number of blocks and the estimate for number of insns
@@ -631,6 +632,7 @@ find_rgns (edge_list, dom)
int count = 0, sp, idx = 0, current_edge = out_edges[0];
int num_bbs, num_insns, unreachable;
int too_large_failure;
+ basic_block bb;
/* Note if an edge has been passed. */
sbitmap passed;
@@ -772,8 +774,8 @@ find_rgns (edge_list, dom)
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
- for (i = 0; i < n_basic_blocks; i++)
- if (dfs_nr[i] == 0)
+ FOR_EACH_BB (bb)
+ if (dfs_nr[bb->index] == 0)
{
unreachable = 1;
break;
@@ -783,8 +785,8 @@ find_rgns (edge_list, dom)
to hold degree counts. */
degree = dfs_nr;
- for (i = 0; i < n_basic_blocks; i++)
- degree[i] = 0;
+ FOR_EACH_BB (bb)
+ degree[bb->index] = 0;
for (i = 0; i < num_edges; i++)
{
edge e = INDEX_EDGE (edge_list, i);
@@ -809,12 +811,12 @@ find_rgns (edge_list, dom)
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
- for (i = 0; i < n_basic_blocks; i++)
+ FOR_EACH_BB (bb)
{
- if (TEST_BIT (header, i) && TEST_BIT (inner, i))
+ if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
{
edge e;
- int j;
+ basic_block jbb;
/* Now check that the loop is reducible. We do this separate
from finding inner loops so that we do not find a reducible
@@ -827,15 +829,15 @@ find_rgns (edge_list, dom)
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
- for (j = 0; j < n_basic_blocks; j++)
+ FOR_EACH_BB (jbb)
{
/* First identify blocks in the loop, except for the loop
entry block. */
- if (i == max_hdr[j] && i != j)
+ if (bb->index == max_hdr[jbb->index] && bb != jbb)
{
/* Now verify that the block is dominated by the loop
header. */
- if (!TEST_BIT (dom[j], i))
+ if (!TEST_BIT (dom[jbb->index], bb->index))
break;
}
}
@@ -843,25 +845,25 @@ find_rgns (edge_list, dom)
/* If we exited the loop early, then I is the header of
a non-reducible loop and we should quit processing it
now. */
- if (j != n_basic_blocks)
+ if (jbb != EXIT_BLOCK_PTR)
continue;
/* I is a header of an inner loop, or block 0 in a subroutine
with no loops at all. */
head = tail = -1;
too_large_failure = 0;
- loop_head = max_hdr[i];
+ loop_head = max_hdr[bb->index];
/* Decrease degree of all I's successors for topological
ordering. */
- for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
+ for (e = bb->succ; e; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (BLOCK_END (i))
- - INSN_LUID (BLOCK_HEAD (i)));
+ num_insns = (INSN_LUID (bb->end)
+ - INSN_LUID (bb->head));
/* Find all loop latches (blocks with back edges to the loop
header) or all the leaf blocks in the cfg has no loops.
@@ -869,17 +871,17 @@ find_rgns (edge_list, dom)
Place those blocks into the queue. */
if (no_loops)
{
- for (j = 0; j < n_basic_blocks; j++)
+ FOR_EACH_BB (jbb)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
- if (BASIC_BLOCK (j)->succ
- && BASIC_BLOCK (j)->succ->dest == EXIT_BLOCK_PTR
- && BASIC_BLOCK (j)->succ->succ_next == NULL)
+ if (jbb->succ
+ && jbb->succ->dest == EXIT_BLOCK_PTR
+ && jbb->succ->succ_next == NULL)
{
- queue[++tail] = j;
- SET_BIT (in_queue, j);
+ queue[++tail] = jbb->index;
+ SET_BIT (in_queue, jbb->index);
- if (too_large (j, &num_bbs, &num_insns))
+ if (too_large (jbb->index, &num_bbs, &num_insns))
{
too_large_failure = 1;
break;
@@ -890,14 +892,14 @@ find_rgns (edge_list, dom)
{
edge e;
- for (e = BASIC_BLOCK (i)->pred; e; e = e->pred_next)
+ for (e = bb->pred; e; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR)
continue;
node = e->src->index;
- if (max_hdr[node] == loop_head && node != i)
+ if (max_hdr[node] == loop_head && node != bb->index)
{
/* This is a loop latch. */
queue[++tail] = node;
@@ -959,7 +961,7 @@ find_rgns (edge_list, dom)
tail = -1;
break;
}
- else if (!TEST_BIT (in_queue, node) && node != i)
+ else if (!TEST_BIT (in_queue, node) && node != bb->index)
{
queue[++tail] = node;
SET_BIT (in_queue, node);
@@ -976,12 +978,12 @@ find_rgns (edge_list, dom)
if (tail >= 0 && !too_large_failure)
{
/* Place the loop header into list of region blocks. */
- degree[i] = -1;
- rgn_bb_table[idx] = i;
+ degree[bb->index] = -1;
+ rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = num_bbs;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (i) = nr_regions;
- BLOCK_TO_BB (i) = count = 0;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = count = 0;
/* Remove blocks from queue[] when their in degree
becomes zero. Repeat until no blocks are left on the
@@ -1020,14 +1022,14 @@ find_rgns (edge_list, dom)
/* Any block that did not end up in a region is placed into a region
by itself. */
- for (i = 0; i < n_basic_blocks; i++)
- if (degree[i] >= 0)
+ FOR_EACH_BB (bb)
+ if (degree[bb->index] >= 0)
{
- rgn_bb_table[idx] = i;
+ rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (i) = nr_regions++;
- BLOCK_TO_BB (i) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions++;
+ BLOCK_TO_BB (bb->index) = 0;
}
free (max_hdr);
@@ -2980,6 +2982,7 @@ schedule_insns (dump_file)
sbitmap large_region_blocks, blocks;
int rgn;
int any_large_regions;
+ basic_block bb;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
@@ -3019,7 +3022,9 @@ schedule_insns (dump_file)
any_large_regions = 0;
large_region_blocks = sbitmap_alloc (n_basic_blocks);
- sbitmap_ones (large_region_blocks);
+ sbitmap_zero (large_region_blocks);
+ FOR_EACH_BB (bb)
+ SET_BIT (large_region_blocks, bb->index);
blocks = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (blocks);
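The final hunk in schedule_insns replaces sbitmap_ones on large_region_blocks with sbitmap_zero plus a FOR_EACH_BB loop that sets only the indices of blocks actually present on the chain, so nothing relies on every index below n_basic_blocks naming a live block. A standalone sketch of that pattern, with a plain byte array and hypothetical mock types in place of GCC's sbitmap and basic_block:

/* Illustrative sketch only: a byte array replaces GCC's sbitmap and a mock
   type replaces basic_block; names are hypothetical.  It mirrors the
   schedule_insns change: start from an empty set (sbitmap_zero) and mark
   only the indices of blocks that actually exist, instead of assuming all
   indices below n_basic_blocks are live (sbitmap_ones).  */
#include <stdio.h>
#include <string.h>

struct mock_basic_block
{
  int index;
  struct mock_basic_block *next;
};

#define MOCK_FOR_EACH_BB(BB, FIRST) \
  for ((BB) = (FIRST); (BB) != NULL; (BB) = (BB)->next)

int
main (void)
{
  /* A chain whose indices are not dense: 0, 2, 5.  */
  struct mock_basic_block b5 = { 5, NULL };
  struct mock_basic_block b2 = { 2, &b5 };
  struct mock_basic_block b0 = { 0, &b2 };
  unsigned char large_region_blocks[8];
  struct mock_basic_block *bb;
  int i;

  memset (large_region_blocks, 0, sizeof large_region_blocks);  /* like sbitmap_zero  */
  MOCK_FOR_EACH_BB (bb, &b0)
    large_region_blocks[bb->index] = 1;                         /* like SET_BIT  */

  for (i = 0; i < 8; i++)
    printf ("index %d: %s\n", i, large_region_blocks[i] ? "block" : "no block");

  return 0;
}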