summaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorrth <rth@138bc75d-0d04-0410-961f-82ee72b054a4>2001-01-09 17:45:12 +0000
committerrth <rth@138bc75d-0d04-0410-961f-82ee72b054a4>2001-01-09 17:45:12 +0000
commit749c6f584b30dbc81917b4f445f30000fadc5ce6 (patch)
tree1fe02ec542bf787c44dd766982d749861c7780fc /gcc
parent63d882f5c31bae97ab2ee924f980899641e7a2ec (diff)
downloadgcc-749c6f584b30dbc81917b4f445f30000fadc5ce6.tar.gz
* sched-int.h (struct deps): Add max_reg, reg_last_in_use; merge
reg_last_uses, reg_last_sets, reg_last_clobbers into struct deps_reg.
* sched-deps.c (sched_analyze_1): Update uses of struct deps.
(sched_analyze_2, sched_analyze_insn): Likewise.
(sched_analyze, init_deps): Likewise.
(free_deps): Likewise. Iterate with EXECUTE_IF_SET_IN_REG_SET.
* sched-rgn.c (propagate_deps): Likewise. Remove max_reg argument.
(compute_block_backward_dependences): Update propagate_deps call.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@38835 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog11
-rw-r--r--gcc/sched-deps.c232
-rw-r--r--gcc/sched-int.h17
-rw-r--r--gcc/sched-rgn.c96
4 files changed, 181 insertions, 175 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 53b85a9f254..b976645bdd3 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,14 @@
+2001-01-09 Richard Henderson <rth@redhat.com>
+
+ * sched-int.h (struct deps): Add max_reg, reg_last_in_use; merge
+ reg_last_uses, reg_last_sets, reg_last_clobbers into struct deps_reg.
+ * sched-deps.c (sched_analyze_1): Update uses of struct deps.
+ (sched_analyze_2, sched_analyze_insn): Likewise.
+ (sched_analyze, init_deps): Likewise.
+ (free_deps): Likewise. Iterate with EXECUTE_IF_SET_IN_REG_SET.
+ * sched-rgn.c (propagate_deps): Likewise. Remove max_reg argument.
+ (compute_block_backward_dependences): Update propagate_deps call.
+
2001-01-09 Mark Elbrecht <snowball3@bigfoot.com>
* gcc.c (process_command): Set switches[n_switches].ordering to 0.
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 750285e38d8..4a7f72b48df 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -590,10 +590,10 @@ sched_analyze_1 (deps, x, insn)
int r = regno + i;
rtx u;
- for (u = deps->reg_last_uses[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
/* Clobbers need not be ordered with respect to one
@@ -602,8 +602,8 @@ sched_analyze_1 (deps, x, insn)
if (code == SET)
{
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&deps->reg_last_uses[r]);
- for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
+ free_INSN_LIST_list (&deps->reg_last[r].uses);
+ for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, r);
}
@@ -616,21 +616,30 @@ sched_analyze_1 (deps, x, insn)
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
}
+ /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
+ it does not reload. Ignore these as they have served their
+ purpose already. */
+ else if (regno >= deps->max_reg)
+ {
+ if (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ abort ();
+ }
else
{
rtx u;
- for (u = deps->reg_last_uses[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
if (code == SET)
{
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&deps->reg_last_uses[regno]);
- for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
+ free_INSN_LIST_list (&deps->reg_last[regno].uses);
+ for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, regno);
}
@@ -757,14 +766,15 @@ sched_analyze_2 (deps, x, insn)
while (--i >= 0)
{
int r = regno + i;
- deps->reg_last_uses[r]
- = alloc_INSN_LIST (insn, deps->reg_last_uses[r]);
+ deps->reg_last[r].uses
+ = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
- for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */
- for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
if (call_used_regs[r] || global_regs[r])
@@ -773,16 +783,26 @@ sched_analyze_2 (deps, x, insn)
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
}
+ /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
+ it does not reload. Ignore these as they have served their
+ purpose already. */
+ else if (regno >= deps->max_reg)
+ {
+ if (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ abort ();
+ }
else
{
- deps->reg_last_uses[regno]
- = alloc_INSN_LIST (insn, deps->reg_last_uses[regno]);
+ deps->reg_last[regno].uses
+ = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
- for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */
- for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* Pseudos that are REG_EQUIV to something may be replaced
@@ -867,19 +887,19 @@ sched_analyze_2 (deps, x, insn)
pseudo-regs because it might give an incorrectly rounded result. */
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
{
- int max_reg = max_reg_num ();
- for (i = 0; i < max_reg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
+ struct deps_reg *reg_last = &deps->reg_last[i];
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
-
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
+
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
@@ -948,7 +968,6 @@ sched_analyze_insn (deps, x, insn, loop_notes)
{
register RTX_CODE code = GET_CODE (x);
rtx link;
- int maxreg = max_reg_num ();
int i;
if (code == COND_EXEC)
@@ -1001,13 +1020,15 @@ sched_analyze_insn (deps, x, insn, loop_notes)
next = next_nonnote_insn (insn);
if (next && GET_CODE (next) == BARRIER)
{
- for (i = 0; i < maxreg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ struct deps_reg *reg_last = &deps->reg_last[i];
+
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
}
@@ -1017,13 +1038,13 @@ sched_analyze_insn (deps, x, insn, loop_notes)
INIT_REG_SET (&tmp);
(*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
- EXECUTE_IF_SET_IN_REG_SET
- (&tmp, 0, i,
+ EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
{
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- deps->reg_last_uses[i]
- = alloc_INSN_LIST (insn, deps->reg_last_uses[i]);
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
});
CLEAR_REG_SET (&tmp);
@@ -1049,7 +1070,6 @@ sched_analyze_insn (deps, x, insn, loop_notes)
if (loop_notes)
{
- int max_reg = max_reg_num ();
int schedule_barrier_found = 0;
rtx link;
@@ -1074,19 +1094,20 @@ sched_analyze_insn (deps, x, insn, loop_notes)
/* Add dependencies if a scheduling barrier was found. */
if (schedule_barrier_found)
{
- for (i = 0; i < max_reg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
+ struct deps_reg *reg_last = &deps->reg_last[i];
rtx u;
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
-
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
+
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
@@ -1095,46 +1116,46 @@ sched_analyze_insn (deps, x, insn, loop_notes)
}
- /* Accumulate clobbers until the next set so that it will be output dependent
- on all of them. At the next set we can clear the clobber list, since
- subsequent sets will be output dependent on it. */
- EXECUTE_IF_SET_IN_REG_SET
- (reg_pending_sets, 0, i,
+ /* Accumulate clobbers until the next set so that it will be output
+ dependent on all of them. At the next set we can clear the clobber
+ list, since subsequent sets will be output dependent on it. */
+ if (reg_pending_sets_all)
{
- if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ reg_pending_sets_all = 0;
+ for (i = 0; i < deps->max_reg; i++)
{
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- deps->reg_last_sets[i] = 0;
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ {
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
+ }
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
}
- deps->reg_last_sets[i]
- = alloc_INSN_LIST (insn, deps->reg_last_sets[i]);
- });
- EXECUTE_IF_SET_IN_REG_SET
- (reg_pending_clobbers, 0, i,
- {
- deps->reg_last_clobbers[i]
- = alloc_INSN_LIST (insn, deps->reg_last_clobbers[i]);
- });
- CLEAR_REG_SET (reg_pending_sets);
- CLEAR_REG_SET (reg_pending_clobbers);
-
- if (reg_pending_sets_all)
+ }
+ else
{
- for (i = 0; i < maxreg; i++)
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
{
+ struct deps_reg *reg_last = &deps->reg_last[i];
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
{
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- deps->reg_last_sets[i] = 0;
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
}
- deps->reg_last_sets[i]
- = alloc_INSN_LIST (insn, deps->reg_last_sets[i]);
- }
-
- reg_pending_sets_all = 0;
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ });
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ });
}
+ CLEAR_REG_SET (reg_pending_sets);
+ CLEAR_REG_SET (reg_pending_clobbers);
/* If a post-call group is still open, see if it should remain so.
This insn must be a simple move of a hard reg to a pseudo or
@@ -1242,18 +1263,18 @@ sched_analyze (deps, head, tail)
if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
&& NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
{
- int max_reg = max_reg_num ();
- for (i = 0; i < max_reg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
+ struct deps_reg *reg_last = &deps->reg_last[i];
+
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
-
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
-
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
+
+ free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
@@ -1272,10 +1293,9 @@ sched_analyze (deps, head, tail)
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (call_used_regs[i] || global_regs[i])
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
SET_REGNO_REG_SET (reg_pending_clobbers, i);
@@ -1424,10 +1444,12 @@ void
init_deps (deps)
struct deps *deps;
{
- int maxreg = max_reg_num ();
- deps->reg_last_uses = (rtx *) xcalloc (maxreg, sizeof (rtx));
- deps->reg_last_sets = (rtx *) xcalloc (maxreg, sizeof (rtx));
- deps->reg_last_clobbers = (rtx *) xcalloc (maxreg, sizeof (rtx));
+ int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
+
+ deps->max_reg = max_reg;
+ deps->reg_last = (struct deps_reg *)
+ xcalloc (max_reg, sizeof (struct deps_reg));
+ INIT_REG_SET (&deps->reg_last_in_use);
deps->pending_read_insns = 0;
deps->pending_read_mems = 0;
@@ -1450,26 +1472,22 @@ void
free_deps (deps)
struct deps *deps;
{
- int max_reg = max_reg_num ();
int i;
- /* Note this loop is executed max_reg * nr_regions times. It's first
- implementation accounted for over 90% of the calls to free_INSN_LIST_list.
- The list was empty for the vast majority of those calls. On the PA, not
- calling free_INSN_LIST_list in those cases improves -O2 compile times by
- 3-5% on average. */
- for (i = 0; i < max_reg; ++i)
+ /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
+ times. For a test case with 42000 regs and 8000 small basic blocks,
+ this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
+ EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
{
- if (deps->reg_last_clobbers[i])
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- if (deps->reg_last_sets[i])
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- if (deps->reg_last_uses[i])
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
- }
- free (deps->reg_last_clobbers);
- free (deps->reg_last_sets);
- free (deps->reg_last_uses);
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ free_INSN_LIST_list (&reg_last->uses);
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
+ });
+ CLEAR_REG_SET (&deps->reg_last_in_use);
+
+ free (deps->reg_last);
+ deps->reg_last = NULL;
}
/* If it is profitable to use them, initialize caches for tracking
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index f59f3a84fb3..fdd49eadca7 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -78,13 +78,24 @@ struct deps
to ensure that they won't cross a call after scheduling is done. */
rtx sched_before_next_call;
+ /* The maximum register number for the following arrays. Before reload
+ this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */
+ int max_reg;
+
/* Element N is the next insn that sets (hard or pseudo) register
N within the current basic block; or zero, if there is no
such insn. Needed for new registers which may be introduced
by splitting insns. */
- rtx *reg_last_uses;
- rtx *reg_last_sets;
- rtx *reg_last_clobbers;
+ struct deps_reg
+ {
+ rtx uses;
+ rtx sets;
+ rtx clobbers;
+ } *reg_last;
+
+ /* Element N is set for each register that has any non-zero element
+ in reg_last[N].{uses,sets,clobbers}. */
+ regset_head reg_last_in_use;
};
/* This structure holds some state of the current scheduling pass, and
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index fa7efcfc771..13c358816b7 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -304,7 +304,7 @@ void debug_dependencies PARAMS ((void));
static void init_regions PARAMS ((void));
static void schedule_region PARAMS ((int));
-static void propagate_deps PARAMS ((int, struct deps *, int));
+static void propagate_deps PARAMS ((int, struct deps *));
static void free_pending_lists PARAMS ((void));
/* Functions for construction of the control flow graph. */
@@ -2440,13 +2440,11 @@ add_branch_dependences (head, tail)
static struct deps *bb_deps;
/* After computing the dependencies for block BB, propagate the dependencies
- found in TMP_DEPS to the successors of the block. MAX_REG is the number
- of registers. */
+ found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (bb, tmp_deps, max_reg)
+propagate_deps (bb, tmp_deps)
int bb;
struct deps *tmp_deps;
- int max_reg;
{
int b = BB_TO_BLOCK (bb);
int e, first_edge;
@@ -2481,43 +2479,28 @@ propagate_deps (bb, tmp_deps, max_reg)
continue;
}
- for (reg = 0; reg < max_reg; reg++)
+ /* The reg_last lists are inherited by bb_succ. */
+ EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg,
{
- /* reg-last-uses lists are inherited by bb_succ. */
- for (u = tmp_deps->reg_last_uses[reg]; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
- succ_deps->reg_last_uses[reg]))
- continue;
-
- succ_deps->reg_last_uses[reg]
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->reg_last_uses[reg]);
- }
-
- /* reg-last-defs lists are inherited by bb_succ. */
- for (u = tmp_deps->reg_last_sets[reg]; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
- succ_deps->reg_last_sets[reg]))
- continue;
-
- succ_deps->reg_last_sets[reg]
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->reg_last_sets[reg]);
- }
-
- for (u = tmp_deps->reg_last_clobbers[reg]; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
- succ_deps->reg_last_clobbers[reg]))
- continue;
-
- succ_deps->reg_last_clobbers[reg]
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->reg_last_clobbers[reg]);
- }
- }
+ struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg];
+ struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg];
+
+ for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1))
+ if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses))
+ succ_deps_reg->uses
+ = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses);
+
+ for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1))
+ if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets))
+ succ_deps_reg->sets
+ = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets);
+
+ for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1))
+ if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers))
+ succ_deps_reg->clobbers
+ = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers);
+ });
+ IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use);
/* Mem read/write lists are inherited by bb_succ. */
link_insn = tmp_deps->pending_read_insns;
@@ -2554,27 +2537,17 @@ propagate_deps (bb, tmp_deps, max_reg)
/* last_function_call is inherited by bb_succ. */
for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
- succ_deps->last_function_call))
- continue;
-
+ if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call))
succ_deps->last_function_call
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->last_function_call);
- }
+ = alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call);
/* last_pending_memory_flush is inherited by bb_succ. */
for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
+ if (! find_insn_list (XEXP (u, 0),
succ_deps->last_pending_memory_flush))
- continue;
-
succ_deps->last_pending_memory_flush
= alloc_INSN_LIST (XEXP (u, 0),
succ_deps->last_pending_memory_flush);
- }
/* sched_before_next_call is inherited by bb_succ. */
x = LOG_LINKS (tmp_deps->sched_before_next_call);
@@ -2594,8 +2567,8 @@ propagate_deps (bb, tmp_deps, max_reg)
Specifically for reg-reg data dependences, the block insns are
scanned by sched_analyze () top-to-bottom. Two lists are
- maintained by sched_analyze (): reg_last_sets[] for register DEFs,
- and reg_last_uses[] for register USEs.
+ maintained by sched_analyze (): reg_last[].sets for register DEFs,
+ and reg_last[].uses for register USEs.
When analysis is completed for bb, we update for its successors:
; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
@@ -2609,7 +2582,6 @@ compute_block_backward_dependences (bb)
int bb;
{
rtx head, tail;
- int max_reg = max_reg_num ();
struct deps tmp_deps;
tmp_deps = bb_deps[bb];
@@ -2620,18 +2592,12 @@ compute_block_backward_dependences (bb)
add_branch_dependences (head, tail);
if (current_nr_blocks > 1)
- propagate_deps (bb, &tmp_deps, max_reg);
+ propagate_deps (bb, &tmp_deps);
/* Free up the INSN_LISTs. */
free_deps (&tmp_deps);
-
- /* Assert that we won't need bb_reg_last_* for this block anymore.
- The vectors we're zeroing out have just been freed by the call to
- free_deps. */
- bb_deps[bb].reg_last_uses = 0;
- bb_deps[bb].reg_last_sets = 0;
- bb_deps[bb].reg_last_clobbers = 0;
}
+
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
them to the unused_*_list variables, so that they can be reused. */