author | rth <rth@138bc75d-0d04-0410-961f-82ee72b054a4> | 2001-03-28 07:42:04 +0000 |
---|---|---|
committer | rth <rth@138bc75d-0d04-0410-961f-82ee72b054a4> | 2001-03-28 07:42:04 +0000 |
commit | f787dd2b96d381c81d9f189933e4a8d8d8f28008 (patch) | |
tree | 68e10b281fed5bb38b28923673bf9706ea5225ab /gcc/sched-deps.c | |
parent | c0c2b73436af804ad20d8e62b6ae875d9afcdcf3 (diff) | |
download | gcc-f787dd2b96d381c81d9f189933e4a8d8d8f28008.tar.gz |
* sched-deps.c (sched_analyze_insn): Consolidate scheduling
barrier code. Add a scheduling barrier if a non-call insn
can throw internally.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@40915 138bc75d-0d04-0410-961f-82ee72b054a4
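The ChangeLog entry above is terse, so here is a minimal C sketch of the shape of the change, for illustration only: both triggers (a jump followed by a barrier, and a non-call insn that can throw internally) now set a single `schedule_barrier_found` flag, and the dependence-adding code runs in one consolidated place. Every type and helper below (`insn_info`, `analyze_insn`, `pin_insn`) is a hypothetical stand-in, not GCC's API; the real condition uses `flag_non_call_exceptions` and `can_throw_internal` as shown in the diff further down.

```c
/* Illustrative sketch only -- hypothetical stand-ins, not GCC internals.  */
#include <stdbool.h>
#include <stdio.h>

struct insn_info
{
  bool jump_before_barrier;   /* a JUMP_INSN followed by a BARRIER  */
  bool is_call;
  bool can_throw;             /* may raise an exception internally  */
};

/* Stand-in for the consolidated block that walks reg_last->uses/sets/
   clobbers and the pending memory lists, adding dependencies.  */
static void
pin_insn (const struct insn_info *insn)
{
  (void) insn;
  puts ("scheduling barrier: insn may not be moved");
}

static void
analyze_insn (const struct insn_info *insn, bool non_call_exceptions)
{
  int schedule_barrier_found = 0;

  /* Old trigger: a jump at the end of a block.  */
  if (insn->jump_before_barrier)
    schedule_barrier_found = 1;

  /* New trigger: a non-call insn that can throw internally.  */
  if (non_call_exceptions && !insn->is_call && insn->can_throw)
    schedule_barrier_found = 1;

  /* One consolidated place adds the dependencies.  */
  if (schedule_barrier_found)
    pin_insn (insn);
}

int
main (void)
{
  struct insn_info trapping_load = { false, false, true };
  analyze_insn (&trapping_load, true);   /* pinned under -fnon-call-exceptions  */
  analyze_insn (&trapping_load, false);  /* otherwise free to be scheduled  */
  return 0;
}
```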
Diffstat (limited to 'gcc/sched-deps.c')
-rw-r--r-- | gcc/sched-deps.c | 85 |
1 file changed, 39 insertions, 46 deletions
```diff
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 904b37e1d4c..0d13a2025e3 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -972,6 +972,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
      rtx loop_notes;
 {
   register RTX_CODE code = GET_CODE (x);
+  int schedule_barrier_found = 0;
   rtx link;
   int i;
 
@@ -1021,22 +1022,10 @@ sched_analyze_insn (deps, x, insn, loop_notes)
 
   if (GET_CODE (insn) == JUMP_INSN)
     {
-      rtx next, u, pending, pending_mem;
+      rtx next, u;
       next = next_nonnote_insn (insn);
       if (next && GET_CODE (next) == BARRIER)
-        {
-          for (i = 0; i < deps->max_reg; i++)
-            {
-              struct deps_reg *reg_last = &deps->reg_last[i];
-
-              for (u = reg_last->uses; u; u = XEXP (u, 1))
-                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-              for (u = reg_last->sets; u; u = XEXP (u, 1))
-                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-              for (u = reg_last->clobbers; u; u = XEXP (u, 1))
-                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-            }
-        }
+        schedule_barrier_found = 1;
       else
         {
           regset_head tmp;
@@ -1054,33 +1043,19 @@ sched_analyze_insn (deps, x, insn, loop_notes)
           CLEAR_REG_SET (&tmp);
         }
 
-      pending = deps->pending_write_insns;
-      pending_mem = deps->pending_write_mems;
-      while (pending)
-        {
-          add_dependence (insn, XEXP (pending, 0), 0);
-
-          pending = XEXP (pending, 1);
-          pending_mem = XEXP (pending_mem, 1);
-        }
-
-      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
-        add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
     }
 
   /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
      block, then we must be sure that no instructions are scheduled across
      it.  Otherwise, the reg_n_refs info (which depends on loop_depth) would
      become incorrect.  */
-
   if (loop_notes)
     {
-      int schedule_barrier_found = 0;
       rtx link;
 
       /* Update loop_notes with any notes from this insn.  Also determine
          if any of the notes on the list correspond to instruction scheduling
-         barriers (loop, eh & setjmp notes, but not range notes.  */
+         barriers (loop, eh & setjmp notes, but not range notes).  */
       link = loop_notes;
       while (XEXP (link, 1))
         {
@@ -1095,30 +1070,48 @@ sched_analyze_insn (deps, x, insn, loop_notes)
         }
       XEXP (link, 1) = REG_NOTES (insn);
       REG_NOTES (insn) = loop_notes;
+    }
+
+  /* If this instruction can throw an exception, then moving it changes
+     where block boundaries fall.  This is mighty confusing elsewhere.
+     Therefore, prevent such an instruction from being moved.  */
+  if (flag_non_call_exceptions && can_throw_internal (insn))
+    schedule_barrier_found = 1;
 
-      /* Add dependencies if a scheduling barrier was found.  */
-      if (schedule_barrier_found)
+  /* Add dependencies if a scheduling barrier was found.  */
+  if (schedule_barrier_found)
+    {
+      rtx u, pending, pending_mem;
+
+      for (i = 0; i < deps->max_reg; i++)
         {
-          for (i = 0; i < deps->max_reg; i++)
-            {
-              struct deps_reg *reg_last = &deps->reg_last[i];
-              rtx u;
+          struct deps_reg *reg_last = &deps->reg_last[i];
 
-              for (u = reg_last->uses; u; u = XEXP (u, 1))
-                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-              for (u = reg_last->sets; u; u = XEXP (u, 1))
-                add_dependence (insn, XEXP (u, 0), 0);
-              for (u = reg_last->clobbers; u; u = XEXP (u, 1))
-                add_dependence (insn, XEXP (u, 0), 0);
+          for (u = reg_last->uses; u; u = XEXP (u, 1))
+            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+          for (u = reg_last->sets; u; u = XEXP (u, 1))
+            add_dependence (insn, XEXP (u, 0), 0);
+          for (u = reg_last->clobbers; u; u = XEXP (u, 1))
+            add_dependence (insn, XEXP (u, 0), 0);
 
-              if (GET_CODE (PATTERN (insn)) != COND_EXEC)
-                free_INSN_LIST_list (&reg_last->uses);
-            }
-          reg_pending_sets_all = 1;
+          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+            free_INSN_LIST_list (&reg_last->uses);
+        }
+      flush_pending_lists (deps, insn, 0);
 
-          flush_pending_lists (deps, insn, 0);
+      pending = deps->pending_write_insns;
+      pending_mem = deps->pending_write_mems;
+      while (pending)
+        {
+          add_dependence (insn, XEXP (pending, 0), 0);
+          pending = XEXP (pending, 1);
+          pending_mem = XEXP (pending_mem, 1);
         }
+      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
+        add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+
+      reg_pending_sets_all = 1;
     }
 
   /* Accumulate clobbers until the next set so that it will be output
```
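For readers unfamiliar with the INSN_LIST walks above, the consolidated barrier block boils down to: add an anti-dependence on every previously recorded use, and an ordinary dependence (the 0 dep-type in the diff) on every previously recorded set or clobber, so the barrier insn stays after all of them. Below is a simplified sketch under that reading, using plain C linked lists rather than GCC's INSN_LIST rtx nodes; `insn_node`, `record_dependence` and `pin_after_register_lists` are illustrative names, not the real signatures.

```c
/* Simplified sketch: plain lists instead of GCC's INSN_LIST nodes.  */
#include <stddef.h>
#include <stdio.h>

struct insn_node
{
  int uid;                  /* stand-in for an insn */
  struct insn_node *next;
};

enum dep_kind { DEP_DATA, DEP_ANTI };

static void
record_dependence (int consumer, int producer, enum dep_kind kind)
{
  printf ("insn %d must follow insn %d (%s)\n",
          consumer, producer, kind == DEP_ANTI ? "anti" : "data");
}

/* Mirror of the uses/sets/clobbers walk in the diff: anti-dependence on
   prior uses, ordinary dependence on prior sets and clobbers.  */
static void
pin_after_register_lists (int barrier_uid,
                          struct insn_node *uses,
                          struct insn_node *sets,
                          struct insn_node *clobbers)
{
  struct insn_node *p;

  for (p = uses; p; p = p->next)
    record_dependence (barrier_uid, p->uid, DEP_ANTI);
  for (p = sets; p; p = p->next)
    record_dependence (barrier_uid, p->uid, DEP_DATA);
  for (p = clobbers; p; p = p->next)
    record_dependence (barrier_uid, p->uid, DEP_DATA);
}

int
main (void)
{
  struct insn_node use2 = { 11, NULL }, use1 = { 9, &use2 };
  struct insn_node set1 = { 7, NULL };

  /* Insn 42 is the scheduling barrier; it is pinned after insns 9, 11, 7.  */
  pin_after_register_lists (42, &use1, &set1, NULL);
  return 0;
}
```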