summaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorhubicka <hubicka@138bc75d-0d04-0410-961f-82ee72b054a4>2003-02-06 10:03:13 +0000
committerhubicka <hubicka@138bc75d-0d04-0410-961f-82ee72b054a4>2003-02-06 10:03:13 +0000
commit505f406ce35ec8e0eb962365527d3634b9a50cf9 (patch)
treed48ce6d178524f715563dd56706ee443cb3f0d5b /gcc
parent84d86c97c8e0abda6fd63eeb2d8cc905e0ddb06e (diff)
downloadgcc-505f406ce35ec8e0eb962365527d3634b9a50cf9.tar.gz
* i386.c (x86_inter_unit_moves): New variable.
(ix86_secondary_memory_needed): Fix 64bit case, honor TARGET_INTER_UNIT_MOVES * i386.h (x86_inter_unit_moves): Declare. (TARGET_INTER_UNIT_MOVES): New macro. * i386.md (movsi_1): Cleanup constraints; disable when not doing inter-unit moves. (movsi_1_nointernunit): New. (movdi_1_rex64): Fix constraints; deal with SSE->GPR moves. (movdi_1_rex64_nointerunit): New. (movsf_1): Disable when not doing inter-unit moves. (movsf_1_nointerunit): New. * basic-block.h (inside_basic_block_p): Declare. * cfgbuild.c (inside_basic_block_p): Make global. * haifa-sched.c (unlink_other_notes): Deal with NOTE_INSN_BASIC_BLOCK. * sched-ebb.c (schedule_ebb): Return last basic block of trace; update CFG. (fix_basic_block_boundaries, add_missing_bbs): New. (rank): Use profile. (schedule_ebbs): Rely on CFG; update comments. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@62477 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog24
-rw-r--r--gcc/basic-block.h1
-rw-r--r--gcc/cfgbuild.c3
-rw-r--r--gcc/config/i386/i386.c9
-rw-r--r--gcc/config/i386/i386.h2
-rw-r--r--gcc/config/i386/i386.md219
-rw-r--r--gcc/haifa-sched.c1
-rw-r--r--gcc/sched-ebb.c183
8 files changed, 405 insertions, 37 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index aabea405b33..ea7c3c50214 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,27 @@
+Thu Feb 6 00:18:38 CET 2003 Jan Hubicka <jh@suse.cz>
+
+ * i386.c (x86_inter_unit_moves): New variable.
+ (ix86_secondary_memory_needed): Fix 64bit case, honor
+ TARGET_INTER_UNIT_MOVES
+ * i386.h (x86_inter_unit_moves): Declare.
+ (TARGET_INTER_UNIT_MOVES): New macro.
+ * i386.md (movsi_1): Cleanup constraints; disable
+ when not doing inter-unit moves.
+ (movsi_1_nointernunit): New.
+ (movdi_1_rex64): Fix constraints; deal with SSE->GPR moves.
+ (movdi_1_rex64_nointerunit): New.
+ (movsf_1): Disable when not doing inter-unit moves.
+ (movsf_1_nointerunit): New.
+
+ * basic-block.h (inside_basic_block_p): Declare.
+ * cfgbuild.c (inside_basic_block_p): Make global.
+ * haifa-sched.c (unlink_other_notes): Deal with NOTE_INSN_BASIC_BLOCK.
+ * sched-ebb.c (schedule_ebb): Return last basic block of trace;
+ update CFG.
+ (fix_basic_block_boundaries, add_missing_bbs): New.
+ (rank): Use profile.
+ (schedule_ebbs): Rely on CFG; update comments.
+
2003-02-05 Geoffrey Keating <geoffk@apple.com>
* Makefile.in (host_hook_obj): New.
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 0fd77bf1751..680bba505fc 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -613,6 +613,7 @@ extern void fixup_abnormal_edges PARAMS ((void));
extern bool can_hoist_insn_p PARAMS ((rtx, rtx, regset));
extern rtx hoist_insn_after PARAMS ((rtx, rtx, rtx, rtx));
extern rtx hoist_insn_to_edge PARAMS ((rtx, edge, rtx, rtx));
+extern bool inside_basic_block_p PARAMS ((rtx));
extern bool control_flow_insn_p PARAMS ((rtx));
/* In dominance.c */
diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c
index 2e1830c924f..d5d1b2009db 100644
--- a/gcc/cfgbuild.c
+++ b/gcc/cfgbuild.c
@@ -58,12 +58,11 @@ static void make_label_edge PARAMS ((sbitmap *, basic_block,
static void make_eh_edge PARAMS ((sbitmap *, basic_block, rtx));
static void find_bb_boundaries PARAMS ((basic_block));
static void compute_outgoing_frequencies PARAMS ((basic_block));
-static bool inside_basic_block_p PARAMS ((rtx));
/* Return true if insn is something that should be contained inside basic
block. */
-static bool
+bool
inside_basic_block_p (insn)
rtx insn;
{
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index bb4c22d914d..2c71e1e88d4 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -521,6 +521,7 @@ const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
+const int x86_inter_unit_moves = ~(m_ATHLON_K8);
/* In case the average insn count for single function invocation is
lower than this constant, emit fast (but longer) prologue and
@@ -14386,10 +14387,10 @@ ix86_secondary_memory_needed (class1, class2, mode, strict)
return 1;
}
return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2)
- || (SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
- && (mode) != SImode)
- || (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
- && (mode) != SImode));
+ || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
+ || MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
+ && ((mode != SImode && (mode != DImode || !TARGET_64BIT))
+ || (!TARGET_INTER_UNIT_MOVES && !optimize_size))));
}
/* Return the cost of moving data from a register in class CLASS1 to
one in class CLASS2.
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 1fa4358c24d..8d1b5e428f4 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -230,6 +230,7 @@ extern const int x86_arch_always_fancy_math_387, x86_shift1;
extern const int x86_sse_partial_reg_dependency, x86_sse_partial_regs;
extern const int x86_sse_typeless_stores, x86_sse_load0_by_pxor;
extern const int x86_use_ffreep, x86_sse_partial_regs_for_cvtsd2ss;
+extern const int x86_inter_unit_moves;
extern int x86_prefetch_sse;
#define TARGET_USE_LEAVE (x86_use_leave & CPUMASK)
@@ -282,6 +283,7 @@ extern int x86_prefetch_sse;
#define TARGET_SHIFT1 (x86_shift1 & CPUMASK)
#define TARGET_USE_FFREEP (x86_use_ffreep & CPUMASK)
#define TARGET_REP_MOVL_OPTIMAL (x86_rep_movl_optimal & CPUMASK)
+#define TARGET_INTER_UNIT_MOVES (x86_inter_unit_moves & CPUMASK)
#define TARGET_STACK_PROBE (target_flags & MASK_STACK_PROBE)
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 875617d6996..3bba254dfbe 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -1165,8 +1165,49 @@
(define_insn "*movsi_1"
[(set (match_operand:SI 0 "nonimmediate_operand" "=*?a,r,*?a,m,!*y,!rm,!*y,!*Y,!rm,!*Y")
- (match_operand:SI 1 "general_operand" "im,rinm,rinm,rin,rm,*y,*y,rm,*Y,*Y"))]
- "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
+ (match_operand:SI 1 "general_operand" "im,rinm,rinm,rin,*y,*y,rm,*Y,*Y,rm"))]
+ "(TARGET_INTER_UNIT_MOVES || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_SSEMOV:
+ if (get_attr_mode (insn) == TImode)
+ return "movdqa\t{%1, %0|%0, %1}";
+ return "movd\t{%1, %0|%0, %1}";
+
+ case TYPE_MMXMOV:
+ if (get_attr_mode (insn) == DImode)
+ return "movq\t{%1, %0|%0, %1}";
+ return "movd\t{%1, %0|%0, %1}";
+
+ case TYPE_LEA:
+ return "lea{l}\t{%1, %0|%0, %1}";
+
+ default:
+ if (flag_pic && !LEGITIMATE_PIC_OPERAND_P (operands[1]))
+ abort();
+ return "mov{l}\t{%1, %0|%0, %1}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "4,5,6")
+ (const_string "mmxmov")
+ (eq_attr "alternative" "7,8,9")
+ (const_string "ssemov")
+ (and (ne (symbol_ref "flag_pic") (const_int 0))
+ (match_operand:SI 1 "symbolic_operand" ""))
+ (const_string "lea")
+ ]
+ (const_string "imov")))
+ (set_attr "modrm" "0,*,0,*,*,*,*,*,*,*")
+ (set_attr "mode" "SI,SI,SI,SI,DI,SI,SI,TI,SI,SI")])
+
+(define_insn "*movsi_1_nointernunit"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=*?a,r,*?a,m,!*y,!m,!*y,!*Y,!m,!*Y")
+ (match_operand:SI 1 "general_operand" "im,rinm,rinm,rin,*y,*y,m,*Y,*Y,m"))]
+ "(!TARGET_INTER_UNIT_MOVES && !optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (get_attr_type (insn))
{
@@ -1200,7 +1241,7 @@
]
(const_string "imov")))
(set_attr "modrm" "0,*,0,*,*,*,*,*,*,*")
- (set_attr "mode" "SI,SI,SI,SI,SI,SI,DI,TI,SI,SI")])
+ (set_attr "mode" "SI,SI,SI,SI,DI,SI,SI,TI,SI,SI")])
;; Stores and loads of ax to arbitrary constant address.
;; We fake an second form of instruction to force reload to load address
@@ -1931,17 +1972,21 @@
"ix86_split_long_move (operands); DONE;")
(define_insn "*movdi_1_rex64"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,mr,!mr,!m*y,!*y,!*Y,!m,!*Y")
- (match_operand:DI 1 "general_operand" "Z,rem,i,re,n,*y,m,*Y,*Y,*m"))]
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,mr,!mr,!*y,!rm,!*y,!*Y,!rm,!*Y")
+ (match_operand:DI 1 "general_operand" "Z,rem,i,re,n,*y,*y,rm,*Y,*Y,rm"))]
"TARGET_64BIT
+ && (TARGET_INTER_UNIT_MOVES || optimize_size)
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (get_attr_type (insn))
{
case TYPE_SSEMOV:
- if (register_operand (operands[0], DImode)
- && register_operand (operands[1], DImode))
+ if (get_attr_mode (insn) == MODE_TI)
return "movdqa\t{%1, %0|%0, %1}";
+ /* Moves from and into integer registers are done using the movd opcode
+ with a REX prefix. */
+ if (GENERAL_REG_P (operands[0]) || GENERAL_REG_P (operands[1]))
+ return "movd\t{%1, %0|%0, %1}";
/* FALLTHRU */
case TYPE_MMXMOV:
return "movq\t{%1, %0|%0, %1}";
@@ -1961,9 +2006,9 @@
}
}
[(set (attr "type")
- (cond [(eq_attr "alternative" "5,6")
+ (cond [(eq_attr "alternative" "5,6,7")
(const_string "mmxmov")
- (eq_attr "alternative" "7,8")
+ (eq_attr "alternative" "8,9,10")
(const_string "ssemov")
(eq_attr "alternative" "4")
(const_string "multi")
@@ -1972,9 +2017,55 @@
(const_string "lea")
]
(const_string "imov")))
- (set_attr "modrm" "*,0,0,*,*,*,*,*,*,*")
- (set_attr "length_immediate" "*,4,8,*,*,*,*,*,*,*")
- (set_attr "mode" "SI,DI,DI,DI,SI,DI,DI,DI,TI,DI")])
+ (set_attr "modrm" "*,0,0,*,*,*,*,*,*,*,*")
+ (set_attr "length_immediate" "*,4,8,*,*,*,*,*,*,*,*")
+ (set_attr "mode" "SI,DI,DI,DI,SI,DI,DI,DI,TI,DI,DI")])
+
+(define_insn "*movdi_1_rex64_nointerunit"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,mr,!mr,!*y,!m,!*y,!*Y,!m,!*Y")
+ (match_operand:DI 1 "general_operand" "Z,rem,i,re,n,*y,*y,m,*Y,*Y,m"))]
+ "TARGET_64BIT
+ && (!TARGET_INTER_UNIT_MOVES && !optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+{
+ switch (get_attr_type (insn))
+ {
+ case TYPE_SSEMOV:
+ if (get_attr_mode (insn) == MODE_TI)
+ return "movdqa\t{%1, %0|%0, %1}";
+ /* FALLTHRU */
+ case TYPE_MMXMOV:
+ return "movq\t{%1, %0|%0, %1}";
+ case TYPE_MULTI:
+ return "#";
+ case TYPE_LEA:
+ return "lea{q}\t{%a1, %0|%0, %a1}";
+ default:
+ if (flag_pic && !LEGITIMATE_PIC_OPERAND_P (operands[1]))
+ abort ();
+ if (get_attr_mode (insn) == MODE_SI)
+ return "mov{l}\t{%k1, %k0|%k0, %k1}";
+ else if (which_alternative == 2)
+ return "movabs{q}\t{%1, %0|%0, %1}";
+ else
+ return "mov{q}\t{%1, %0|%0, %1}";
+ }
+}
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "5,6,7")
+ (const_string "mmxmov")
+ (eq_attr "alternative" "8,9,10")
+ (const_string "ssemov")
+ (eq_attr "alternative" "4")
+ (const_string "multi")
+ (and (ne (symbol_ref "flag_pic") (const_int 0))
+ (match_operand:DI 1 "symbolic_operand" ""))
+ (const_string "lea")
+ ]
+ (const_string "imov")))
+ (set_attr "modrm" "*,0,0,*,*,*,*,*,*,*,*")
+ (set_attr "length_immediate" "*,4,8,*,*,*,*,*,*,*,*")
+ (set_attr "mode" "SI,DI,DI,DI,SI,DI,DI,DI,TI,DI,DI")])
;; Stores and loads of ax to arbitrary constant address.
;; We fake an second form of instruction to force reload to load address
@@ -2129,7 +2220,109 @@
(define_insn "*movsf_1"
[(set (match_operand:SF 0 "nonimmediate_operand" "=f#xr,m,f#xr,r#xf,m,x#rf,x#rf,x#rf,m,!*y,!rm,!*y")
(match_operand:SF 1 "general_operand" "fm#rx,f#rx,G,rmF#fx,Fr#fx,C,x,xm#rf,x#rf,rm,*y,*y"))]
- "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ "(TARGET_INTER_UNIT_MOVES || optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && (reload_in_progress || reload_completed
+ || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
+ || GET_CODE (operands[1]) != CONST_DOUBLE
+ || memory_operand (operands[0], SFmode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REG_P (operands[1])
+ && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp\t%y0";
+ else if (STACK_TOP_P (operands[0]))
+ return "fld%z1\t%y1";
+ else
+ return "fst\t%y0";
+
+ case 1:
+ if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ return "fstp%z0\t%y0";
+ else
+ return "fst%z0\t%y0";
+
+ case 2:
+ switch (standard_80387_constant_p (operands[1]))
+ {
+ case 1:
+ return "fldz";
+ case 2:
+ return "fld1";
+ }
+ abort();
+
+ case 3:
+ case 4:
+ return "mov{l}\t{%1, %0|%0, %1}";
+ case 5:
+ if (get_attr_mode (insn) == MODE_TI)
+ return "pxor\t%0, %0";
+ else
+ return "xorps\t%0, %0";
+ case 6:
+ if (get_attr_mode (insn) == MODE_V4SF)
+ return "movaps\t{%1, %0|%0, %1}";
+ else
+ return "movss\t{%1, %0|%0, %1}";
+ case 7:
+ case 8:
+ return "movss\t{%1, %0|%0, %1}";
+
+ case 9:
+ case 10:
+ return "movd\t{%1, %0|%0, %1}";
+
+ case 11:
+ return "movq\t{%1, %0|%0, %1}";
+
+ default:
+ abort();
+ }
+}
+ [(set_attr "type" "fmov,fmov,fmov,imov,imov,ssemov,ssemov,ssemov,ssemov,mmxmov,mmxmov,mmxmov")
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "3,4,9,10")
+ (const_string "SI")
+ (eq_attr "alternative" "5")
+ (if_then_else
+ (and (and (ne (symbol_ref "TARGET_SSE_LOAD0_BY_PXOR")
+ (const_int 0))
+ (ne (symbol_ref "TARGET_SSE2")
+ (const_int 0)))
+ (eq (symbol_ref "optimize_size")
+ (const_int 0)))
+ (const_string "TI")
+ (const_string "V4SF"))
+ /* For architectures resolving dependencies on
+ whole SSE registers use APS move to break dependency
+ chains, otherwise use short move to avoid extra work.
+
+ Do the same for architectures resolving dependencies on
+ the parts. While in DF mode it is better to always handle
+ just register parts, the SF mode is different due to lack
+ of instructions to load just part of the register. It is
+ better to maintain the whole registers in single format
+ to avoid problems on using packed logical operations. */
+ (eq_attr "alternative" "6")
+ (if_then_else
+ (ior (ne (symbol_ref "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
+ (const_int 0))
+ (ne (symbol_ref "TARGET_SSE_PARTIAL_REGS")
+ (const_int 0)))
+ (const_string "V4SF")
+ (const_string "SF"))
+ (eq_attr "alternative" "11")
+ (const_string "DI")]
+ (const_string "SF")))])
+
+(define_insn "*movsf_1_nointerunit"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f#xr,m,f#xr,r#xf,m,x#rf,x#rf,x#rf,m,!*y,!m,!*y")
+ (match_operand:SF 1 "general_operand" "fm#rx,f#rx,G,rmF#fx,Fr#fx,C,x,xm#rf,x#rf,m,*y,*y"))]
+ "(!TARGET_INTER_UNIT_MOVES && !optimize_size)
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
&& (reload_in_progress || reload_completed
|| (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
|| GET_CODE (operands[1]) != CONST_DOUBLE
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index ef4ec0a0786..a06c2f93071 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -1246,6 +1246,7 @@ unlink_other_notes (insn, tail)
/* See sched_analyze to see how these are handled. */
if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
{
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index 5fa64f1f623..c298453f798 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -56,7 +56,9 @@ static const char *ebb_print_insn PARAMS ((rtx, int));
static int rank PARAMS ((rtx, rtx));
static int contributes_to_priority PARAMS ((rtx, rtx));
static void compute_jump_reg_dependencies PARAMS ((rtx, regset));
-static void schedule_ebb PARAMS ((rtx, rtx));
+static basic_block schedule_ebb PARAMS ((rtx, rtx));
+static basic_block fix_basic_block_boundaries PARAMS ((basic_block, basic_block, rtx, rtx));
+static void add_missing_bbs PARAMS ((rtx, basic_block, basic_block));
/* Return nonzero if there are more insns that should be scheduled. */
@@ -139,8 +141,17 @@ ebb_print_insn (insn, aligned)
static int
rank (insn1, insn2)
- rtx insn1 ATTRIBUTE_UNUSED, insn2 ATTRIBUTE_UNUSED;
+ rtx insn1, insn2;
{
+ basic_block bb1 = BLOCK_FOR_INSN (insn1);
+ basic_block bb2 = BLOCK_FOR_INSN (insn2);
+
+ if (bb1->count > bb2->count
+ || bb1->frequency > bb2->frequency)
+ return -1;
+ if (bb1->count < bb2->count
+ || bb1->frequency < bb2->frequency)
+ return 1;
return 0;
}
@@ -192,18 +203,157 @@ static struct sched_info ebb_sched_info =
0, 1
};
+/* It is possible that ebb scheduling eliminated some blocks.
+ Place blocks from FIRST to LAST before BEFORE. */
+
+static void
+add_missing_bbs (before, first, last)
+ rtx before;
+ basic_block first, last;
+{
+ for (; last != first->prev_bb; last = last->prev_bb)
+ {
+ before = emit_note_before (NOTE_INSN_BASIC_BLOCK, before);
+ NOTE_BASIC_BLOCK (before) = last;
+ last->head = before;
+ last->end = before;
+ update_bb_for_insn (last);
+ }
+}
+
+/* Fixup the CFG after EBB scheduling. Re-recognize the basic
+ block boundaries in between HEAD and TAIL and update basic block
+ structures between BB and LAST. */
+
+static basic_block
+fix_basic_block_boundaries (bb, last, head, tail)
+ basic_block bb, last;
+ rtx head, tail;
+{
+ rtx insn = head;
+ rtx last_inside = bb->head;
+ rtx aftertail = NEXT_INSN (tail);
+
+ head = bb->head;
+
+ for (; insn != aftertail; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ abort ();
+ /* Create new basic blocks just before first insn. */
+ if (inside_basic_block_p (insn))
+ {
+ if (!last_inside)
+ {
+ rtx note;
+
+ /* Re-emit the basic block note for newly found BB header. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ note = emit_note_after (NOTE_INSN_BASIC_BLOCK, insn);
+ head = insn;
+ last_inside = note;
+ }
+ else
+ {
+ note = emit_note_before (NOTE_INSN_BASIC_BLOCK, insn);
+ head = note;
+ last_inside = insn;
+ }
+ }
+ else
+ last_inside = insn;
+ }
+ /* A control flow instruction terminates a basic block. It is possible
+ that we have eliminated some basic blocks (made them empty).
+ Find the proper basic block using BLOCK_FOR_INSN and arrange things in
+ a sensible way by inserting empty basic blocks as needed. */
+ if (control_flow_insn_p (insn) || (insn == tail && last_inside))
+ {
+ basic_block curr_bb = BLOCK_FOR_INSN (insn);
+ rtx note;
+
+ if (!control_flow_insn_p (insn))
+ curr_bb = last;
+ if (bb == last->next_bb)
+ {
+ edge f;
+ rtx h;
+
+ /* An obscure special case, where we have a partially dead
+ instruction scheduled after the last control flow instruction.
+ In this case we can create a new basic block. It is
+ always exactly one basic block last in the sequence. Handle
+ it by splitting the edge and repositioning the block.
+ This is somewhat hackish, but at least avoids cut&paste.
+
+ A safer solution would be to bring the code into sequence,
+ do the split and re-emit it back in case this ever
+ triggers a problem. */
+ f = bb->prev_bb->succ;
+ while (f && !(f->flags & EDGE_FALLTHRU))
+ f = f->succ_next;
+
+ if (f)
+ {
+ last = curr_bb = split_edge (f);
+ h = curr_bb->head;
+ curr_bb->head = head;
+ curr_bb->end = insn;
+ /* Edge splitting created a misplaced BASIC_BLOCK note; kill
+ it. */
+ delete_insn (h);
+ }
+ /* It may happen that code got moved past unconditional jump in
+ case the code is completely dead. Kill it. */
+ else
+ {
+ rtx next = next_nonnote_insn (insn);
+ delete_insn_chain (head, insn);
+ /* We keep some notes in the way that may split barrier from the
+ jump. */
+ if (GET_CODE (next) == BARRIER)
+ {
+ emit_barrier_after (prev_nonnote_insn (head));
+ delete_insn (next);
+ }
+ insn = NULL;
+ }
+ }
+ else
+ {
+ curr_bb->head = head;
+ curr_bb->end = insn;
+ add_missing_bbs (curr_bb->head, bb, curr_bb->prev_bb);
+ }
+ note = GET_CODE (head) == CODE_LABEL ? NEXT_INSN (head) : head;
+ NOTE_BASIC_BLOCK (note) = curr_bb;
+ update_bb_for_insn (curr_bb);
+ bb = curr_bb->next_bb;
+ last_inside = NULL;
+ if (!insn)
+ break;
+ }
+ }
+ add_missing_bbs (last->next_bb->head, bb, last);
+ return bb->prev_bb;
+}
+
/* Schedule a single extended basic block, defined by the boundaries HEAD
and TAIL. */
-static void
+static basic_block
schedule_ebb (head, tail)
rtx head, tail;
{
int n_insns;
+ basic_block b;
struct deps tmp_deps;
+ basic_block first_bb = BLOCK_FOR_INSN (head);
+ basic_block last_bb = BLOCK_FOR_INSN (tail);
if (no_real_insns_p (head, tail))
- return;
+ return BLOCK_FOR_INSN (tail);
init_deps_global ();
@@ -266,8 +416,10 @@ schedule_ebb (head, tail)
if (write_symbols != NO_DEBUG)
restore_line_notes (head, tail);
+ b = fix_basic_block_boundaries (first_bb, last_bb, head, tail);
finish_deps_global ();
+ return b;
}
/* The one entry point in this file. DUMP_FILE is the dump file for
@@ -309,17 +461,8 @@ schedule_ebbs (dump_file)
break;
if (! e)
break;
- if (GET_CODE (tail) == JUMP_INSN)
- {
- rtx x = find_reg_note (tail, REG_BR_PROB, 0);
- if (x)
- {
- int pred_val = INTVAL (XEXP (x, 0));
- if (pred_val > REG_BR_PROB_BASE / 2)
- break;
- }
- }
-
+ if (e->probability < REG_BR_PROB_BASE / 2)
+ break;
bb = bb->next_bb;
}
@@ -337,11 +480,11 @@ schedule_ebbs (dump_file)
break;
}
- schedule_ebb (head, tail);
+ bb = schedule_ebb (head, tail);
}
- /* It doesn't make much sense to try and update life information here - we
- probably messed up even the flow graph. */
+ /* Updating life info can be done by local propagation over the modified
+ superblocks. */
/* Reposition the prologue and epilogue notes in case we moved the
prologue/epilogue insns. */
@@ -352,4 +495,8 @@ schedule_ebbs (dump_file)
rm_redundant_line_notes ();
sched_finish ();
+
+#ifdef ENABLE_CHECKING
+ verify_flow_info ();
+#endif
}