author    wilson <wilson@138bc75d-0d04-0410-961f-82ee72b054a4>    2004-04-21 23:21:59 +0000
committer wilson <wilson@138bc75d-0d04-0410-961f-82ee72b054a4>    2004-04-21 23:21:59 +0000
commit    b462e7568ee276323f5896929fa8923cda71e6bf (patch)
tree      5cf9396ca05bbcaf40749be722dbfd67a385d142
parent    46978c1cdab19014ba82252d65a0aae64506aa40 (diff)
download  gcc-b462e7568ee276323f5896929fa8923cda71e6bf.tar.gz
MIPS SB-1 DFA scheduler.
* config/mips/mips-protos.h (fp_register_operand, lo_operand): Declare.
* config/mips/mips.c (mips_multipass_dfa_lookahead): Declare.
(TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD): New.
(fp_register_operand, lo_operand): New.
(mips_rtx_costs): Add TUNE_SB1 support.
(mips_issue_rate): Add comment.  Add PROCESSOR_SB1 support.
(mips_use_dfa_pipeline_interface): Add PROCESSOR_SB1 support.
(mips_multipass_dfa_lookahead): New.
* config/mips/mips.h (MASK_FP_EXCEPTIONS, TARGET_FP_EXCEPTIONS,
TUNE_SB1): New.
(TARGET_SWITCHES): Add -mfp-exceptions support.
(TARGET_FP_EXCEPTIONS_DEFAULT): New.
(BRANCH_COST): Fix whitespace.
* config/mips/mips.md: Include sb1.md.
* config/mips/sb1.md: New file.
* doc/invoke.texi: Document -mfp-exceptions.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@80988 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  gcc/ChangeLog                    17
-rw-r--r--  gcc/config/mips/mips-protos.h     2
-rw-r--r--  gcc/config/mips/mips.c           61
-rw-r--r--  gcc/config/mips/mips.h           18
-rw-r--r--  gcc/config/mips/mips.md           1
-rw-r--r--  gcc/config/mips/sb1.md          504
-rw-r--r--  gcc/doc/invoke.texi              14
7 files changed, 611 insertions(+), 6 deletions(-)
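
For readers following the diffs below, the new scheduling support is wired up through two standard target hooks.  The sketch below is a condensed paraphrase of the mips.c changes, not the literal source; it assumes the usual MIPS backend context (mips_tune, PROCESSOR_SB1) and omits the other processors handled by the real functions.

/* Condensed paraphrase of the scheduler hooks added in mips.c below.  */

static int
mips_issue_rate (void)
{
  /* The SB-1 can issue 4 insns per cycle, but claiming 3 avoids unwanted
     speculative code motion and matches what is usually reachable.  */
  if (mips_tune == PROCESSOR_SB1)
    return 3;
  return 1;
}

static int
mips_multipass_dfa_lookahead (void)
{
  /* The multipass lookahead should be as wide as the scheduling freedom
     in the DFA: up to 4 of the 6 SB-1 function units per cycle.  */
  if (mips_tune == PROCESSOR_SB1)
    return 4;
  return 0;
}

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead
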
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 586132ff890..9b22f4db7ee 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -10,6 +10,23 @@
2004-04-21 James E Wilson <wilson@specifixinc.com>
+ * config/mips/mips-protos.h (fp_register_operand, lo_operand): Declare.
+ * config/mips/mips.c (mips_multipass_dfa_lookahead): Declare.
+ (TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD): New.
+ (fp_register_operand, lo_operand): New.
+ (mips_rtx_costs): Add TUNE_SB1 support.
+ (mips_issue_rate): Add comment. Add PROCESSOR_SB1 support.
+ (mips_use_dfa_pipeline_interface): Add PROCESSOR_SB1 support.
+ (mips_multipass_dfa_lookahead): New.
+ * config/mips/mips.h (MASK_FP_EXCEPTIONS, TARGET_FP_EXCEPTIONS,
+ TUNE_SB1): New.
+ (TARGET_SWITCHES): Add -mfp-exceptions support.
+ (TARGET_FP_EXCEPTIONS_DEFAULT): New.
+ (BRANCH_COST): Fix whitespace.
+ * config/mips/mips.md: Include sb1.md.
+ * config/mips/sb1.md: New file.
+ * doc/invoke.texi: Document -mfp-exceptions.
+
* Makefile.in (fixinc.sh): Don't set or export WARN_CFLAGS. Fix
comment.
* fixinc/Makefile.in (FL_LIST): Don't mention WARN_CFLAGS.
diff --git a/gcc/config/mips/mips-protos.h b/gcc/config/mips/mips-protos.h
index fac82469662..b8afafb0f69 100644
--- a/gcc/config/mips/mips-protos.h
+++ b/gcc/config/mips/mips-protos.h
@@ -93,6 +93,8 @@ extern int mips_address_insns (rtx, enum machine_mode);
extern int mips_const_insns (rtx);
extern int mips_fetch_insns (rtx);
extern int mips_idiv_insns (void);
+extern int fp_register_operand (rtx, enum machine_mode);
+extern int lo_operand (rtx, enum machine_mode);
extern bool mips_legitimate_address_p (enum machine_mode, rtx, int);
extern rtx mips_unspec_address (rtx, enum mips_symbol_type);
extern bool mips_legitimize_address (rtx *, enum machine_mode);
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 46a22e334bb..69398c60a07 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -232,6 +232,7 @@ static bool mips_return_in_memory (tree, tree);
static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
static int mips_issue_rate (void);
static int mips_use_dfa_pipeline_interface (void);
+static int mips_multipass_dfa_lookahead (void);
static void mips_init_libfuncs (void);
static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
tree, int *, int);
@@ -680,6 +681,9 @@ const struct mips_cpu_info mips_cpu_info_table[] = {
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE mips_use_dfa_pipeline_interface
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+ mips_multipass_dfa_lookahead
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
@@ -1661,6 +1665,23 @@ stack_operand (rtx op, enum machine_mode mode)
&& addr.reg == stack_pointer_rtx);
}
+/* Helper function for DFA schedulers. Return true if OP is a floating
+ point register. */
+
+int
+fp_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return REG_P (op) && FP_REG_P (REGNO (op));
+}
+
+/* Helper function for DFA schedulers. Return true if OP is a LO reg. */
+
+int
+lo_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return REG_P (op) && REGNO (op) == LO_REGNUM;
+}
+
/* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
returns a nonzero value if X is a legitimate address for a memory
@@ -2265,6 +2286,8 @@ mips_rtx_costs (rtx x, int code, int outer_code, int *total)
*total = COSTS_N_INSNS (2);
else if (TUNE_MIPS6000)
*total = COSTS_N_INSNS (3);
+ else if (TUNE_SB1)
+ *total = COSTS_N_INSNS (4);
else
*total = COSTS_N_INSNS (6);
return true;
@@ -2289,7 +2312,8 @@ mips_rtx_costs (rtx x, int code, int outer_code, int *total)
{
if (TUNE_MIPS3000
|| TUNE_MIPS3900
- || TUNE_MIPS5000)
+ || TUNE_MIPS5000
+ || TUNE_SB1)
*total = COSTS_N_INSNS (4);
else if (TUNE_MIPS6000
|| TUNE_MIPS5400
@@ -2302,7 +2326,9 @@ mips_rtx_costs (rtx x, int code, int outer_code, int *total)
if (mode == DFmode)
{
- if (TUNE_MIPS3000
+ if (TUNE_SB1)
+ *total = COSTS_N_INSNS (4);
+ else if (TUNE_MIPS3000
|| TUNE_MIPS3900
|| TUNE_MIPS5000)
*total = COSTS_N_INSNS (5);
@@ -2319,7 +2345,7 @@ mips_rtx_costs (rtx x, int code, int outer_code, int *total)
*total = COSTS_N_INSNS (12);
else if (TUNE_MIPS3900)
*total = COSTS_N_INSNS (2);
- else if (TUNE_MIPS5400 || TUNE_MIPS5500)
+ else if (TUNE_MIPS5400 || TUNE_MIPS5500 || TUNE_SB1)
*total = COSTS_N_INSNS ((mode == DImode) ? 4 : 3);
else if (TUNE_MIPS7000)
*total = COSTS_N_INSNS (mode == DImode ? 9 : 5);
@@ -2342,6 +2368,8 @@ mips_rtx_costs (rtx x, int code, int outer_code, int *total)
*total = COSTS_N_INSNS (12);
else if (TUNE_MIPS6000)
*total = COSTS_N_INSNS (15);
+ else if (TUNE_SB1)
+ *total = COSTS_N_INSNS (24);
else if (TUNE_MIPS5400 || TUNE_MIPS5500)
*total = COSTS_N_INSNS (30);
else
@@ -2358,6 +2386,8 @@ mips_rtx_costs (rtx x, int code, int outer_code, int *total)
*total = COSTS_N_INSNS (59);
else if (TUNE_MIPS6000)
*total = COSTS_N_INSNS (16);
+ else if (TUNE_SB1)
+ *total = COSTS_N_INSNS (32);
else
*total = COSTS_N_INSNS (36);
return true;
@@ -2373,6 +2403,8 @@ mips_rtx_costs (rtx x, int code, int outer_code, int *total)
*total = COSTS_N_INSNS (38);
else if (TUNE_MIPS5000)
*total = COSTS_N_INSNS (36);
+ else if (TUNE_SB1)
+ *total = COSTS_N_INSNS ((mode == SImode) ? 36 : 68);
else if (TUNE_MIPS5400 || TUNE_MIPS5500)
*total = COSTS_N_INSNS ((mode == SImode) ? 42 : 74);
else
@@ -9292,6 +9324,8 @@ mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
{
return !TARGET_OLDABI;
}
+
+/* Return the number of instructions that can be issued per cycle. */
static int
mips_issue_rate (void)
@@ -9304,6 +9338,13 @@ mips_issue_rate (void)
case PROCESSOR_R9000:
return 2;
+ case PROCESSOR_SB1:
+ /* This is actually 4, but we get better performance if we claim 3.
+ This is partly because of unwanted speculative code motion with the
+ larger number, and partly because in most common cases we can't
+ reach the theoretical max of 4. */
+ return 3;
+
default:
return 1;
}
@@ -9324,6 +9365,7 @@ mips_use_dfa_pipeline_interface (void)
case PROCESSOR_R5500:
case PROCESSOR_R7000:
case PROCESSOR_R9000:
+ case PROCESSOR_SB1:
case PROCESSOR_SR71000:
return true;
@@ -9332,6 +9374,19 @@ mips_use_dfa_pipeline_interface (void)
}
}
+/* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
+ be as wide as the scheduling freedom in the DFA. */
+
+static int
+mips_multipass_dfa_lookahead (void)
+{
+ /* Can schedule up to 4 of the 6 function units in any one cycle. */
+ if (mips_tune == PROCESSOR_SB1)
+ return 4;
+
+ return 0;
+}
+
const char *
mips_emit_prefetch (rtx *operands)
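
The two predicates added above (fp_register_operand, lo_operand) exist so that sb1.md can tell related instruction forms apart when choosing a reservation.  A standalone sketch of the decision they drive for coprocessor transfers, using a hypothetical helper name rather than GCC code:

/* Standalone sketch (hypothetical helper, not GCC source).  In sb1.md an
   "xfer" insn whose destination is an FP register is mtc1 and gets a
   5-cycle reservation on fp0; otherwise it is mfc1 and gets 1 cycle.
   The mfhilo split via lo_operand works the same way.  */

#include <stdbool.h>
#include <stdio.h>

static int
sb1_xfer_latency (bool dest_is_fp_register)
{
  return dest_is_fp_register ? 5 /* mtc1 */ : 1 /* mfc1 */;
}

int
main (void)
{
  printf ("mtc1 latency: %d cycles\n", sb1_xfer_latency (true));
  printf ("mfc1 latency: %d cycle\n", sb1_xfer_latency (false));
  return 0;
}
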
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index a70ea9c9044..0055828d9b6 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -173,6 +173,8 @@ extern const struct mips_cpu_info *mips_tune_info;
#define MASK_FIX_SB1 0x04000000 /* Work around SB-1 errata. */
#define MASK_FIX_VR4120 0x08000000 /* Work around VR4120 errata. */
+#define MASK_FP_EXCEPTIONS 0x10000000 /* FP exceptions are enabled. */
+
/* Debug switches, not documented */
#define MASK_DEBUG 0 /* unused */
#define MASK_DEBUG_C 0 /* don't expand seq, etc. */
@@ -257,6 +259,8 @@ extern const struct mips_cpu_info *mips_tune_info;
#define TARGET_FIX_R4400 (target_flags & MASK_FIX_R4400)
#define TARGET_FIX_VR4120 (target_flags & MASK_FIX_VR4120)
+#define TARGET_FP_EXCEPTIONS (target_flags & MASK_FP_EXCEPTIONS)
+
/* True if we should use NewABI-style relocation operators for
symbolic addresses. This is never true for mips16 code,
which has its own conventions. */
@@ -339,6 +343,7 @@ extern const struct mips_cpu_info *mips_tune_info;
#define TUNE_MIPS6000 (mips_tune == PROCESSOR_R6000)
#define TUNE_MIPS7000 (mips_tune == PROCESSOR_R7000)
#define TUNE_MIPS9000 (mips_tune == PROCESSOR_R9000)
+#define TUNE_SB1 (mips_tune == PROCESSOR_SB1)
#define TARGET_OLDABI (mips_abi == ABI_32 || mips_abi == ABI_O64)
#define TARGET_NEWABI (mips_abi == ABI_N32 || mips_abi == ABI_64)
@@ -630,6 +635,10 @@ extern const struct mips_cpu_info *mips_tune_info;
N_("Lift restrictions on GOT size") }, \
{"no-xgot", -MASK_XGOT, \
N_("Do not lift restrictions on GOT size") }, \
+ {"fp-exceptions", MASK_FP_EXCEPTIONS, \
+ N_("FP exceptions are enabled") }, \
+ {"no-fp-exceptions", -MASK_FP_EXCEPTIONS, \
+ N_("FP exceptions are not enabled") }, \
{"debug", MASK_DEBUG, \
NULL}, \
{"debugc", MASK_DEBUG_C, \
@@ -640,7 +649,8 @@ extern const struct mips_cpu_info *mips_tune_info;
NULL}, \
{"", (TARGET_DEFAULT \
| TARGET_CPU_DEFAULT \
- | TARGET_ENDIAN_DEFAULT), \
+ | TARGET_ENDIAN_DEFAULT \
+ | TARGET_FP_EXCEPTIONS_DEFAULT), \
NULL}, \
}
@@ -658,6 +668,10 @@ extern const struct mips_cpu_info *mips_tune_info;
#define TARGET_ENDIAN_DEFAULT MASK_BIG_ENDIAN
#endif
+#ifndef TARGET_FP_EXCEPTIONS_DEFAULT
+#define TARGET_FP_EXCEPTIONS_DEFAULT MASK_FP_EXCEPTIONS
+#endif
+
/* 'from-abi' makes a good default: you get whatever the ABI requires. */
#ifndef MIPS_ISA_DEFAULT
#ifndef MIPS_CPU_STRING_DEFAULT
@@ -2674,7 +2688,7 @@ typedef struct mips_args {
/* ??? Fix this to be right for the R8000. */
#define BRANCH_COST \
((! TARGET_MIPS16 \
- && (TUNE_MIPS4000 || TUNE_MIPS6000)) \
+ && (TUNE_MIPS4000 || TUNE_MIPS6000)) \
? 2 : 1)
/* If defined, modifies the length assigned to instruction INSN as a
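
The -mfp-exceptions switch added above follows the usual target_flags bit-mask pattern.  A minimal standalone illustration of that pattern (not GCC source; in the compiler the flag parsing is driven by TARGET_SWITCHES):

/* Minimal standalone illustration of the target_flags mask pattern used
   for -mfp-exceptions above.  Not GCC source.  */

#include <stdio.h>

#define MASK_FP_EXCEPTIONS 0x10000000   /* FP exceptions are enabled.  */
#define TARGET_FP_EXCEPTIONS (target_flags & MASK_FP_EXCEPTIONS)

/* TARGET_FP_EXCEPTIONS_DEFAULT puts the bit in the default flag set.  */
static unsigned int target_flags = MASK_FP_EXCEPTIONS;

int
main (void)
{
  printf ("default: FP exceptions %s\n", TARGET_FP_EXCEPTIONS ? "on" : "off");

  /* -mno-fp-exceptions clears the bit ({"no-fp-exceptions", -MASK_...}).  */
  target_flags &= ~MASK_FP_EXCEPTIONS;
  printf ("-mno-fp-exceptions: FP exceptions %s\n",
          TARGET_FP_EXCEPTIONS ? "on" : "off");
  return 0;
}
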
diff --git a/gcc/config/mips/mips.md b/gcc/config/mips/mips.md
index fad3b8d6c50..5e31e55203f 100644
--- a/gcc/config/mips/mips.md
+++ b/gcc/config/mips/mips.md
@@ -628,6 +628,7 @@
(include "5500.md")
(include "7000.md")
(include "9000.md")
+(include "sb1.md")
(include "sr71k.md")
;;
diff --git a/gcc/config/mips/sb1.md b/gcc/config/mips/sb1.md
new file mode 100644
index 00000000000..a58992be525
--- /dev/null
+++ b/gcc/config/mips/sb1.md
@@ -0,0 +1,504 @@
+;;
+;; DFA-based pipeline description for Broadcom SB-1
+;;
+
+;; The Broadcom SB-1 core is 4-way superscalar, in-order. It has 2 load/store
+;; pipes (one of which can support some ALU operations), 2 alu pipes, 2 FP
+;; pipes, and 1 MDMX pipe. It can issue 2 ls insns and 2 exe/fpu/mdmx insns
+;; each cycle.
+
+;; We model the 4-way issue by ordering unit choices. The possible choices are
+;; {ex1,fp1}|{ex0,fp0}|ls1|ls0. Instructions issue to the first eligible unit
+;; in the list in most cases. Non-indexed load/stores issue to ls0 first.
+;; Simple alu operations issue to ls1 if it is still available, and their
+;; operands are ready (no co-issue with loads), otherwise to the first
+;; available ex unit.
+
+;; When exceptions are enabled, FP insns can only be issued to fp1. This is
+;; to ensure that instructions complete in order. The -mfp-exceptions option
+;; can be used to specify whether the system has FP exceptions enabled or not.
+
+;; In 32-bit mode, dependent FP can't co-issue with load, and only one FP exe
+;; insn can issue per cycle (fp1).
+
+;; The A1 MDMX pipe is separate from the FP pipes, but uses the same register
+;; file. As a result, once an MDMX insn is issued, no FP insns can be issued
+;; for 3 cycles. When an FP insn is issued, no MDMX insn can be issued for
+;; 5 cycles. This is currently not handled because there is no MDMX insn
+;; support as yet.
+
+;;
+;; We use two automata. sb1_cpu_div is for the integer divides, which are
+;; not pipelined. sb1_cpu is for everything else.
+;;
+(define_automaton "sb1_cpu, sb1_cpu_div")
+
+;; Load/store function units.
+(define_cpu_unit "sb1_ls0" "sb1_cpu")
+(define_cpu_unit "sb1_ls1" "sb1_cpu")
+
+;; CPU function units.
+(define_cpu_unit "sb1_ex0" "sb1_cpu")
+(define_cpu_unit "sb1_ex1" "sb1_cpu")
+
+;; The divide unit is not pipelined, and blocks hi/lo reads and writes.
+(define_cpu_unit "sb1_div" "sb1_cpu_div")
+;; DMULT blocks any multiply from issuing in the next cycle.
+(define_cpu_unit "sb1_mul" "sb1_cpu")
+
+;; Floating-point units.
+(define_cpu_unit "sb1_fp0" "sb1_cpu")
+(define_cpu_unit "sb1_fp1" "sb1_cpu")
+
+;; Can only issue to one of the ex and fp pipes at a time.
+(exclusion_set "sb1_ex0" "sb1_fp0")
+(exclusion_set "sb1_ex1" "sb1_fp1")
+
+;; Define an SB-1 specific attribute to simplify some FP descriptions.
+;; We can use 2 FP pipes only if we have 64-bit FP code, and exceptions are
+;; disabled.
+
+(define_attr "sb1_fp_pipes" "one,two"
+ (cond [(and (ne (symbol_ref "TARGET_FLOAT64") (const_int 0))
+ (eq (symbol_ref "TARGET_FP_EXCEPTIONS") (const_int 0)))
+ (const_string "two")]
+ (const_string "one")))
+
+;; Define reservations for common combinations.
+
+;; For long cycle operations, the FPU has a 4 cycle pipeline that repeats,
+;; effectively re-issuing the operation every 4 cycles. This means that we
+;; can have at most 4 long-cycle operations per pipe.
+
+;; ??? The fdiv operations should be e.g.
+;; sb1_fp1_4cycles*7" | "sb1_fp0_4cycles*7
+;; but the DFA is too large when we do that. Perhaps have to use scheduler
+;; hooks here.
+
+;; ??? Try limiting scheduler to 2 long latency operations, and see if this
+;; results in a usable DFA, and whether it helps code performance.
+
+;;(define_reservation "sb1_fp0_4cycles" "sb1_fp0, nothing*3")
+;;(define_reservation "sb1_fp1_4cycles" "sb1_fp1, nothing*3")
+
+;;
+;; The instruction-execution-path/resource-usage descriptions (also known
+;; as reservation RTL) are ordered roughly based on the define attribute
+;; RTL for the "type" classification.
+;; When modifying, remember that the first test that matches is the
+;; reservation used!
+;;
+
+(define_insn_reservation "ir_sb1_unknown" 1
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "unknown,multi"))
+ "sb1_ls0+sb1_ls1+sb1_ex0+sb1_ex1+sb1_fp0+sb1_fp1")
+
+;; A predicted taken branch causes a 2 cycle ifetch bubble. A predicted not
+;; taken branch causes a 0 cycle ifetch bubble. A mispredicted branch causes
+;; an 8 cycle ifetch bubble. We assume all branches are predicted not taken.
+
+;; ??? This assumption that branches are predicted not taken should be
+;; investigated. Maybe using 2 here will give better results.
+
+(define_insn_reservation "ir_sb1_branch" 0
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "branch,jump,call"))
+ "sb1_ex0")
+
+;; ??? This is 1 cycle for ldl/ldr to ldl/ldr when they use the same data
+;; register as destination.
+
+;; ??? Can co-issue a load with a dependent arith insn if it executes on an EX
+;; unit. Can not co-issue if the dependent insn executes on an LS unit.
+
+;; A load normally has a latency of zero cycles. In some cases, dependent
+;; insns can be issued in the same cycle. However, a value of 1 gives
+;; better performance in empirical testing.
+
+(define_insn_reservation "ir_sb1_load" 1
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "load,prefetch"))
+ "sb1_ls0 | sb1_ls1")
+
+;; Can not co-issue fpload with fp exe when in 32-bit mode.
+
+(define_insn_reservation "ir_sb1_fpload" 0
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fpload")
+ (ne (symbol_ref "TARGET_FLOAT64")
+ (const_int 0))))
+ "sb1_ls0 | sb1_ls1")
+
+(define_insn_reservation "ir_sb1_fpload_32bitfp" 1
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fpload")
+ (eq (symbol_ref "TARGET_FLOAT64")
+ (const_int 0))))
+ "sb1_ls0 | sb1_ls1")
+
+;; Indexed loads can only execute on LS1 pipe.
+
+(define_insn_reservation "ir_sb1_fpidxload" 0
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fpidxload")
+ (ne (symbol_ref "TARGET_FLOAT64")
+ (const_int 0))))
+ "sb1_ls1")
+
+(define_insn_reservation "ir_sb1_fpidxload_32bitfp" 1
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fpidxload")
+ (eq (symbol_ref "TARGET_FLOAT64")
+ (const_int 0))))
+ "sb1_ls1")
+
+;; prefx can only execute on the ls1 pipe.
+
+(define_insn_reservation "ir_sb1_prefetchx" 0
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "prefetchx"))
+ "sb1_ls1")
+
+;; ??? There is a 4.5 cycle latency if a store is followed by a load, and
+;; there is a RAW dependency.
+
+(define_insn_reservation "ir_sb1_store" 1
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "store"))
+ "sb1_ls0+sb1_ex1 | sb1_ls0+sb1_ex0 | sb1_ls1+sb1_ex1 | sb1_ls1+sb1_ex0")
+
+(define_insn_reservation "ir_sb1_fpstore" 1
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "fpstore"))
+ "sb1_ls0+sb1_fp1 | sb1_ls0+sb1_fp0 | sb1_ls1+sb1_fp1 | sb1_ls1+sb1_fp0")
+
+;; Indexed stores can only execute on LS1 pipe.
+
+(define_insn_reservation "ir_sb1_fpidxstore" 1
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "fpidxstore"))
+ "sb1_ls1+sb1_fp1 | sb1_ls1+sb1_fp0")
+
+;; Load latencies are 3 cycles for one load to another load or store (address
+;; only). This is 0 cycles for one load to a store using it as the data
+;; written.
+
+;; This assumes that if a load is dependent on a previous insn, then it must
+;; be an address dependence.
+
+(define_bypass 3
+ "ir_sb1_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,ir_sb1_fpidxload,
+ ir_sb1_fpidxload_32bitfp"
+ "ir_sb1_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,ir_sb1_fpidxload,
+ ir_sb1_fpidxload_32bitfp,ir_sb1_prefetchx")
+
+(define_bypass 3
+ "ir_sb1_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,ir_sb1_fpidxload,
+ ir_sb1_fpidxload_32bitfp"
+ "ir_sb1_store,ir_sb1_fpstore,ir_sb1_fpidxstore"
+ "store_data_bypass_p")
+
+;; Simple alu instructions can execute on the LS1 unit.
+
+;; ??? A simple alu insn issued on an LS unit has 0 cycle latency to an EX
+;; insn, to a store (for data), and to an xfer insn. It has 1 cycle latency to
+;; another LS insn (excluding store data). A simple alu insn issued on an EX
+;; unit has a latency of 5 cycles when the result goes to an LS unit (excluding
+;; store data), otherwise a latency of 1 cycle.
+
+;; ??? We can not handle latencies properly for simple alu instructions
+;; within the DFA pipeline model. Latencies can be defined only from one
+;; insn reservation to another. We can't make them depend on which function
+;; unit was used. This isn't a DFA flaw. There is a conflict here, as we
+;; need to know the latency before we can determine which unit will be
+;; available, but we need to know which unit it is issued to before we can
+;; compute the latency. Perhaps this can be handled via scheduler hooks.
+;; This needs to be investigated.
+
+;; ??? Optimal scheduling taking the LS units into account seems to require
+;; a pre-scheduling pass. We need to determine which instructions feed results
+;; into store/load addresses, and thus benefit most from being issued to the
+;; LS unit. Also, we need to prune the list to ensure we don't overschedule
+;; insns to the LS unit, and that we don't conflict with insns that need LS1
+;; such as indexed loads. We then need to emit nops to ensure that simple
+;; alu instructions that are not supposed to be scheduled to LS1 don't
+;; accidentally end up there because LS1 is free when they are issued. This
+;; will be a lot of work, and it isn't clear how useful it will be.
+
+;; Empirical testing shows that 2 gives the best result.
+
+(define_insn_reservation "ir_sb1_simple_alu" 2
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "const,arith"))
+ "sb1_ls1 | sb1_ex1 | sb1_ex0")
+
+;; ??? condmove also includes some FP instructions that execute on the FP
+;; units. This needs to be clarified.
+
+(define_insn_reservation "ir_sb1_alu" 1
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "condmove,nop,shift"))
+ "sb1_ex1 | sb1_ex0")
+
+;; These are type arith/darith that only execute on the EX0 unit.
+
+(define_insn_reservation "ir_sb1_alu_0" 1
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "slt,clz,trap"))
+ "sb1_ex0")
+
+;; An alu insn issued on an EX unit has a latency of 5 cycles when the
+;; result goes to an LS unit (excluding store data).
+
+;; This assumes that if a load is dependent on a previous insn, then it must
+;; be an address dependence.
+
+(define_bypass 5
+ "ir_sb1_alu,ir_sb1_alu_0,ir_sb1_mfhi,ir_sb1_mflo"
+ "ir_sb1_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,ir_sb1_fpidxload,
+ ir_sb1_fpidxload_32bitfp,ir_sb1_prefetchx")
+
+(define_bypass 5
+ "ir_sb1_alu,ir_sb1_alu_0,ir_sb1_mfhi,ir_sb1_mflo"
+ "ir_sb1_store,ir_sb1_fpstore,ir_sb1_fpidxstore"
+ "store_data_bypass_p")
+
+;; mf{hi,lo} is 1 cycle.
+
+(define_insn_reservation "ir_sb1_mfhi" 1
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "mfhilo")
+ (not (match_operand 1 "lo_operand" ""))))
+ "sb1_ex1")
+
+(define_insn_reservation "ir_sb1_mflo" 1
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "mfhilo")
+ (match_operand 1 "lo_operand" "")))
+ "sb1_ex1")
+
+;; mt{hi,lo} to mul/div is 4 cycles.
+
+(define_insn_reservation "ir_sb1_mthilo" 4
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "mthilo"))
+ "sb1_ex1")
+
+;; mt{hi,lo} to mf{hi,lo} is 3 cycles.
+
+(define_bypass 3 "ir_sb1_mthilo" "ir_sb1_mfhi,ir_sb1_mflo")
+
+;; multiply latency to an EX operation is 3 cycles.
+
+;; ??? Should check whether we need to make multiply conflict with moves
+;; to/from hilo registers.
+
+(define_insn_reservation "ir_sb1_mulsi" 3
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "imul,imadd")
+ (eq_attr "mode" "SI")))
+ "sb1_ex1+sb1_mul")
+
+;; muldi to mfhi is 4 cycles.
+;; Blocks any other multiply insn issue for 1 cycle.
+
+(define_insn_reservation "ir_sb1_muldi" 4
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "imul")
+ (eq_attr "mode" "DI")))
+ "sb1_ex1+sb1_mul, sb1_mul")
+
+;; muldi to mflo is 3 cycles.
+
+(define_bypass 3 "ir_sb1_muldi" "ir_sb1_mflo")
+
+;; mul latency is 7 cycles if the result is used by any LS insn.
+
+;; This assumes that if a load is dependent on a previous insn, then it must
+;; be an address dependence.
+
+(define_bypass 7
+ "ir_sb1_mulsi,ir_sb1_muldi"
+ "ir_sb1_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,ir_sb1_fpidxload,
+ ir_sb1_fpidxload_32bitfp,ir_sb1_prefetchx")
+
+(define_bypass 7
+ "ir_sb1_mulsi,ir_sb1_muldi"
+ "ir_sb1_store,ir_sb1_fpstore,ir_sb1_fpidxstore"
+ "store_data_bypass_p")
+
+;; The divide unit is not pipelined. Divide busy is asserted in the 4th
+;; cycle, and then deasserted on the latency cycle. So only one divide at
+;; a time, but the first/last 4 cycles can overlap.
+
+;; ??? All divides block writes to hi/lo regs. hi/lo regs are written 4 cycles
+;; after the latency cycle for divides (e.g. 40/72). dmult writes lo in
+;; cycle 7, and hi in cycle 8. All other insns write hi/lo regs in cycle 7.
+;; Default for output dependencies is the difference in latencies, which is
+;; only 1 cycle off here, e.g. div to mtlo stalls for 32 cycles, but should
+;; stall for 33 cycles. This does not seem significant enough to worry about.
+
+(define_insn_reservation "ir_sb1_divsi" 36
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "sb1_ex1, nothing*3, sb1_div*32")
+
+(define_insn_reservation "ir_sb1_divdi" 68
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "sb1_ex1, nothing*3, sb1_div*64")
+
+(define_insn_reservation "ir_sb1_fpu_2pipes" 4
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fmove,fadd,fmul,fabs,fneg,fcvt")
+ (eq_attr "sb1_fp_pipes" "two")))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_fpu_1pipe" 4
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fmove,fadd,fmul,fabs,fneg,fcvt")
+ (eq_attr "sb1_fp_pipes" "one")))
+ "sb1_fp1")
+
+;; ??? madd/msub 4-cycle latency to itself (same fr?), but 8 cycle latency
+;; otherwise.
+
+;; ??? Blocks issue of another non-madd/msub after 4 cycles.
+
+(define_insn_reservation "ir_sb1_fmadd_2pipes" 8
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fmadd")
+ (eq_attr "sb1_fp_pipes" "two")))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_fmadd_1pipe" 8
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fmadd")
+ (eq_attr "sb1_fp_pipes" "one")))
+ "sb1_fp1")
+
+(define_insn_reservation "ir_sb1_fcmp" 4
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "fcmp"))
+ "sb1_fp1")
+
+;; mtc1 latency 5 cycles.
+
+(define_insn_reservation "ir_sb1_mtxfer" 5
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "xfer")
+ (match_operand 0 "fp_register_operand" "")))
+ "sb1_fp0")
+
+;; mfc1 latency 1 cycle.
+
+(define_insn_reservation "ir_sb1_mfxfer" 1
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "xfer")
+ (not (match_operand 0 "fp_register_operand" ""))))
+ "sb1_fp0")
+
+;; ??? Can deliver at most 1 result every 6 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_divsf_2pipes" 24
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fdiv")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_divsf_1pipes" 24
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fdiv")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result every 8 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_divdf_2pipes" 32
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fdiv")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_divdf_1pipe" 32
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fdiv")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result every 7 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_sqrtsf_2pipes" 28
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fsqrt")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_sqrtsf_1pipe" 28
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fsqrt")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result every 10 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_sqrtdf_2pipes" 40
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fsqrt")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_sqrtdf_1pipe" 40
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "fsqrt")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result every 4 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_rsqrtsf_2pipes" 16
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "frsqrt")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_rsqrtsf_1pipe" 16
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "frsqrt")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result every 7 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_rsqrtdf_2pipes" 28
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "frsqrt")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_rsqrtdf_1pipe" 28
+ (and (eq_attr "cpu" "sb1")
+ (and (eq_attr "type" "frsqrt")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index d204cf68f54..797efdc048d 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -482,7 +482,8 @@ in the following sections.
-mfix-r4000 -mno-fix-r4000 -mfix-r4400 -mno-fix-r4400 @gol
-mfix-vr4120 -mno-fix-vr4120 -mfix-sb1 -mno-fix-sb1 @gol
-mflush-func=@var{func} -mno-flush-func @gol
--mbranch-likely -mno-branch-likely}
+-mbranch-likely -mno-branch-likely @gol
+-mfp-exceptions -mno-fp-exceptions}
@emph{i386 and x86-64 Options}
@gccoptlist{-mtune=@var{cpu-type} -march=@var{cpu-type} @gol
@@ -8178,6 +8179,17 @@ architecture. An exception is for the MIPS32 and MIPS64 architectures
and processors which implement those architectures; for those, Branch
Likely instructions will not be generated by default because the MIPS32
and MIPS64 architectures specifically deprecate their use.
+
+@item -mfp-exceptions
+@itemx -mno-fp-exceptions
+@opindex mfp-exceptions
+Specifies whether FP exceptions are enabled. This affects how we schedule
+FP instructions for some processors. The default is that FP exceptions are
+enabled.
+
+For instance, on the SB-1, if FP exceptions are disabled, and we are emitting
+64-bit code, then we can use both FP pipes. Otherwise, we can only use one
+FP pipe.
@end table
@node i386 and x86-64 Options
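
The SB-1 consequence described above maps directly onto the sb1_fp_pipes attribute in sb1.md: both FP pipes are usable only when generating 64-bit FP code with FP exceptions disabled.  A standalone paraphrase of that condition (hypothetical helper name, not GCC source):

/* Standalone paraphrase of the sb1_fp_pipes condition in sb1.md.  */

#include <stdbool.h>
#include <stdio.h>

static int
sb1_fp_pipes (bool target_float64, bool target_fp_exceptions)
{
  /* Two FP pipes only with 64-bit FP registers and exceptions disabled;
     otherwise all FP execution is forced onto fp1.  */
  return (target_float64 && !target_fp_exceptions) ? 2 : 1;
}

int
main (void)
{
  /* -mfp-exceptions is the default, so only -mno-fp-exceptions combined
     with 64-bit FP code enables the second pipe.  */
  printf ("default:            %d FP pipe(s)\n", sb1_fp_pipes (true, true));
  printf ("-mno-fp-exceptions: %d FP pipe(s)\n", sb1_fp_pipes (true, false));
  return 0;
}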