Diffstat (limited to 'gcc/config/i386/x86-tune.def')
 gcc/config/i386/x86-tune.def | 92
 1 file changed, 49 insertions(+), 43 deletions(-)
diff --git a/gcc/config/i386/x86-tune.def b/gcc/config/i386/x86-tune.def
index 4c13c3a0ec6..ec96a4b2617 100644
--- a/gcc/config/i386/x86-tune.def
+++ b/gcc/config/i386/x86-tune.def
@@ -1,5 +1,5 @@
/* Definitions of x86 tunable features.
- Copyright (C) 2013 Free Software Foundation, Inc.
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
This file is part of GCC.
@@ -40,7 +40,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* X86_TUNE_SCHEDULE: Enable scheduling. */
DEF_TUNE (X86_TUNE_SCHEDULE, "schedule",
- m_PENT | m_PPRO | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE
+ m_PENT | m_PPRO | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_K6_GEODE
| m_AMD_MULTIPLE | m_GENERIC)
/* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming
@@ -48,7 +48,7 @@ DEF_TUNE (X86_TUNE_SCHEDULE, "schedule",
over partial stores. For example prefer MOVZBL or MOVQ to load an 8-bit
value over movb. */
DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
- m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE
+ m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_AMD_MULTIPLE
| m_GENERIC)
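
Illustration (not part of the patch): a minimal sketch of the partial-register dependency this flag works around; the function and the asm in the comment are assumptions for explanation, not compiler output.

unsigned int
load_byte (const unsigned char *p)
{
  /* Under partial_reg_dependency tuning GCC prefers the zero-extending
     load, which writes the whole destination register:

         movzbl (%rdi), %eax   # writes all of %eax, no false dependency
         movb   (%rdi), %al    # writes only %al, depends on old %eax

     so this function compiles to the movzbl form.  */
  return *p;
}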
/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store
@@ -58,7 +58,7 @@ DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
SPECfp regression, while enabling it on K8 brings roughly 2.4% regression
that can be partly masked by careful scheduling of moves. */
DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency",
- m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMDFAM10
+ m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_AMDFAM10
| m_BDVER | m_GENERIC)
/* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
@@ -84,13 +84,13 @@ DEF_TUNE (X86_TUNE_PARTIAL_FLAG_REG_STALL, "partial_flag_reg_stall",
/* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
partial dependencies. */
DEF_TUNE (X86_TUNE_MOVX, "movx",
- m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_GEODE
+ m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_GEODE
| m_AMD_MULTIPLE | m_GENERIC)
/* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by
full sized loads. */
DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall",
- m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC)
+ m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_AMD_MULTIPLE | m_GENERIC)
/* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent
conditional jump instruction for 32 bit TARGET.
@@ -102,29 +102,29 @@ DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_32, "fuse_cmp_and_branch_32",
conditional jump instruction for TARGET_64BIT.
FIXME: revisit for generic. */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_64, "fuse_cmp_and_branch_64",
- m_COREI7 | m_COREI7_AVX | m_HASWELL | m_BDVER)
+ m_NEHALEM | m_SANDYBRIDGE | m_HASWELL | m_BDVER)
/* X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS: Fuse compare with a
subsequent conditional jump instruction when the conditional jump
checks the sign flag (SF) or overflow flag (OF). */
DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS, "fuse_cmp_and_branch_soflags",
- m_COREI7 | m_COREI7_AVX | m_HASWELL | m_BDVER)
+ m_NEHALEM | m_SANDYBRIDGE | m_HASWELL | m_BDVER)
/* X86_TUNE_FUSE_ALU_AND_BRANCH: Fuse alu with a subsequent conditional
jump instruction when the alu instruction produces the CCFLAG consumed by
the conditional jump instruction. */
DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH, "fuse_alu_and_branch",
- m_COREI7_AVX | m_HASWELL)
+ m_SANDYBRIDGE | m_HASWELL)
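
Illustration (not part of the patch): the compare/branch pattern these fusion flags keep adjacent. The asm in the comment is a hedged example of the shape involved, not actual output.

extern void on_less (void);

void
branchy (int a, int b)
{
  /* The flag-setting compare stays next to the conditional jump:

         cmpl  %esi, %edi
         jl    .L4            # cmp+jcc can decode as one fused macro-op

     which Nehalem, Sandy Bridge, Haswell and Bulldozer reward.  */
  if (a < b)
    on_less ();
}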
/* X86_TUNE_REASSOC_INT_TO_PARALLEL: Try to produce parallel computations
during reassociation of integer computation. */
DEF_TUNE (X86_TUNE_REASSOC_INT_TO_PARALLEL, "reassoc_int_to_parallel",
- m_ATOM)
+ m_BONNELL)
/* X86_TUNE_REASSOC_FP_TO_PARALLEL: Try to produce parallel computations
during reassociation of fp computation. */
DEF_TUNE (X86_TUNE_REASSOC_FP_TO_PARALLEL, "reassoc_fp_to_parallel",
- m_ATOM | m_SLM | m_HASWELL | m_BDVER1 | m_BDVER2 | m_GENERIC)
+ m_BONNELL | m_SILVERMONT | m_HASWELL | m_BDVER1 | m_BDVER2 | m_GENERIC)
/*****************************************************************************/
/* Function prologue, epilogue and function calling sequences. */
@@ -142,33 +142,33 @@ DEF_TUNE (X86_TUNE_REASSOC_FP_TO_PARALLEL, "reassoc_fp_to_parallel",
Bobcat and Generic. This is because disabling it causes a large
regression on mgrid due to an IRA limitation leading to unnecessary
use of the frame pointer in 32bit mode. */
-DEF_TUNE (X86_TUNE_ACCUMULATE_OUTGOING_ARGS, "accumulate_outgoing_args",
- m_PPRO | m_P4_NOCONA | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC)
+DEF_TUNE (X86_TUNE_ACCUMULATE_OUTGOING_ARGS, "accumulate_outgoing_args",
+ m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_AMD_MULTIPLE | m_GENERIC)
/* X86_TUNE_PROLOGUE_USING_MOVE: Do not use push/pop in prologues that are
considered on critical path. */
-DEF_TUNE (X86_TUNE_PROLOGUE_USING_MOVE, "prologue_using_move",
+DEF_TUNE (X86_TUNE_PROLOGUE_USING_MOVE, "prologue_using_move",
m_PPRO | m_ATHLON_K8)
/* X86_TUNE_EPILOGUE_USING_MOVE: Do not use push/pop in epilogues that are
considered on critical path. */
DEF_TUNE (X86_TUNE_EPILOGUE_USING_MOVE, "epilogue_using_move",
- m_PPRO | m_ATHLON_K8)
+ m_PPRO | m_ATHLON_K8)
/* X86_TUNE_USE_LEAVE: Use "leave" instruction in epilogues where it fits. */
-DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave",
+DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave",
m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC)
/* X86_TUNE_PUSH_MEMORY: Enable generation of "push mem" instructions.
Some chips, like the 486 and Pentium, work faster with separate load
and push instructions. */
-DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory",
- m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE
+DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory",
+ m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE
| m_GENERIC)
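
Illustration (not part of the patch): what push_memory toggles when passing a stack argument in 32-bit code; the frame offsets in the comment are assumptions.

extern void callee (int);

void
pass_arg (int x)
{
  /* With push_memory enabled the argument is pushed straight from memory:

         pushl  8(%ebp)            # single "push mem" instruction

     otherwise it is loaded first, which the 486 and Pentium prefer:

         movl   8(%ebp), %eax
         pushl  %eax               # separate load + push  */
  callee (x);
}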
/* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
over esp subtraction. */
-DEF_TUNE (X86_TUNE_SINGLE_PUSH, "single_push", m_386 | m_486 | m_PENT
+DEF_TUNE (X86_TUNE_SINGLE_PUSH, "single_push", m_386 | m_486 | m_PENT
| m_K6_GEODE)
/* X86_TUNE_DOUBLE_PUSH: Enable if double push insn is preferred
@@ -189,7 +189,7 @@ DEF_TUNE (X86_TUNE_DOUBLE_POP, "double_pop", m_PENT)
/* X86_TUNE_PAD_SHORT_FUNCTION: Make every function at least 4
instructions long. */
-DEF_TUNE (X86_TUNE_PAD_SHORT_FUNCTION, "pad_short_function", m_ATOM)
+DEF_TUNE (X86_TUNE_PAD_SHORT_FUNCTION, "pad_short_function", m_BONNELL)
/* X86_TUNE_PAD_RETURNS: Place NOP before every RET that is a destination
of conditional jump or directly preceded by other jump instruction.
@@ -202,7 +202,7 @@ DEF_TUNE (X86_TUNE_PAD_RETURNS, "pad_returns",
/* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
than 4 branch instructions in the 16 byte window. */
DEF_TUNE (X86_TUNE_FOUR_JUMP_LIMIT, "four_jump_limit",
- m_PPRO | m_P4_NOCONA | m_ATOM | m_SLM | m_ATHLON_K8 | m_AMDFAM10)
+ m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_ATHLON_K8 | m_AMDFAM10)
/*****************************************************************************/
/* Integer instruction selection tuning */
@@ -224,26 +224,26 @@ DEF_TUNE (X86_TUNE_READ_MODIFY, "read_modify", ~(m_PENT | m_PPRO))
/* X86_TUNE_USE_INCDEC: Enable use of inc/dec instructions. */
DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec",
- ~(m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_GENERIC))
+ ~(m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_GENERIC))
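
Illustration (not part of the patch): the inc/dec choice this flag controls, sketched for a simple increment.

int
next (int x)
{
  /* With use_incdec disabled (P4/Nocona, Core, Bonnell, Silvermont and
     generic tuning, per the inverted mask above) this compiles to

         addl  $1, %eax        # rewrites all of EFLAGS

     rather than

         incl  %eax            # leaves CF untouched

     because inc/dec's partial EFLAGS update can force a flags merge on
     those cores.  */
  return x + 1;
}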
/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
for DFmode copies */
DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves",
- ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM
+ ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
| m_GEODE | m_AMD_MULTIPLE | m_GENERIC))
/* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
will impact LEA instruction selection. */
-DEF_TUNE (X86_TUNE_OPT_AGU, "opt_agu", m_ATOM | m_SLM)
+DEF_TUNE (X86_TUNE_OPT_AGU, "opt_agu", m_BONNELL | m_SILVERMONT)
/* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
- vector path on AMD machines.
+ vector path on AMD machines.
FIXME: Do we need to enable this for core? */
DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM32_MEM, "slow_imul_imm32_mem",
m_K8 | m_AMDFAM10)
/* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
- machines.
+ machines.
FIXME: Do we need to enable this for core? */
DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM8, "slow_imul_imm8",
m_K8 | m_AMDFAM10)
@@ -251,7 +251,7 @@ DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM8, "slow_imul_imm8",
/* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for
a conditional move. */
DEF_TUNE (X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE, "avoid_mem_opnd_for_cmove",
- m_ATOM | m_SLM)
+ m_BONNELL | m_SILVERMONT)
/* X86_TUNE_SINGLE_STRINGOP: Enable use of single string operations, such
as MOVS and STOS (without a REP prefix) to move/set sequences of bytes. */
@@ -268,15 +268,15 @@ DEF_TUNE (X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES,
/* X86_TUNE_USE_SAHF: Controls use of SAHF. */
DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf",
- m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE
+ m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_K6_GEODE
| m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC)
/* X86_TUNE_USE_CLTD: Controls use of CLTD and CQTO instructions. */
-DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd", ~(m_PENT | m_ATOM | m_SLM | m_K6))
+DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd", ~(m_PENT | m_BONNELL | m_SILVERMONT | m_K6))
/* X86_TUNE_USE_BT: Enable use of BT (bit test) instructions. */
DEF_TUNE (X86_TUNE_USE_BT, "use_bt",
- m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC)
+ m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_AMD_MULTIPLE | m_GENERIC)
/*****************************************************************************/
/* 387 instruction selection tuning */
@@ -285,21 +285,21 @@ DEF_TUNE (X86_TUNE_USE_BT, "use_bt",
/* X86_TUNE_USE_HIMODE_FIOP: Enables use of x87 instructions with 16bit
integer operand.
FIXME: Why is this disabled for modern chips? */
-DEF_TUNE (X86_TUNE_USE_HIMODE_FIOP, "use_himode_fiop",
+DEF_TUNE (X86_TUNE_USE_HIMODE_FIOP, "use_himode_fiop",
m_386 | m_486 | m_K6_GEODE)
/* X86_TUNE_USE_SIMODE_FIOP: Enables use of x87 instructions with 32bit
integer operand. */
DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop",
- ~(m_PENT | m_PPRO | m_CORE_ALL | m_ATOM
- | m_SLM | m_AMD_MULTIPLE | m_GENERIC))
+ ~(m_PENT | m_PPRO | m_CORE_ALL | m_BONNELL
+ | m_SILVERMONT | m_AMD_MULTIPLE | m_GENERIC))
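
A hedged sketch (not part of the patch) of the x87 memory-integer forms these FIOP flags enable, assuming x87 arithmetic is in use (e.g. 32-bit code with -mfpmath=387); the operand address shown is illustrative.

double
add_int (double d, int i)
{
  /* Where use_simode_fiop is allowed, the addition may use the
     32-bit-integer memory form directly:

         fiaddl  (%esp)        # x87 add with a 32-bit integer operand

     instead of a separate fildl load followed by faddp.  */
  return d + i;
}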
/* X86_TUNE_USE_FFREEP: Use ffreep instruction instead of fstp. */
DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE)
/* X86_TUNE_EXT_80387_CONSTANTS: Use fancy 80387 constants, such as PI. */
DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants",
- m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE
+ m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_K6_GEODE
| m_ATHLON_K8 | m_GENERIC)
/*****************************************************************************/
@@ -308,7 +308,7 @@ DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants",
/* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
instructions. */
-DEF_TUNE (X86_TUNE_VECTORIZE_DOUBLE, "vectorize_double", ~m_ATOM)
+DEF_TUNE (X86_TUNE_VECTORIZE_DOUBLE, "vectorize_double", ~m_BONNELL)
/* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE
regs instead of memory. */
@@ -318,12 +318,12 @@ DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill",
/* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads instead
of a sequence loading registers by parts. */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
- m_COREI7 | m_COREI7_AVX | m_HASWELL | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM | m_GENERIC)
+ m_NEHALEM | m_SANDYBRIDGE | m_HASWELL | m_AMDFAM10 | m_BDVER | m_BTVER | m_SILVERMONT | m_GENERIC)
/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores instead
of a sequence loading registers by parts. */
DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
- m_COREI7 | m_COREI7_AVX | m_HASWELL | m_BDVER | m_SLM | m_GENERIC)
+ m_NEHALEM | m_SANDYBRIDGE | m_HASWELL | m_BDVER | m_SILVERMONT | m_GENERIC)
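
Illustration (not part of the patch): the two load strategies the unaligned-load flag selects between, written with SSE2 intrinsics as a stand-in for what the backend emits.

#include <emmintrin.h>

/* One movupd when sse_unaligned_load_optimal is set ...  */
__m128d
load_whole (const double *p)
{
  return _mm_loadu_pd (p);
}

/* ... versus loading the vector by halves when it is clear.  */
__m128d
load_split (const double *p)
{
  __m128d lo = _mm_load_sd (p);      /* movsd:  low 64 bits   */
  return _mm_loadh_pd (lo, p + 1);   /* movhpd: high 64 bits  */
}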
/* Use packed single precision instructions where possible, i.e. movups instead
of movupd. */
@@ -360,7 +360,7 @@ DEF_TUNE (X86_TUNE_INTER_UNIT_CONVERSIONS, "inter_unit_conversions",
/* X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS: Try to split memory operand for
fp converts to destination register. */
DEF_TUNE (X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS, "split_mem_opnd_for_fp_converts",
- m_SLM)
+ m_SILVERMONT)
/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
from FP to FP. This form of instructions avoids partial write to the
@@ -378,13 +378,13 @@ DEF_TUNE (X86_TUNE_USE_VECTOR_CONVERTS, "use_vector_converts", m_AMDFAM10)
/* X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL: if false, unaligned loads are
split. */
-DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal",
- ~(m_COREI7 | m_COREI7_AVX | m_GENERIC))
+DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal",
+ ~(m_NEHALEM | m_SANDYBRIDGE | m_GENERIC))
/* X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL: if false, unaligned stores are
split. */
-DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL, "256_unaligned_store_optimal",
- ~(m_COREI7 | m_COREI7_AVX | m_BDVER | m_GENERIC))
+DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL, "256_unaligned_store_optimal",
+ ~(m_NEHALEM | m_SANDYBRIDGE | m_BDVER | m_GENERIC))
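
Similarly for 256-bit AVX (not part of the patch; intrinsics stand in for the split the compiler performs when the flag is clear):

#include <immintrin.h>

/* One vmovups when 256_unaligned_load_optimal is set ...  */
__m256
load256_whole (const float *p)
{
  return _mm256_loadu_ps (p);
}

/* ... versus two 128-bit halves when unaligned 256-bit loads are split.  */
__m256
load256_split (const float *p)
{
  __m256 lo = _mm256_castps128_ps256 (_mm_loadu_ps (p));
  return _mm256_insertf128_ps (lo, _mm_loadu_ps (p + 4), 1);
}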
/* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for
the auto-vectorizer. */
@@ -401,7 +401,7 @@ DEF_TUNE (X86_TUNE_DOUBLE_WITH_ADD, "double_with_add", ~m_386)
/* X86_TUNE_ALWAYS_FANCY_MATH_387: controls use of fancy 387 operations,
such as fsqrt, fprem, fsin, fcos, fsincos etc.
Should be enabled for all targets that always have a coprocessor. */
-DEF_TUNE (X86_TUNE_ALWAYS_FANCY_MATH_387, "always_fancy_math_387",
+DEF_TUNE (X86_TUNE_ALWAYS_FANCY_MATH_387, "always_fancy_math_387",
~(m_386 | m_486))
/* X86_TUNE_UNROLL_STRLEN: Produce (quite lame) unrolled sequence for
@@ -503,3 +503,9 @@ DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", ~0)
arithmetic to 32bit via PROMOTE_MODE macro. This code generation scheme
is usually used for RISC targets. */
DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", 0)
+
+/* X86_TUNE_ADJUST_UNROLL: This enables adjusting the unroll factor based
+ on hardware capabilities. Bdver3 hardware has a loop buffer which makes
+ unrolling small loops less important. For such architectures we adjust
+ the unroll factor so that the unrolled loop fits the loop buffer. */
+DEF_TUNE (X86_TUNE_ADJUST_UNROLL, "adjust_unroll_factor", m_BDVER3 | m_BDVER4)
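
For context, a sketch of how these DEF_TUNE entries are consumed elsewhere in the backend; this paraphrases the pattern used in i386.h/i386.c of this era, and exact declarations may differ between releases.

/* i386.h derives an index for every entry ...  */
enum ix86_tune_indices {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) tune,
#include "x86-tune.def"
#undef DEF_TUNE
  X86_TUNE_LAST
};

/* ... and i386.c collects each selector mask, so the m_BDVER3 | m_BDVER4
   mask above simply enables X86_TUNE_ADJUST_UNROLL whenever the bit of
   the CPU selected by -mtune is in the mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) selector,
#include "x86-tune.def"
#undef DEF_TUNE
};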