Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/alpha/alpha.c          48
-rw-r--r--  gcc/config/arm/arm.c               5
-rw-r--r--  gcc/config/arm/iwmmxt.md          36
-rw-r--r--  gcc/config/bfin/bfin.c            17
-rw-r--r--  gcc/config/frv/frv.c               2
-rw-r--r--  gcc/config/i386/darwin.h           4
-rw-r--r--  gcc/config/i386/i386-c.c         344
-rw-r--r--  gcc/config/i386/i386-protos.h      7
-rw-r--r--  gcc/config/i386/i386.c          1262
-rw-r--r--  gcc/config/i386/i386.h           215
-rw-r--r--  gcc/config/i386/i386.opt         163
-rw-r--r--  gcc/config/i386/t-i386            13
-rw-r--r--  gcc/config/ia64/ia64.c            26
-rw-r--r--  gcc/config/m32c/m32c-protos.h      4
-rw-r--r--  gcc/config/m32c/m32c.c             2
-rw-r--r--  gcc/config/mips/mips.c            20
-rw-r--r--  gcc/config/pa/pa.c                 7
-rw-r--r--  gcc/config/rs6000/rs6000-c.c      12
-rw-r--r--  gcc/config/rs6000/rs6000.c        80
-rw-r--r--  gcc/config/s390/s390.c            40
-rw-r--r--  gcc/config/sh/sh.c                52
-rw-r--r--  gcc/config/sh/sh.h                 4
-rw-r--r--  gcc/config/sparc/sparc.c          10
-rw-r--r--  gcc/config/sparc/sparc.h           7
-rw-r--r--  gcc/config/spu/spu-c.c            74
-rw-r--r--  gcc/config/spu/spu.c              33
-rw-r--r--  gcc/config/stormy16/stormy16.c    21
-rw-r--r--  gcc/config/xtensa/xtensa.c        30
28 files changed, 1855 insertions, 683 deletions
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index 350994234cd..e2835acd3b7 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -1,6 +1,6 @@
/* Subroutines used for code generation on the DEC Alpha.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
- 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
@@ -51,7 +51,7 @@ along with GCC; see the file COPYING3. If not see
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
@@ -5817,11 +5817,11 @@ va_list_skip_additions (tree lhs)
if (TREE_CODE (stmt) == PHI_NODE)
return stmt;
- if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
- || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
+ if (TREE_CODE (stmt) != MODIFY_EXPR
+ || TREE_OPERAND (stmt, 0) != lhs)
return lhs;
- rhs = GIMPLE_STMT_OPERAND (stmt, 1);
+ rhs = TREE_OPERAND (stmt, 1);
if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
rhs = TREE_OPERAND (rhs, 0);
@@ -5856,11 +5856,17 @@ va_list_skip_additions (tree lhs)
current statement. */
static bool
-alpha_stdarg_optimize_hook (struct stdarg_info *si, const_tree lhs, const_tree rhs)
+alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
{
tree base, offset, arg1, arg2;
int offset_arg = 1;
+#if 1
+ /* FIXME tuples. */
+ (void) si;
+ (void) stmt;
+ return false;
+#else
while (handled_component_p (rhs))
rhs = TREE_OPERAND (rhs, 0);
if (TREE_CODE (rhs) != INDIRECT_REF
@@ -5953,6 +5959,7 @@ alpha_stdarg_optimize_hook (struct stdarg_info *si, const_tree lhs, const_tree r
escapes:
si->va_list_escapes = true;
return false;
+#endif
}
#endif
@@ -6087,7 +6094,7 @@ alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
nextarg = plus_constant (nextarg, offset);
nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
make_tree (ptr_type_node, nextarg));
TREE_SIDE_EFFECTS (t) = 1;
@@ -6106,20 +6113,20 @@ alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
size_int (offset));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
- offset_field, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
}
static tree
-alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
+alpha_gimplify_va_arg_1 (tree type, tree base, gimple_seq offset,
+ gimple_seq *pre_p)
{
tree type_size, ptr_type, addend, t, addr, internal_post;
@@ -6128,9 +6135,9 @@ alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
{
t = build_int_cst (TREE_TYPE (offset), 6*8);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
- build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
- gimplify_and_add (t, pre_p);
+ gimplify_assign (offset,
+ build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
+ pre_p);
}
addend = offset;
@@ -6182,15 +6189,15 @@ alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
t = size_binop (MULT_EXPR, t, size_int (8));
}
t = fold_convert (TREE_TYPE (offset), t);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
- build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
- gimplify_and_add (t, pre_p);
+ gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
+ pre_p);
return build_va_arg_indirect_ref (addr);
}
static tree
-alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
tree offset_field, base_field, offset, base, t, r;
bool indirect;
@@ -6222,9 +6229,8 @@ alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
/* Stuff the offset temporary back into its field. */
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
- fold_convert (TREE_TYPE (offset_field), offset));
- gimplify_and_add (t, pre_p);
+ gimplify_assign (offset_field,
+ fold_convert (TREE_TYPE (offset_field), offset), pre_p);
if (indirect)
r = build_va_arg_indirect_ref (r);
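The alpha.c hunks above (and the frv.c hunk further down) apply one recurring tuples-conversion idiom: instead of building a GIMPLE_MODIFY_STMT tree by hand and passing it to gimplify_and_add, the assignment is emitted straight into the pre-queue with gimplify_assign. A minimal sketch of that idiom, for orientation only (GCC-internal code, not part of the patch; the helper name is invented):

/* Old style: build a MODIFY_EXPR tree, then gimplify it into *PRE_P.
     t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
                 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, step));
     gimplify_and_add (t, pre_p);  */

/* New style: gimplify_assign emits the assignment into *PRE_P directly.  */
static void
advance_offset (tree offset, tree step, gimple_seq *pre_p)
{
  gimplify_assign (offset,
                   build2 (PLUS_EXPR, TREE_TYPE (offset), offset, step),
                   pre_p);
}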
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index d64b6fda34f..dfee7c3740e 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -3267,11 +3267,6 @@ arm_function_in_section_p (tree decl, section *section)
/* If DECL_SECTION_NAME is set, assume it is trustworthy. */
if (!DECL_SECTION_NAME (decl))
{
- /* Only cater for unit-at-a-time mode, where we know that the user
- cannot later specify a section for DECL. */
- if (!flag_unit_at_a_time)
- return false;
-
/* Make sure that we will not create a unique section for DECL. */
if (flag_function_sections || DECL_ONE_ONLY (decl))
return false;
diff --git a/gcc/config/arm/iwmmxt.md b/gcc/config/arm/iwmmxt.md
index 633aaaa875f..b484b55e6cd 100644
--- a/gcc/config/arm/iwmmxt.md
+++ b/gcc/config/arm/iwmmxt.md
@@ -168,8 +168,8 @@
)
(define_insn "movv8qi_internal"
- [(set (match_operand:V8QI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r")
- (match_operand:V8QI 1 "general_operand" "y,y,mi,y,r,r,mi"))]
+ [(set (match_operand:V8QI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r,?m")
+ (match_operand:V8QI 1 "general_operand" "y,y,mi,y,r,r,mi,r"))]
"TARGET_REALLY_IWMMXT"
"*
switch (which_alternative)
@@ -183,14 +183,14 @@
default: return output_move_double (operands);
}"
[(set_attr "predicable" "yes")
- (set_attr "length" "4, 4, 4,4,4,8, 8")
- (set_attr "type" "*,store1,load1,*,*,*,load1")
- (set_attr "pool_range" "*, *, 256,*,*,*, 256")
- (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244")])
+ (set_attr "length" "4, 4, 4,4,4,8, 8,8")
+ (set_attr "type" "*,store1,load1,*,*,*,load1,store1")
+ (set_attr "pool_range" "*, *, 256,*,*,*, 256,*")
+ (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244,*")])
(define_insn "movv4hi_internal"
- [(set (match_operand:V4HI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r")
- (match_operand:V4HI 1 "general_operand" "y,y,mi,y,r,r,mi"))]
+ [(set (match_operand:V4HI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r,?m")
+ (match_operand:V4HI 1 "general_operand" "y,y,mi,y,r,r,mi,r"))]
"TARGET_REALLY_IWMMXT"
"*
switch (which_alternative)
@@ -204,14 +204,14 @@
default: return output_move_double (operands);
}"
[(set_attr "predicable" "yes")
- (set_attr "length" "4, 4, 4,4,4,8, 8")
- (set_attr "type" "*,store1,load1,*,*,*,load1")
- (set_attr "pool_range" "*, *, 256,*,*,*, 256")
- (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244")])
+ (set_attr "length" "4, 4, 4,4,4,8, 8,8")
+ (set_attr "type" "*,store1,load1,*,*,*,load1,store1")
+ (set_attr "pool_range" "*, *, 256,*,*,*, 256,*")
+ (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244,*")])
(define_insn "movv2si_internal"
- [(set (match_operand:V2SI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r")
- (match_operand:V2SI 1 "general_operand" "y,y,mi,y,r,r,mi"))]
+ [(set (match_operand:V2SI 0 "nonimmediate_operand" "=y,m,y,?r,?y,?r,?r,?m")
+ (match_operand:V2SI 1 "general_operand" "y,y,mi,y,r,r,mi,r"))]
"TARGET_REALLY_IWMMXT"
"*
switch (which_alternative)
@@ -225,10 +225,10 @@
default: return output_move_double (operands);
}"
[(set_attr "predicable" "yes")
- (set_attr "length" "4, 4, 4,4,4,8, 24")
- (set_attr "type" "*,store1,load1,*,*,*,load1")
- (set_attr "pool_range" "*, *, 256,*,*,*, 256")
- (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244")])
+ (set_attr "length" "4, 4, 4,4,4,8, 24,8")
+ (set_attr "type" "*,store1,load1,*,*,*,load1,store1")
+ (set_attr "pool_range" "*, *, 256,*,*,*, 256,*")
+ (set_attr "neg_pool_range" "*, *, 244,*,*,*, 244,*")])
;; This pattern should not be needed. It is to match a
;; weird case generated by GCC when no optimizations are
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index 05a5e495cea..843726067b6 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -1129,8 +1129,7 @@ bfin_load_pic_reg (rtx dest)
struct cgraph_local_info *i = NULL;
rtx addr, insn;
- if (flag_unit_at_a_time)
- i = cgraph_local_info (current_function_decl);
+ i = cgraph_local_info (current_function_decl);
/* Functions local to the translation unit don't need to reload the
pic reg, since the caller always passes a usable one. */
@@ -1906,6 +1905,7 @@ static bool
bfin_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
tree exp ATTRIBUTE_UNUSED)
{
+ struct cgraph_local_info *this_func, *called_func;
e_funkind fkind = funkind (TREE_TYPE (current_function_decl));
if (fkind != SUBROUTINE)
return false;
@@ -1917,17 +1917,10 @@ bfin_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
not need to reload P5 in the prologue, but the sibcall will pop P5 in the
sibcall epilogue, and we end up with the wrong value in P5. */
- if (!flag_unit_at_a_time || decl == NULL)
- /* Not enough information. */
- return false;
-
- {
- struct cgraph_local_info *this_func, *called_func;
- this_func = cgraph_local_info (current_function_decl);
- called_func = cgraph_local_info (decl);
- return !called_func->local || this_func->local;
- }
+ this_func = cgraph_local_info (current_function_decl);
+ called_func = cgraph_local_info (decl);
+ return !called_func->local || this_func->local;
}
/* Emit RTL insns to initialize the variable parts of a trampoline at
diff --git a/gcc/config/frv/frv.c b/gcc/config/frv/frv.c
index 6ba924b1df7..07e8eaae928 100644
--- a/gcc/config/frv/frv.c
+++ b/gcc/config/frv/frv.c
@@ -2207,7 +2207,7 @@ frv_expand_builtin_va_start (tree valist, rtx nextarg)
debug_rtx (nextarg);
}
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
fold_convert (TREE_TYPE (valist),
make_tree (sizetype, nextarg)));
TREE_SIDE_EFFECTS (t) = 1;
diff --git a/gcc/config/i386/darwin.h b/gcc/config/i386/darwin.h
index 6001f64b42a..c6ed10d8a72 100644
--- a/gcc/config/i386/darwin.h
+++ b/gcc/config/i386/darwin.h
@@ -263,8 +263,8 @@ extern void darwin_x86_file_end (void);
: (n) >= 11 && (n) <= 18 ? (n) + 1 \
: (n))
-#undef REGISTER_TARGET_PRAGMAS
-#define REGISTER_TARGET_PRAGMAS() DARWIN_REGISTER_TARGET_PRAGMAS()
+#undef REGISTER_SUBTARGET_PRAGMAS
+#define REGISTER_SUBTARGET_PRAGMAS() DARWIN_REGISTER_TARGET_PRAGMAS()
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES darwin_set_default_type_attributes
diff --git a/gcc/config/i386/i386-c.c b/gcc/config/i386/i386-c.c
new file mode 100644
index 00000000000..f0a3a17f9f6
--- /dev/null
+++ b/gcc/config/i386/i386-c.c
@@ -0,0 +1,344 @@
+/* Subroutines used for macro/preprocessor support on the ia-32.
+ Copyright (C) 2008
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "tm_p.h"
+#include "flags.h"
+#include "c-common.h"
+#include "ggc.h"
+#include "target.h"
+#include "target-def.h"
+#include "cpplib.h"
+#include "c-pragma.h"
+
+static bool ix86_pragma_option_parse (tree);
+static void ix86_target_macros_internal
+ (int, enum processor_type, enum processor_type, enum fpmath_unit,
+ void (*def_or_undef) (cpp_reader *, const char *));
+
+
+/* Internal function to either define or undef the appropriate system
+ macros. */
+static void
+ix86_target_macros_internal (int isa_flag,
+ enum processor_type arch,
+ enum processor_type tune,
+ enum fpmath_unit fpmath,
+ void (*def_or_undef) (cpp_reader *,
+ const char *))
+{
+ /* For some of the k6/pentium variants there weren't separate ISA bits to
+ identify which tune/arch flag was passed, so figure it out here. */
+ size_t arch_len = strlen (ix86_arch_string);
+ size_t tune_len = strlen (ix86_tune_string);
+ int last_arch_char = ix86_arch_string[arch_len - 1];
+ int last_tune_char = ix86_tune_string[tune_len - 1];
+
+ /* Built-ins based on -march=. */
+ switch (arch)
+ {
+ case PROCESSOR_I386:
+ break;
+ case PROCESSOR_I486:
+ def_or_undef (parse_in, "__i486");
+ def_or_undef (parse_in, "__i486__");
+ break;
+ case PROCESSOR_PENTIUM:
+ def_or_undef (parse_in, "__i586");
+ def_or_undef (parse_in, "__i586__");
+ def_or_undef (parse_in, "__pentium");
+ def_or_undef (parse_in, "__pentium__");
+ if (isa_flag & OPTION_MASK_ISA_MMX)
+ def_or_undef (parse_in, "__pentium_mmx__");
+ break;
+ case PROCESSOR_PENTIUMPRO:
+ def_or_undef (parse_in, "__i686");
+ def_or_undef (parse_in, "__i686__");
+ def_or_undef (parse_in, "__pentiumpro");
+ def_or_undef (parse_in, "__pentiumpro__");
+ break;
+ case PROCESSOR_GEODE:
+ def_or_undef (parse_in, "__geode");
+ def_or_undef (parse_in, "__geode__");
+ break;
+ case PROCESSOR_K6:
+ def_or_undef (parse_in, "__k6");
+ def_or_undef (parse_in, "__k6__");
+ if (last_arch_char == '2')
+ def_or_undef (parse_in, "__k6_2__");
+ else if (last_arch_char == '3')
+ def_or_undef (parse_in, "__k6_3__");
+ else if (isa_flag & OPTION_MASK_ISA_3DNOW)
+ def_or_undef (parse_in, "__k6_3__");
+ break;
+ case PROCESSOR_ATHLON:
+ def_or_undef (parse_in, "__athlon");
+ def_or_undef (parse_in, "__athlon__");
+ if (isa_flag & OPTION_MASK_ISA_SSE)
+ def_or_undef (parse_in, "__athlon_sse__");
+ break;
+ case PROCESSOR_K8:
+ def_or_undef (parse_in, "__k8");
+ def_or_undef (parse_in, "__k8__");
+ break;
+ case PROCESSOR_AMDFAM10:
+ def_or_undef (parse_in, "__amdfam10");
+ def_or_undef (parse_in, "__amdfam10__");
+ break;
+ case PROCESSOR_PENTIUM4:
+ def_or_undef (parse_in, "__pentium4");
+ def_or_undef (parse_in, "__pentium4__");
+ break;
+ case PROCESSOR_NOCONA:
+ def_or_undef (parse_in, "__nocona");
+ def_or_undef (parse_in, "__nocona__");
+ break;
+ case PROCESSOR_CORE2:
+ def_or_undef (parse_in, "__core2");
+ def_or_undef (parse_in, "__core2__");
+ break;
+ /* use PROCESSOR_max to not set/unset the arch macro. */
+ case PROCESSOR_max:
+ break;
+ case PROCESSOR_GENERIC32:
+ case PROCESSOR_GENERIC64:
+ gcc_unreachable ();
+ }
+
+ /* Built-ins based on -mtune=. */
+ switch (tune)
+ {
+ case PROCESSOR_I386:
+ def_or_undef (parse_in, "__tune_i386__");
+ break;
+ case PROCESSOR_I486:
+ def_or_undef (parse_in, "__tune_i486__");
+ break;
+ case PROCESSOR_PENTIUM:
+ def_or_undef (parse_in, "__tune_i586__");
+ def_or_undef (parse_in, "__tune_pentium__");
+ if (last_tune_char == 'x')
+ def_or_undef (parse_in, "__tune_pentium_mmx__");
+ break;
+ case PROCESSOR_PENTIUMPRO:
+ def_or_undef (parse_in, "__tune_i686__");
+ def_or_undef (parse_in, "__tune_pentiumpro__");
+ switch (last_tune_char)
+ {
+ case '3':
+ def_or_undef (parse_in, "__tune_pentium3__");
+ /* FALLTHRU */
+ case '2':
+ def_or_undef (parse_in, "__tune_pentium2__");
+ break;
+ }
+ break;
+ case PROCESSOR_GEODE:
+ def_or_undef (parse_in, "__tune_geode__");
+ break;
+ case PROCESSOR_K6:
+ def_or_undef (parse_in, "__tune_k6__");
+ if (last_tune_char == '2')
+ def_or_undef (parse_in, "__tune_k6_2__");
+ else if (last_tune_char == '3')
+ def_or_undef (parse_in, "__tune_k6_3__");
+ else if (isa_flag & OPTION_MASK_ISA_3DNOW)
+ def_or_undef (parse_in, "__tune_k6_3__");
+ break;
+ case PROCESSOR_ATHLON:
+ def_or_undef (parse_in, "__tune_athlon__");
+ if (isa_flag & OPTION_MASK_ISA_SSE)
+ def_or_undef (parse_in, "__tune_athlon_sse__");
+ break;
+ case PROCESSOR_K8:
+ def_or_undef (parse_in, "__tune_k8__");
+ break;
+ case PROCESSOR_AMDFAM10:
+ def_or_undef (parse_in, "__tune_amdfam10__");
+ break;
+ case PROCESSOR_PENTIUM4:
+ def_or_undef (parse_in, "__tune_pentium4__");
+ break;
+ case PROCESSOR_NOCONA:
+ def_or_undef (parse_in, "__tune_nocona__");
+ break;
+ case PROCESSOR_CORE2:
+ def_or_undef (parse_in, "__tune_core2__");
+ break;
+ case PROCESSOR_GENERIC32:
+ case PROCESSOR_GENERIC64:
+ break;
+ /* use PROCESSOR_max to not set/unset the tune macro. */
+ case PROCESSOR_max:
+ break;
+ }
+
+ if (isa_flag & OPTION_MASK_ISA_MMX)
+ def_or_undef (parse_in, "__MMX__");
+ if (isa_flag & OPTION_MASK_ISA_3DNOW)
+ def_or_undef (parse_in, "__3dNOW__");
+ if (isa_flag & OPTION_MASK_ISA_3DNOW_A)
+ def_or_undef (parse_in, "__3dNOW_A__");
+ if (isa_flag & OPTION_MASK_ISA_SSE)
+ def_or_undef (parse_in, "__SSE__");
+ if (isa_flag & OPTION_MASK_ISA_SSE2)
+ def_or_undef (parse_in, "__SSE2__");
+ if (isa_flag & OPTION_MASK_ISA_SSE3)
+ def_or_undef (parse_in, "__SSE3__");
+ if (isa_flag & OPTION_MASK_ISA_SSSE3)
+ def_or_undef (parse_in, "__SSSE3__");
+ if (isa_flag & OPTION_MASK_ISA_SSE4_1)
+ def_or_undef (parse_in, "__SSE4_1__");
+ if (isa_flag & OPTION_MASK_ISA_SSE4_2)
+ def_or_undef (parse_in, "__SSE4_2__");
+ if (isa_flag & OPTION_MASK_ISA_AES)
+ def_or_undef (parse_in, "__AES__");
+ if (isa_flag & OPTION_MASK_ISA_PCLMUL)
+ def_or_undef (parse_in, "__PCLMUL__");
+ if (isa_flag & OPTION_MASK_ISA_SSE4A)
+ def_or_undef (parse_in, "__SSE4A__");
+ if (isa_flag & OPTION_MASK_ISA_SSE5)
+ def_or_undef (parse_in, "__SSE5__");
+ if ((fpmath & FPMATH_SSE) && (isa_flag & OPTION_MASK_ISA_SSE))
+ def_or_undef (parse_in, "__SSE_MATH__");
+ if ((fpmath & FPMATH_SSE) && (isa_flag & OPTION_MASK_ISA_SSE2))
+ def_or_undef (parse_in, "__SSE2_MATH__");
+}
+
+
+/* Hook to validate the current #pragma option and set the state, and update
+ the macros based on what was changed. */
+
+static bool
+ix86_pragma_option_parse (tree args)
+{
+ tree prev_tree = build_target_option_node ();
+ tree cur_tree;
+ struct cl_target_option *prev_opt;
+ struct cl_target_option *cur_opt;
+ int prev_isa;
+ int cur_isa;
+ int diff_isa;
+ enum processor_type prev_arch;
+ enum processor_type prev_tune;
+ enum processor_type cur_arch;
+ enum processor_type cur_tune;
+
+ if (! args)
+ {
+ cur_tree = target_option_default_node;
+ cl_target_option_restore (TREE_TARGET_OPTION (cur_tree));
+ }
+ else
+ {
+ cur_tree = ix86_valid_option_attribute_tree (args);
+ if (!cur_tree)
+ return false;
+ }
+
+ target_option_current_node = cur_tree;
+
+ /* Figure out the previous/current isa, arch, tune and the differences. */
+ prev_opt = TREE_TARGET_OPTION (prev_tree);
+ cur_opt = TREE_TARGET_OPTION (cur_tree);
+ prev_isa = prev_opt->ix86_isa_flags;
+ cur_isa = cur_opt->ix86_isa_flags;
+ diff_isa = (prev_isa ^ cur_isa);
+ prev_arch = prev_opt->arch;
+ prev_tune = prev_opt->tune;
+ cur_arch = cur_opt->arch;
+ cur_tune = cur_opt->tune;
+
+ /* If the same processor is used for both previous and current options, don't
+ change the macros. */
+ if (cur_arch == prev_arch)
+ cur_arch = prev_arch = PROCESSOR_max;
+
+ if (cur_tune == prev_tune)
+ cur_tune = prev_tune = PROCESSOR_max;
+
+ /* Undef all of the macros for options that are no longer current. */
+ ix86_target_macros_internal (prev_isa & diff_isa,
+ prev_arch,
+ prev_tune,
+ prev_opt->fpmath,
+ cpp_undef);
+
+ /* Define all of the macros for new options that were just turned on. */
+ ix86_target_macros_internal (cur_isa & diff_isa,
+ cur_arch,
+ cur_tune,
+ cur_opt->fpmath,
+ cpp_define);
+
+ return true;
+}
+
+/* Function to tell the preprocessor about the defines for the current target. */
+
+void
+ix86_target_macros (void)
+{
+ /* 32/64-bit won't change with target specific options, so do the assert and
+ builtin_define_std calls here. */
+ if (TARGET_64BIT)
+ {
+ cpp_assert (parse_in, "cpu=x86_64");
+ cpp_assert (parse_in, "machine=x86_64");
+ cpp_define (parse_in, "__amd64");
+ cpp_define (parse_in, "__amd64__");
+ cpp_define (parse_in, "__x86_64");
+ cpp_define (parse_in, "__x86_64__");
+ }
+ else
+ {
+ cpp_assert (parse_in, "cpu=i386");
+ cpp_assert (parse_in, "machine=i386");
+ builtin_define_std ("i386");
+ }
+
+ ix86_target_macros_internal (ix86_isa_flags,
+ ix86_arch,
+ ix86_tune,
+ ix86_fpmath,
+ cpp_define);
+}
+
+
+/* Register target pragmas. We need to add the hook for parsing #pragma GCC
+ option here rather than in i386.c since it will pull in various preprocessor
+ functions, and those are not present in languages like fortran without a
+ preprocessor. */
+
+void
+ix86_register_pragmas (void)
+{
+ /* Update pragma hook to allow parsing #pragma GCC option. */
+ targetm.target_option.pragma_parse = ix86_pragma_option_parse;
+
+#ifdef REGISTER_SUBTARGET_PRAGMAS
+ REGISTER_SUBTARGET_PRAGMAS ();
+#endif
+}
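The macros that ix86_target_macros_internal defines or undefines are the ones ordinary user code already tests; handling them here lets the #pragma GCC option hook above redefine them when the target options change within a translation unit. Purely as an illustration (not part of the patch), such a test looks like:

/* With the patched compiler, -march=core2 (for example) causes
   ix86_target_macros to define these through cpp_define.  */
#include <stdio.h>

int
main (void)
{
#ifdef __SSE2__
  puts ("__SSE2__ defined");
#endif
#ifdef __tune_core2__
  puts ("__tune_core2__ defined");
#endif
  return 0;
}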
diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h
index 634a4254f06..3ebfd3cd9a9 100644
--- a/gcc/config/i386/i386-protos.h
+++ b/gcc/config/i386/i386-protos.h
@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Functions in i386.c */
-extern void override_options (void);
+extern void override_options (bool);
extern void optimization_options (int, int);
extern int ix86_can_use_return_insn_p (void);
@@ -202,6 +202,7 @@ extern int ix86_constant_alignment (tree, int);
extern tree ix86_handle_shared_attribute (tree *, tree, tree, int, bool *);
extern tree ix86_handle_selectany_attribute (tree *, tree, tree, int, bool *);
extern int x86_field_alignment (tree, int);
+extern tree ix86_valid_option_attribute_tree (tree);
#endif
extern rtx ix86_tls_get_addr (void);
@@ -215,6 +216,10 @@ extern void ix86_expand_reduc_v4sf (rtx (*)(rtx, rtx, rtx), rtx, rtx);
extern bool ix86_sse5_valid_op_p (rtx [], rtx, int, bool, int, bool);
extern void ix86_expand_sse5_multiple_memory (rtx [], int, enum machine_mode);
+/* In i386-c.c */
+extern void ix86_target_macros (void);
+extern void ix86_register_pragmas (void);
+
/* In winnt.c */
extern void i386_pe_unique_section (tree, int);
extern void i386_pe_declare_function_type (FILE *, const char *, int);
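The prototypes added above expose the function-specific option machinery implemented in i386.c below. As a sketch of the user-level syntax those routines parse (illustrative only; the accepted keys are the ones listed in the attrs[] table of ix86_valid_option_attribute_inner_p):

/* Per-function ISA selection via attribute(option(...)): a "no-" prefix
   negates an ISA flag, and arch=/tune=/fpmath= are string options.  */
__attribute__((option("sse4.2,arch=core2")))
int
popcount_sse42 (unsigned int x)
{
  return __builtin_popcount (x);
}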
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index e68f35a24c6..62d1b8d9bce 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -48,7 +48,7 @@ along with GCC; see the file COPYING3. If not see
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
@@ -1210,7 +1210,11 @@ const struct processor_costs *ix86_cost = &pentium_cost;
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
/* Feature tests against the various tunings. */
-unsigned int ix86_tune_features[X86_TUNE_LAST] = {
+unsigned char ix86_tune_features[X86_TUNE_LAST];
+
+/* Feature tests against the various tunings used to create ix86_tune_features
+ based on the processor mask. */
+static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
/* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
negatively, so enabling for Generic64 seems like good code size
tradeoff. We can't enable it for 32bit generic because it does not
@@ -1443,7 +1447,11 @@ unsigned int ix86_tune_features[X86_TUNE_LAST] = {
};
/* Feature tests against the various architecture variations. */
-unsigned int ix86_arch_features[X86_ARCH_LAST] = {
+unsigned char ix86_arch_features[X86_ARCH_LAST];
+
+/* Feature tests against the various architecture variations, used to create
+ ix86_arch_features based on the processor mask. */
+static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
/* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
~(m_386 | m_486 | m_PENT | m_K6),
@@ -1773,6 +1781,26 @@ static void ix86_compute_frame_layout (struct ix86_frame *);
static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
rtx, rtx, int);
+enum ix86_function_specific_strings
+{
+ IX86_FUNCTION_SPECIFIC_ARCH,
+ IX86_FUNCTION_SPECIFIC_TUNE,
+ IX86_FUNCTION_SPECIFIC_FPMATH,
+ IX86_FUNCTION_SPECIFIC_MAX
+};
+
+static char *ix86_target_string (int, int, const char *, const char *,
+ const char *, bool);
+static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
+static void ix86_function_specific_save (struct cl_target_option *);
+static void ix86_function_specific_restore (struct cl_target_option *);
+static void ix86_function_specific_print (FILE *, int,
+ struct cl_target_option *);
+static bool ix86_valid_option_attribute_p (tree, tree, tree, int);
+static bool ix86_valid_option_attribute_inner_p (tree, char *[]);
+static bool ix86_can_inline_p (tree, tree);
+static void ix86_set_current_function (tree);
+
/* The svr4 ABI for the i386 says that records and unions are returned
in memory. */
@@ -1780,6 +1808,10 @@ static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
#define DEFAULT_PCC_STRUCT_RETURN 1
#endif
+/* Whether -mtune= or -march= were specified */
+static int ix86_tune_defaulted;
+static int ix86_arch_specified;
+
/* Bit flags that specify the ISA we are compiling for. */
int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
@@ -1815,6 +1847,18 @@ static int ix86_isa_flags_explicit;
#define OPTION_MASK_ISA_SSE5_SET \
(OPTION_MASK_ISA_SSE5 | OPTION_MASK_ISA_SSE4A_SET)
+/* AES and PCLMUL need SSE2 because they use xmm registers */
+#define OPTION_MASK_ISA_AES_SET \
+ (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
+#define OPTION_MASK_ISA_PCLMUL_SET \
+ (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
+
+#define OPTION_MASK_ISA_ABM_SET \
+ (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
+#define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
+#define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
+#define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
+
/* Define a set of ISAs which aren't available when a given ISA is
disabled. MMX and SSE ISAs are handled separately. */
@@ -1844,14 +1888,73 @@ static int ix86_isa_flags_explicit;
#define OPTION_MASK_ISA_SSE4A_UNSET \
(OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5_UNSET)
-
#define OPTION_MASK_ISA_SSE5_UNSET OPTION_MASK_ISA_SSE5
+#define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
+#define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
+#define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
+#define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
+#define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
+#define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
/* Vectorization library interface and handlers. */
tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
+/* Processor target table, indexed by processor number */
+struct ptt
+{
+ const struct processor_costs *cost; /* Processor costs */
+ const int align_loop; /* Default alignments. */
+ const int align_loop_max_skip;
+ const int align_jump;
+ const int align_jump_max_skip;
+ const int align_func;
+};
+
+static const struct ptt processor_target_table[PROCESSOR_max] =
+{
+ {&i386_cost, 4, 3, 4, 3, 4},
+ {&i486_cost, 16, 15, 16, 15, 16},
+ {&pentium_cost, 16, 7, 16, 7, 16},
+ {&pentiumpro_cost, 16, 15, 16, 10, 16},
+ {&geode_cost, 0, 0, 0, 0, 0},
+ {&k6_cost, 32, 7, 32, 7, 32},
+ {&athlon_cost, 16, 7, 16, 7, 16},
+ {&pentium4_cost, 0, 0, 0, 0, 0},
+ {&k8_cost, 16, 7, 16, 7, 16},
+ {&nocona_cost, 0, 0, 0, 0, 0},
+ {&core2_cost, 16, 10, 16, 10, 16},
+ {&generic32_cost, 16, 7, 16, 7, 16},
+ {&generic64_cost, 16, 10, 16, 10, 16},
+ {&amdfam10_cost, 32, 24, 32, 7, 32}
+};
+
+static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
+{
+ "generic",
+ "i386",
+ "i486",
+ "pentium",
+ "pentium-mmx",
+ "pentiumpro",
+ "pentium2",
+ "pentium3",
+ "pentium4",
+ "pentium-m",
+ "prescott",
+ "nocona",
+ "core2",
+ "geode",
+ "k6",
+ "k6-2",
+ "k6-3",
+ "athlon",
+ "athlon-4",
+ "k8",
+ "amdfam10"
+};
+
/* Implement TARGET_HANDLE_OPTION. */
static bool
@@ -2002,11 +2105,295 @@ ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
}
return true;
+ case OPT_mabm:
+ if (value)
+ {
+ ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
+ }
+ else
+ {
+ ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
+ }
+ return true;
+
+ case OPT_mpopcnt:
+ if (value)
+ {
+ ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
+ }
+ else
+ {
+ ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
+ }
+ return true;
+
+ case OPT_msahf:
+ if (value)
+ {
+ ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
+ }
+ else
+ {
+ ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
+ }
+ return true;
+
+ case OPT_mcx16:
+ if (value)
+ {
+ ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
+ }
+ else
+ {
+ ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
+ }
+ return true;
+
+ case OPT_maes:
+ if (value)
+ {
+ ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
+ }
+ else
+ {
+ ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
+ }
+ return true;
+
+ case OPT_mpclmul:
+ if (value)
+ {
+ ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
+ }
+ else
+ {
+ ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
+ ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
+ }
+ return true;
+
default:
return true;
}
}
+
+/* Return a string that documents the current -m options. The caller is
+ responsible for freeing the string. */
+
+static char *
+ix86_target_string (int isa, int flags, const char *arch, const char *tune,
+ const char *fpmath, bool add_nl_p)
+{
+ struct ix86_target_opts
+ {
+ const char *option; /* option string */
+ int mask; /* isa mask options */
+ };
+
+ /* This table is ordered so that options like -msse5 or -msse4.2, which imply
+ preceding options, are matched first. */
+ static struct ix86_target_opts isa_opts[] =
+ {
+ { "-m64", OPTION_MASK_ISA_64BIT },
+ { "-msse5", OPTION_MASK_ISA_SSE5 },
+ { "-msse4a", OPTION_MASK_ISA_SSE4A },
+ { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
+ { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
+ { "-mssse3", OPTION_MASK_ISA_SSSE3 },
+ { "-msse3", OPTION_MASK_ISA_SSE3 },
+ { "-msse2", OPTION_MASK_ISA_SSE2 },
+ { "-msse", OPTION_MASK_ISA_SSE },
+ { "-m3dnow", OPTION_MASK_ISA_3DNOW },
+ { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
+ { "-mmmx", OPTION_MASK_ISA_MMX },
+ { "-mabm", OPTION_MASK_ISA_ABM },
+ { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
+ { "-maes", OPTION_MASK_ISA_AES },
+ { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
+ };
+
+ /* Flag options. */
+ static struct ix86_target_opts flag_opts[] =
+ {
+ { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
+ { "-m80387", MASK_80387 },
+ { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
+ { "-malign-double", MASK_ALIGN_DOUBLE },
+ { "-mcld", MASK_CLD },
+ { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
+ { "-mieee-fp", MASK_IEEE_FP },
+ { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
+ { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
+ { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
+ { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
+ { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
+ { "-mno-fused-madd", MASK_NO_FUSED_MADD },
+ { "-mno-push-args", MASK_NO_PUSH_ARGS },
+ { "-mno-red-zone", MASK_NO_RED_ZONE },
+ { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
+ { "-mrecip", MASK_RECIP },
+ { "-mrtd", MASK_RTD },
+ { "-msseregparm", MASK_SSEREGPARM },
+ { "-mstack-arg-probe", MASK_STACK_PROBE },
+ { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
+ };
+
+ const char *opts[ (sizeof (isa_opts) / sizeof (isa_opts[0])
+ + sizeof (flag_opts) / sizeof (flag_opts[0])
+ + 6)][2];
+
+ char isa_other[40];
+ char target_other[40];
+ unsigned num = 0;
+ unsigned i, j;
+ char *ret;
+ char *ptr;
+ size_t len;
+ size_t line_len;
+ size_t sep_len;
+
+ memset (opts, '\0', sizeof (opts));
+
+ /* Add -march= option. */
+ if (arch)
+ {
+ opts[num][0] = "-march=";
+ opts[num++][1] = arch;
+ }
+
+ /* Add -mtune= option. */
+ if (tune)
+ {
+ opts[num][0] = "-mtune=";
+ opts[num++][1] = tune;
+ }
+
+ /* Pick out the options in isa options. */
+ for (i = 0; i < sizeof (isa_opts) / sizeof (isa_opts[0]); i++)
+ {
+ if ((isa & isa_opts[i].mask) != 0)
+ {
+ opts[num++][0] = isa_opts[i].option;
+ isa &= ~ isa_opts[i].mask;
+ }
+ }
+
+ if (isa && add_nl_p)
+ {
+ opts[num++][0] = isa_other;
+ sprintf (isa_other, "(other isa: 0x%x)", isa);
+ }
+
+ /* Add flag options. */
+ for (i = 0; i < sizeof (flag_opts) / sizeof (flag_opts[0]); i++)
+ {
+ if ((flags & flag_opts[i].mask) != 0)
+ {
+ opts[num++][0] = flag_opts[i].option;
+ flags &= ~ flag_opts[i].mask;
+ }
+ }
+
+ if (flags && add_nl_p)
+ {
+ opts[num++][0] = target_other;
+ sprintf (target_other, "(other flags: 0x%x)", flags);
+ }
+
+ /* Add -fpmath= option. */
+ if (fpmath)
+ {
+ opts[num][0] = "-mfpmath=";
+ opts[num++][1] = fpmath;
+ }
+
+ /* Any options? */
+ if (num == 0)
+ return NULL;
+
+ gcc_assert (num < sizeof (opts) / sizeof (opts[0]));
+
+ /* Size the string. */
+ len = 0;
+ sep_len = (add_nl_p) ? 3 : 1;
+ for (i = 0; i < num; i++)
+ {
+ len += sep_len;
+ for (j = 0; j < 2; j++)
+ if (opts[i][j])
+ len += strlen (opts[i][j]);
+ }
+
+ /* Build the string. */
+ ret = ptr = (char *) xmalloc (len);
+ line_len = 0;
+
+ for (i = 0; i < num; i++)
+ {
+ size_t len2[2];
+
+ for (j = 0; j < 2; j++)
+ len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
+
+ if (i != 0)
+ {
+ *ptr++ = ' ';
+ line_len++;
+
+ if (add_nl_p && line_len + len2[0] + len2[1] > 70)
+ {
+ *ptr++ = '\\';
+ *ptr++ = '\n';
+ line_len = 0;
+ }
+ }
+
+ for (j = 0; j < 2; j++)
+ if (opts[i][j])
+ {
+ memcpy (ptr, opts[i][j], len2[j]);
+ ptr += len2[j];
+ line_len += len2[j];
+ }
+ }
+
+ *ptr = '\0';
+ gcc_assert (ret + len >= ptr);
+
+ return ret;
+}
+
+/* Function that is callable from the debugger to print the current
+ options. */
+void
+ix86_debug_options (void)
+{
+ char *opts = ix86_target_string (ix86_isa_flags, target_flags,
+ ix86_arch_string, ix86_tune_string,
+ ix86_fpmath_string, true);
+
+ if (opts)
+ {
+ fprintf (stderr, "%s\n\n", opts);
+ free (opts);
+ }
+ else
+ fprintf (stderr, "<no options>\n\n");
+ return;
+}
+
/* Sometimes certain combinations of command options do not make
sense on a particular target machine. You can define a macro
`OVERRIDE_OPTIONS' to take account of this. This macro, if
@@ -2017,68 +2404,17 @@ ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
`-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
void
-override_options (void)
+override_options (bool main_args_p)
{
int i;
- int ix86_tune_defaulted = 0;
- int ix86_arch_specified = 0;
unsigned int ix86_arch_mask, ix86_tune_mask;
+ const char *prefix;
+ const char *suffix;
+ const char *sw;
/* Comes from final.c -- no real reason to change it. */
#define MAX_CODE_ALIGN 16
- static struct ptt
- {
- const struct processor_costs *cost; /* Processor costs */
- const int align_loop; /* Default alignments. */
- const int align_loop_max_skip;
- const int align_jump;
- const int align_jump_max_skip;
- const int align_func;
- }
- const processor_target_table[PROCESSOR_max] =
- {
- {&i386_cost, 4, 3, 4, 3, 4},
- {&i486_cost, 16, 15, 16, 15, 16},
- {&pentium_cost, 16, 7, 16, 7, 16},
- {&pentiumpro_cost, 16, 15, 16, 10, 16},
- {&geode_cost, 0, 0, 0, 0, 0},
- {&k6_cost, 32, 7, 32, 7, 32},
- {&athlon_cost, 16, 7, 16, 7, 16},
- {&pentium4_cost, 0, 0, 0, 0, 0},
- {&k8_cost, 16, 7, 16, 7, 16},
- {&nocona_cost, 0, 0, 0, 0, 0},
- {&core2_cost, 16, 10, 16, 10, 16},
- {&generic32_cost, 16, 7, 16, 7, 16},
- {&generic64_cost, 16, 10, 16, 10, 16},
- {&amdfam10_cost, 32, 24, 32, 7, 32}
- };
-
- static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
- {
- "generic",
- "i386",
- "i486",
- "pentium",
- "pentium-mmx",
- "pentiumpro",
- "pentium2",
- "pentium3",
- "pentium4",
- "pentium-m",
- "prescott",
- "nocona",
- "core2",
- "geode",
- "k6",
- "k6-2",
- "k6-3",
- "athlon",
- "athlon-4",
- "k8",
- "amdfam10"
- };
-
enum pta_flags
{
PTA_SSE = 1 << 0,
@@ -2197,6 +2533,21 @@ override_options (void)
int const pta_size = ARRAY_SIZE (processor_alias_table);
+ /* Set up prefix/suffix so the error messages refer to either the command
+ line argument, or the attribute(option). */
+ if (main_args_p)
+ {
+ prefix = "-m";
+ suffix = "";
+ sw = "switch";
+ }
+ else
+ {
+ prefix = "option(\"";
+ suffix = "\")";
+ sw = "attribute";
+ }
+
#ifdef SUBTARGET_OVERRIDE_OPTIONS
SUBTARGET_OVERRIDE_OPTIONS;
#endif
@@ -2246,8 +2597,15 @@ override_options (void)
else
ix86_tune_string = "generic32";
}
+ /* If this call is for setting the option attribute, allow the
+ generic32/generic64 that was previously set. */
+ else if (!main_args_p
+ && (!strcmp (ix86_tune_string, "generic32")
+ || !strcmp (ix86_tune_string, "generic64")))
+ ;
else if (!strncmp (ix86_tune_string, "generic", 7))
- error ("bad value (%s) for -mtune= switch", ix86_tune_string);
+ error ("bad value (%s) for %stune=%s %s",
+ ix86_tune_string, prefix, suffix, sw);
}
else
{
@@ -2288,11 +2646,13 @@ override_options (void)
else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
stringop_alg = unrolled_loop;
else
- error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
+ error ("bad value (%s) for %sstringop-strategy=%s %s",
+ ix86_stringop_string, prefix, suffix, sw);
}
if (!strcmp (ix86_tune_string, "x86-64"))
- warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
- "-mtune=generic instead as appropriate.");
+ warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
+ "%stune=k8%s or %stune=generic%s instead as appropriate.",
+ prefix, suffix, prefix, suffix, prefix, suffix);
if (!ix86_arch_string)
ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
@@ -2300,9 +2660,11 @@ override_options (void)
ix86_arch_specified = 1;
if (!strcmp (ix86_arch_string, "generic"))
- error ("generic CPU can be used only for -mtune= switch");
+ error ("generic CPU can be used only for %stune=%s %s",
+ prefix, suffix, sw);
if (!strncmp (ix86_arch_string, "generic", 7))
- error ("bad value (%s) for -march= switch", ix86_arch_string);
+ error ("bad value (%s) for %sarch=%s %s",
+ ix86_arch_string, prefix, suffix, sw);
if (ix86_cmodel_string != 0)
{
@@ -2319,7 +2681,8 @@ override_options (void)
else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
ix86_cmodel = CM_KERNEL;
else
- error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
+ error ("bad value (%s) for %scmodel=%s %s",
+ ix86_cmodel_string, prefix, suffix, sw);
}
else
{
@@ -2342,7 +2705,8 @@ override_options (void)
else if (!strcmp (ix86_asm_string, "att"))
ix86_asm_dialect = ASM_ATT;
else
- error ("bad value (%s) for -masm= switch", ix86_asm_string);
+ error ("bad value (%s) for %sasm=%s %s",
+ ix86_asm_string, prefix, suffix, sw);
}
if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
error ("code model %qs not supported in the %s bit mode",
@@ -2395,31 +2759,37 @@ override_options (void)
if (processor_alias_table[i].flags & PTA_SSE5
&& !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE5))
ix86_isa_flags |= OPTION_MASK_ISA_SSE5;
-
- if (processor_alias_table[i].flags & PTA_ABM)
- x86_abm = true;
- if (processor_alias_table[i].flags & PTA_CX16)
- x86_cmpxchg16b = true;
- if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM))
- x86_popcnt = true;
+ if (processor_alias_table[i].flags & PTA_ABM
+ && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
+ ix86_isa_flags |= OPTION_MASK_ISA_ABM;
+ if (processor_alias_table[i].flags & PTA_CX16
+ && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
+ ix86_isa_flags |= OPTION_MASK_ISA_CX16;
+ if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
+ && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
+ ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
+ if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
+ && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
+ ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
+ if (processor_alias_table[i].flags & PTA_AES
+ && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
+ ix86_isa_flags |= OPTION_MASK_ISA_AES;
+ if (processor_alias_table[i].flags & PTA_PCLMUL
+ && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
+ ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
x86_prefetch_sse = true;
- if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF)))
- x86_sahf = true;
- if (processor_alias_table[i].flags & PTA_AES)
- x86_aes = true;
- if (processor_alias_table[i].flags & PTA_PCLMUL)
- x86_pclmul = true;
break;
}
if (i == pta_size)
- error ("bad value (%s) for -march= switch", ix86_arch_string);
+ error ("bad value (%s) for %sarch=%s %s",
+ ix86_arch_string, prefix, suffix, sw);
ix86_arch_mask = 1u << ix86_arch;
for (i = 0; i < X86_ARCH_LAST; ++i)
- ix86_arch_features[i] &= ix86_arch_mask;
+ ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
for (i = 0; i < pta_size; i++)
if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
@@ -2451,19 +2821,12 @@ override_options (void)
break;
}
if (i == pta_size)
- error ("bad value (%s) for -mtune= switch", ix86_tune_string);
-
- /* Enable SSE2 if AES or PCLMUL is enabled. */
- if ((x86_aes || x86_pclmul)
- && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
- {
- ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
- ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
- }
+ error ("bad value (%s) for %stune=%s %s",
+ ix86_tune_string, prefix, suffix, sw);
ix86_tune_mask = 1u << ix86_tune;
for (i = 0; i < X86_TUNE_LAST; ++i)
- ix86_tune_features[i] &= ix86_tune_mask;
+ ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
if (optimize_size)
ix86_cost = &size_cost;
@@ -2477,10 +2840,11 @@ override_options (void)
if (ix86_regparm_string)
{
if (TARGET_64BIT)
- warning (0, "-mregparm is ignored in 64-bit mode");
+ warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
i = atoi (ix86_regparm_string);
if (i < 0 || i > REGPARM_MAX)
- error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
+ error ("%sregparm=%d%s is not between 0 and %d",
+ prefix, i, suffix, REGPARM_MAX);
else
ix86_regparm = i;
}
@@ -2492,12 +2856,14 @@ override_options (void)
Remove this code in GCC 3.2 or later. */
if (ix86_align_loops_string)
{
- warning (0, "-malign-loops is obsolete, use -falign-loops");
+ warning (0, "%salign-loops%s is obsolete, use %salign-loops%s",
+ prefix, suffix, prefix, suffix);
if (align_loops == 0)
{
i = atoi (ix86_align_loops_string);
if (i < 0 || i > MAX_CODE_ALIGN)
- error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
+ error ("%salign-loops=%d%s is not between 0 and %d",
+ prefix, i, suffix, MAX_CODE_ALIGN);
else
align_loops = 1 << i;
}
@@ -2505,12 +2871,14 @@ override_options (void)
if (ix86_align_jumps_string)
{
- warning (0, "-malign-jumps is obsolete, use -falign-jumps");
+ warning (0, "%salign-jumps%s is obsolete, use %salign-jumps%s",
+ prefix, suffix, prefix, suffix);
if (align_jumps == 0)
{
i = atoi (ix86_align_jumps_string);
if (i < 0 || i > MAX_CODE_ALIGN)
- error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
+ error ("%salign-loops=%d%s is not between 0 and %d",
+ prefix, i, suffix, MAX_CODE_ALIGN);
else
align_jumps = 1 << i;
}
@@ -2518,12 +2886,14 @@ override_options (void)
if (ix86_align_funcs_string)
{
- warning (0, "-malign-functions is obsolete, use -falign-functions");
+ warning (0, "%salign-functions%s is obsolete, use %salign-functions%s",
+ prefix, suffix, prefix, suffix);
if (align_functions == 0)
{
i = atoi (ix86_align_funcs_string);
if (i < 0 || i > MAX_CODE_ALIGN)
- error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
+ error ("%salign-loops=%d%s is not between 0 and %d",
+ prefix, i, suffix, MAX_CODE_ALIGN);
else
align_functions = 1 << i;
}
@@ -2551,7 +2921,7 @@ override_options (void)
{
i = atoi (ix86_branch_cost_string);
if (i < 0 || i > 5)
- error ("-mbranch-cost=%d is not between 0 and 5", i);
+ error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
else
ix86_branch_cost = i;
}
@@ -2559,7 +2929,7 @@ override_options (void)
{
i = atoi (ix86_section_threshold_string);
if (i < 0)
- error ("-mlarge-data-threshold=%d is negative", i);
+ error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
else
ix86_section_threshold = i;
}
@@ -2573,8 +2943,8 @@ override_options (void)
else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
ix86_tls_dialect = TLS_DIALECT_SUN;
else
- error ("bad value (%s) for -mtls-dialect= switch",
- ix86_tls_dialect_string);
+ error ("bad value (%s) for %stls-dialect=%s %s",
+ ix86_tls_dialect_string, prefix, suffix, sw);
}
if (ix87_precision_string)
@@ -2597,7 +2967,7 @@ override_options (void)
| TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
if (TARGET_RTD)
- warning (0, "-mrtd is ignored in 64bit mode");
+ warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
}
else
{
@@ -2643,7 +3013,7 @@ override_options (void)
/* Turn on popcnt instruction for -msse4.2 or -mabm. */
if (TARGET_SSE4_2 || TARGET_ABM)
- x86_popcnt = true;
+ ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
/* Validate -mpreferred-stack-boundary= value, or provide default.
The default of 128 bits is for Pentium III's SSE __m128. We can't
@@ -2654,8 +3024,8 @@ override_options (void)
{
i = atoi (ix86_preferred_stack_boundary_string);
if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
- error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
- TARGET_64BIT ? 4 : 2);
+ error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
+ prefix, i, suffix, TARGET_64BIT ? 4 : 2);
else
ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
}
@@ -2663,7 +3033,7 @@ override_options (void)
/* Accept -msseregparm only if at least SSE support is enabled. */
if (TARGET_SSEREGPARM
&& ! TARGET_SSE)
- error ("-msseregparm used without SSE enabled");
+ error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
ix86_fpmath = TARGET_FPMATH_DEFAULT;
if (ix86_fpmath_string != 0)
@@ -2681,7 +3051,10 @@ override_options (void)
ix86_fpmath = FPMATH_SSE;
}
else if (! strcmp (ix86_fpmath_string, "387,sse")
- || ! strcmp (ix86_fpmath_string, "sse,387"))
+ || ! strcmp (ix86_fpmath_string, "387+sse")
+ || ! strcmp (ix86_fpmath_string, "sse,387")
+ || ! strcmp (ix86_fpmath_string, "sse+387")
+ || ! strcmp (ix86_fpmath_string, "both"))
{
if (!TARGET_SSE)
{
@@ -2697,7 +3070,8 @@ override_options (void)
ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
}
else
- error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
+ error ("bad value (%s) for %sfpmath=%s %s",
+ ix86_fpmath_string, prefix, suffix, sw);
}
/* If the i387 is disabled, then do not return values in it. */
@@ -2713,7 +3087,8 @@ override_options (void)
ix86_veclib_handler = ix86_veclibabi_acml;
else
error ("unknown vectorization library ABI type (%s) for "
- "-mveclibabi= switch", ix86_veclibabi_string);
+ "%sveclibabi=%s %s", ix86_veclibabi_string,
+ prefix, suffix, sw);
}
if ((x86_accumulate_outgoing_args & ix86_tune_mask)
@@ -2732,7 +3107,8 @@ override_options (void)
{
if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
warning (0, "unwind tables currently require either a frame pointer "
- "or -maccumulate-outgoing-args for correctness");
+ "or %saccumulate-outgoing-args%s for correctness",
+ prefix, suffix);
target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
}
@@ -2743,8 +3119,8 @@ override_options (void)
&& !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
{
if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
- warning (0, "stack probing requires -maccumulate-outgoing-args "
- "for correctness");
+ warning (0, "stack probing requires %saccumulate-outgoing-args%s "
+ "for correctness", prefix, suffix);
target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
}
@@ -2762,11 +3138,6 @@ override_options (void)
*p = '\0';
}
- /* When scheduling description is not available, disable scheduler pass
- so it won't slow down the compilation and make x87 code slower. */
- if (!TARGET_SCHEDULE)
- flag_schedule_insns_after_reload = flag_schedule_insns = 0;
-
if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
set_param_value ("simultaneous-prefetches",
ix86_cost->simultaneous_prefetches);
@@ -2808,8 +3179,500 @@ override_options (void)
if (!TARGET_64BIT)
target_flags |= MASK_CLD & ~target_flags_explicit;
#endif
+
+ /* Save the initial options in case the user does function specific options */
+ if (main_args_p)
+ target_option_default_node = target_option_current_node
+ = build_target_option_node ();
}
+/* Save the current options */
+
+static void
+ix86_function_specific_save (struct cl_target_option *ptr)
+{
+ gcc_assert (IN_RANGE (ix86_arch, 0, 255));
+ gcc_assert (IN_RANGE (ix86_tune, 0, 255));
+ gcc_assert (IN_RANGE (ix86_fpmath, 0, 255));
+ gcc_assert (IN_RANGE (ix86_branch_cost, 0, 255));
+
+ ptr->arch = ix86_arch;
+ ptr->tune = ix86_tune;
+ ptr->fpmath = ix86_fpmath;
+ ptr->branch_cost = ix86_branch_cost;
+ ptr->tune_defaulted = ix86_tune_defaulted;
+ ptr->arch_specified = ix86_arch_specified;
+ ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
+ ptr->target_flags_explicit = target_flags_explicit;
+}
+
+/* Restore the current options */
+
+static void
+ix86_function_specific_restore (struct cl_target_option *ptr)
+{
+ enum processor_type old_tune = ix86_tune;
+ enum processor_type old_arch = ix86_arch;
+ unsigned int ix86_arch_mask, ix86_tune_mask;
+ int i;
+
+ ix86_arch = ptr->arch;
+ ix86_tune = ptr->tune;
+ ix86_fpmath = ptr->fpmath;
+ ix86_branch_cost = ptr->branch_cost;
+ ix86_tune_defaulted = ptr->tune_defaulted;
+ ix86_arch_specified = ptr->arch_specified;
+ ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
+ target_flags_explicit = ptr->target_flags_explicit;
+
+ /* Recreate the arch feature tests if the arch changed */
+ if (old_arch != ix86_arch)
+ {
+ ix86_arch_mask = 1u << ix86_arch;
+ for (i = 0; i < X86_ARCH_LAST; ++i)
+ ix86_arch_features[i]
+ = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
+ }
+
+ /* Recreate the tune optimization tests */
+ if (old_tune != ix86_tune)
+ {
+ ix86_tune_mask = 1u << ix86_tune;
+ for (i = 0; i < X86_TUNE_LAST; ++i)
+ ix86_tune_features[i]
+ = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
+ }
+}
+
+/* Print the current options */
+
+static void
+ix86_function_specific_print (FILE *file, int indent,
+ struct cl_target_option *ptr)
+{
+ char *target_string
+ = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
+ NULL, NULL, NULL, false);
+
+ fprintf (file, "%*sarch = %d (%s)\n",
+ indent, "",
+ ptr->arch,
+ ((ptr->arch < TARGET_CPU_DEFAULT_max)
+ ? cpu_names[ptr->arch]
+ : "<unknown>"));
+
+ fprintf (file, "%*stune = %d (%s)\n",
+ indent, "",
+ ptr->tune,
+ ((ptr->tune < TARGET_CPU_DEFAULT_max)
+ ? cpu_names[ptr->tune]
+ : "<unknown>"));
+
+ fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
+ (ptr->fpmath & FPMATH_387) ? ", 387" : "",
+ (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
+ fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
+
+ if (target_string)
+ {
+ fprintf (file, "%*s%s\n", indent, "", target_string);
+ free (target_string);
+ }
+}
+
+
+/* Inner function to process the attribute((option(...))), take an argument and
+ set the current options from the argument. If we have a list, recursively go
+ over the list. */
+
+static bool
+ix86_valid_option_attribute_inner_p (tree args, char *p_strings[])
+{
+ char *next_optstr;
+ bool ret = true;
+
+#define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
+#define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
+#define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
+#define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
+
+ enum ix86_opt_type
+ {
+ ix86_opt_unknown,
+ ix86_opt_yes,
+ ix86_opt_no,
+ ix86_opt_str,
+ ix86_opt_isa
+ };
+
+ static const struct
+ {
+ const char *string;
+ size_t len;
+ enum ix86_opt_type type;
+ int opt;
+ int mask;
+ } attrs[] = {
+ /* isa options */
+ IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
+ IX86_ATTR_ISA ("abm", OPT_mabm),
+ IX86_ATTR_ISA ("aes", OPT_maes),
+ IX86_ATTR_ISA ("mmx", OPT_mmmx),
+ IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
+ IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
+ IX86_ATTR_ISA ("sse", OPT_msse),
+ IX86_ATTR_ISA ("sse2", OPT_msse2),
+ IX86_ATTR_ISA ("sse3", OPT_msse3),
+ IX86_ATTR_ISA ("sse4", OPT_msse4),
+ IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
+ IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
+ IX86_ATTR_ISA ("sse4a", OPT_msse4a),
+ IX86_ATTR_ISA ("sse5", OPT_msse5),
+ IX86_ATTR_ISA ("ssse3", OPT_mssse3),
+
+ /* string options */
+ IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
+ IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
+ IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
+
+ /* flag options */
+ IX86_ATTR_YES ("cld",
+ OPT_mcld,
+ MASK_CLD),
+
+ IX86_ATTR_NO ("fancy-math-387",
+ OPT_mfancy_math_387,
+ MASK_NO_FANCY_MATH_387),
+
+ IX86_ATTR_NO ("fused-madd",
+ OPT_mfused_madd,
+ MASK_NO_FUSED_MADD),
+
+ IX86_ATTR_YES ("ieee-fp",
+ OPT_mieee_fp,
+ MASK_IEEE_FP),
+
+ IX86_ATTR_YES ("inline-all-stringops",
+ OPT_minline_all_stringops,
+ MASK_INLINE_ALL_STRINGOPS),
+
+ IX86_ATTR_YES ("inline-stringops-dynamically",
+ OPT_minline_stringops_dynamically,
+ MASK_INLINE_STRINGOPS_DYNAMICALLY),
+
+ IX86_ATTR_NO ("align-stringops",
+ OPT_mno_align_stringops,
+ MASK_NO_ALIGN_STRINGOPS),
+
+ IX86_ATTR_YES ("recip",
+ OPT_mrecip,
+ MASK_RECIP),
+
+ };
+
+ /* If this is a list, recurse to get the options. */
+ if (TREE_CODE (args) == TREE_LIST)
+ {
+ bool ret = true;
+
+ for (; args; args = TREE_CHAIN (args))
+ if (TREE_VALUE (args)
+ && !ix86_valid_option_attribute_inner_p (TREE_VALUE (args), p_strings))
+ ret = false;
+
+ return ret;
+ }
+
+ else if (TREE_CODE (args) != STRING_CST)
+ gcc_unreachable ();
+
+ /* Handle multiple arguments separated by commas. */
+ next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
+
+ while (next_optstr && *next_optstr != '\0')
+ {
+ char *p = next_optstr;
+ char *orig_p = p;
+ char *comma = strchr (next_optstr, ',');
+ const char *opt_string;
+ size_t len, opt_len;
+ int opt;
+ bool opt_set_p;
+ char ch;
+ unsigned i;
+ enum ix86_opt_type type = ix86_opt_unknown;
+ int mask = 0;
+
+ if (comma)
+ {
+ *comma = '\0';
+ len = comma - next_optstr;
+ next_optstr = comma + 1;
+ }
+ else
+ {
+ len = strlen (p);
+ next_optstr = NULL;
+ }
+
+ /* Recognize no-xxx. */
+ if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
+ {
+ opt_set_p = false;
+ p += 3;
+ len -= 3;
+ }
+ else
+ opt_set_p = true;
+
+ /* Find the option. */
+ ch = *p;
+ opt = N_OPTS;
+ for (i = 0; i < sizeof (attrs) / sizeof (attrs[0]); i++)
+ {
+ type = attrs[i].type;
+ opt_len = attrs[i].len;
+ if (ch == attrs[i].string[0]
+ && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
+ && memcmp (p, attrs[i].string, opt_len) == 0)
+ {
+ opt = attrs[i].opt;
+ mask = attrs[i].mask;
+ opt_string = attrs[i].string;
+ break;
+ }
+ }
+
+ /* Process the option. */
+ if (opt == N_OPTS)
+ {
+ error ("attribute(option(\"%s\")) is unknown", orig_p);
+ ret = false;
+ }
+
+ else if (type == ix86_opt_isa)
+ ix86_handle_option (opt, p, opt_set_p);
+
+ else if (type == ix86_opt_yes || type == ix86_opt_no)
+ {
+ if (type == ix86_opt_no)
+ opt_set_p = !opt_set_p;
+
+ if (opt_set_p)
+ target_flags |= mask;
+ else
+ target_flags &= ~mask;
+ }
+
+ else if (type == ix86_opt_str)
+ {
+ if (p_strings[opt])
+ {
+ error ("option(\"%s\") was already specified", opt_string);
+ ret = false;
+ }
+ else
+ p_strings[opt] = xstrdup (p + opt_len);
+ }
+
+ else
+ gcc_unreachable ();
+ }
+
+ return ret;
+}
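
A hedged illustration of how the strings parsed above could be used (the function below is hypothetical; the option names come from the attrs[] table and the "no-" prefix handling in this parser):

/* Hypothetical user code: enable SSE4.2 and popcnt for this one function,
   and pass a flag option in its "no-" form, which the prefix handling
   above recognizes.  */
__attribute__((option("sse4.2,popcnt,no-fancy-math-387")))
int
popcount_u32 (unsigned int x)
{
  int n = 0;
  while (x)
    {
      n += x & 1;
      x >>= 1;
    }
  return n;
}
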
+
+/* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
+
+tree
+ix86_valid_option_attribute_tree (tree args)
+{
+ const char *orig_arch_string = ix86_arch_string;
+ const char *orig_tune_string = ix86_tune_string;
+ const char *orig_fpmath_string = ix86_fpmath_string;
+ int orig_tune_defaulted = ix86_tune_defaulted;
+ int orig_arch_specified = ix86_arch_specified;
+ char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
+ tree t = NULL_TREE;
+ int i;
+ struct cl_target_option *def
+ = TREE_TARGET_OPTION (target_option_default_node);
+
+ /* Process each of the options on the chain. */
+ if (! ix86_valid_option_attribute_inner_p (args, option_strings))
+ return NULL_TREE;
+
+ /* If the changed options are different from the default, rerun override_options,
+ and then save the options away. The string options are attribute options,
+ and will be undone when we copy the save structure. */
+ if (ix86_isa_flags != def->ix86_isa_flags
+ || target_flags != def->target_flags
+ || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
+ || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
+ || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
+ {
+ /* If we are using the default tune= or arch=, undo the string assigned,
+ and use the default. */
+ if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
+ ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
+ else if (!orig_arch_specified)
+ ix86_arch_string = NULL;
+
+ if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
+ ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
+ else if (orig_tune_defaulted)
+ ix86_tune_string = NULL;
+
+ /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
+ if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
+ ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
+ else if (!TARGET_64BIT && TARGET_SSE)
+ ix86_fpmath_string = "sse,387";
+
+ /* Do any overrides, such as arch=xxx, or tune=xxx support. */
+ override_options (false);
+
+ /* Save the current options unless we are validating options for
+ #pragma. */
+ t = build_target_option_node ();
+
+ ix86_arch_string = orig_arch_string;
+ ix86_tune_string = orig_tune_string;
+ ix86_fpmath_string = orig_fpmath_string;
+
+ /* Free up memory allocated to hold the strings.  */
+ for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
+ if (option_strings[i])
+ free (option_strings[i]);
+ }
+
+ return t;
+}
+
+/* Hook to validate attribute((option("string"))). */
+
+static bool
+ix86_valid_option_attribute_p (tree fndecl,
+ tree ARG_UNUSED (name),
+ tree args,
+ int ARG_UNUSED (flags))
+{
+ struct cl_target_option cur_opts;
+ bool ret = true;
+ tree new_opts;
+
+ cl_target_option_save (&cur_opts);
+ new_opts = ix86_valid_option_attribute_tree (args);
+ if (!new_opts)
+ ret = false;
+
+ else if (fndecl)
+ DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_opts;
+
+ cl_target_option_restore (&cur_opts);
+ return ret;
+}
+
+
+/* Hook to determine if one function can safely inline another. */
+
+static bool
+ix86_can_inline_p (tree caller, tree callee)
+{
+ bool ret = false;
+ tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
+ tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
+
+ /* If callee has no option attributes, then it is ok to inline. */
+ if (!callee_tree)
+ ret = true;
+
+ /* If caller has no option attributes, but callee does, then it is not ok to
+ inline. */
+ else if (!caller_tree)
+ ret = false;
+
+ else
+ {
+ struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
+ struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
+
+ /* Callee's isa options should be a subset of the caller's, i.e. an SSE5 function
+	 can inline an SSE2 function but an SSE2 function can't inline an SSE5
+ function. */
+ if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
+ != callee_opts->ix86_isa_flags)
+ ret = false;
+
+ /* See if we have the same non-isa options. */
+ else if (caller_opts->target_flags != callee_opts->target_flags)
+ ret = false;
+
+ /* See if arch, tune, etc. are the same. */
+ else if (caller_opts->arch != callee_opts->arch)
+ ret = false;
+
+ else if (caller_opts->tune != callee_opts->tune)
+ ret = false;
+
+ else if (caller_opts->fpmath != callee_opts->fpmath)
+ ret = false;
+
+ else if (caller_opts->branch_cost != callee_opts->branch_cost)
+ ret = false;
+
+ else
+ ret = true;
+ }
+
+ return ret;
+}
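
A hedged sketch of the subset rule implemented above, using hypothetical functions: a caller whose ISA flags include the callee's may inline it, while a caller built for a smaller ISA may not.

/* Illustrative only.  dot_sse42 carries extra ISA flags via the option
   attribute; per ix86_can_inline_p it can only be inlined into callers
   whose ISA flags are a superset of its own.  */
__attribute__((option("sse4.2"))) static inline int
dot_sse42 (const int *a, const int *b, int n)
{
  int i, sum = 0;
  for (i = 0; i < n; i++)
    sum += a[i] * b[i];
  return sum;
}

/* When this translation unit is compiled with plain -msse2, this caller
   cannot inline dot_sse42: the callee's isa flags are not a subset of
   the caller's.  */
int
dot_default (const int *a, const int *b, int n)
{
  return dot_sse42 (a, b, n);
}
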
+
+
+/* Remember the last target of ix86_set_current_function. */
+static GTY(()) tree ix86_previous_fndecl;
+
+/* Establish appropriate back-end context for processing the function
+ FNDECL. The argument might be NULL to indicate processing at top
+ level, outside of any function scope. */
+static void
+ix86_set_current_function (tree fndecl)
+{
+ /* Only change the context if the function changes. This hook is called
+ several times in the course of compiling a function, and we don't want to
+ slow things down too much or call target_reinit when it isn't safe. */
+ if (fndecl && fndecl != ix86_previous_fndecl)
+ {
+ tree old_tree = (ix86_previous_fndecl
+ ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
+ : NULL_TREE);
+
+ tree new_tree = (fndecl
+ ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
+ : NULL_TREE);
+
+ ix86_previous_fndecl = fndecl;
+ if (old_tree == new_tree)
+ ;
+
+ else if (new_tree)
+ {
+ cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
+ target_reinit ();
+ }
+
+ else if (old_tree)
+ {
+ struct cl_target_option *def
+ = TREE_TARGET_OPTION (target_option_current_node);
+
+ cl_target_option_restore (def);
+ target_reinit ();
+ }
+ }
+}
+
+
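
A hedged sketch of when the hook above fires (all functions hypothetical): as code generation moves between functions whose DECL_FUNCTION_SPECIFIC_TARGET trees differ, the saved options are restored and target_reinit is called; when consecutive functions share the same option tree, nothing is done.

/* Illustrative only: compiling f, then g, then h switches the back-end
   target state at each function boundary where the option tree changes.  */
__attribute__((option("sse4.2"))) void f (void) { /* SSE4.2 options  */ }
void g (void)                                   { /* default options */ }
__attribute__((option("sse4.2"))) void h (void) { /* SSE4.2 again    */ }
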
/* Return true if this goes in large data/bss. */
static bool
@@ -3042,6 +3905,11 @@ optimization_options (int level, int size ATTRIBUTE_UNUSED)
flag_schedule_insns = 0;
#endif
+ /* When the scheduling description is not available, disable the scheduler pass
+ so it won't slow down the compilation and make x87 code slower. */
+ if (!TARGET_SCHEDULE)
+ flag_schedule_insns_after_reload = flag_schedule_insns = 0;
+
if (TARGET_MACHO)
/* The Darwin libraries never set errno, so we might as well
avoid calling them when that's the only reason we would. */
@@ -3341,7 +4209,7 @@ ix86_function_regparm (const_tree type, const_tree decl)
/* Use register calling convention for local functions when possible. */
if (decl && TREE_CODE (decl) == FUNCTION_DECL
- && flag_unit_at_a_time && !profile_flag)
+ && !profile_flag)
{
/* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
@@ -3428,7 +4296,7 @@ ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
/* For local functions, pass up to SSE_REGPARM_MAX SFmode
(and DFmode for SSE2) arguments in SSE registers. */
- if (decl && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
+ if (decl && TARGET_SSE_MATH && !profile_flag)
{
/* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
@@ -5459,8 +6327,8 @@ ix86_va_start (tree valist, rtx nextarg)
if (cfun->va_list_gpr_size)
{
type = TREE_TYPE (gpr);
- t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
- build_int_cst (type, n_gpr * 8));
+ t = build2 (MODIFY_EXPR, type,
+ gpr, build_int_cst (type, n_gpr * 8));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
@@ -5468,7 +6336,7 @@ ix86_va_start (tree valist, rtx nextarg)
if (cfun->va_list_fpr_size)
{
type = TREE_TYPE (fpr);
- t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
+ t = build2 (MODIFY_EXPR, type, fpr,
build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -5480,7 +6348,7 @@ ix86_va_start (tree valist, rtx nextarg)
if (words != 0)
t = build2 (POINTER_PLUS_EXPR, type, t,
size_int (words * UNITS_PER_WORD));
- t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
+ t = build2 (MODIFY_EXPR, type, ovf, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -5490,7 +6358,7 @@ ix86_va_start (tree valist, rtx nextarg)
Prologue of the function save it right above stack frame. */
type = TREE_TYPE (sav);
t = make_tree (type, frame_pointer_rtx);
- t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
+ t = build2 (MODIFY_EXPR, type, sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
@@ -5499,7 +6367,8 @@ ix86_va_start (tree valist, rtx nextarg)
/* Implement va_arg. */
static tree
-ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
tree f_gpr, f_fpr, f_ovf, f_sav;
@@ -5629,16 +6498,14 @@ ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
/* int_addr = gpr + sav; */
t = fold_convert (sizetype, gpr);
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (int_addr, t, pre_p);
}
if (needed_sseregs)
{
/* sse_addr = fpr + sav; */
t = fold_convert (sizetype, fpr);
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (sse_addr, t, pre_p);
}
if (need_temp)
{
@@ -5647,8 +6514,7 @@ ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
/* addr = &temp; */
t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (addr, t, pre_p);
for (i = 0; i < XVECLEN (container, 0); i++)
{
@@ -5681,8 +6547,7 @@ ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
size_int (INTVAL (XEXP (slot, 1))));
dest = build_va_arg_indirect_ref (dest_addr);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (dest, src, pre_p);
}
}
@@ -5690,22 +6555,19 @@ ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
{
t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (gpr, t, pre_p);
}
+
if (needed_sseregs)
{
t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (fpr, t, pre_p);
}
- t = build1 (GOTO_EXPR, void_type_node, lab_over);
- gimplify_and_add (t, pre_p);
+ gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
- t = build1 (LABEL_EXPR, void_type_node, lab_false);
- append_to_statement_list (t, pre_p);
+ gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
}
/* ... otherwise out of the overflow area. */
@@ -5733,20 +6595,14 @@ ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
t = fold_convert (TREE_TYPE (ovf), t);
}
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
-
- t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
- gimplify_and_add (t2, pre_p);
+ gimplify_assign (addr, t, pre_p);
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
size_int (rsize * UNITS_PER_WORD));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (unshare_expr (ovf), t, pre_p);
if (container)
- {
- t = build1 (LABEL_EXPR, void_type_node, lab_over);
- append_to_statement_list (t, pre_p);
- }
+ gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
ptrtype = build_pointer_type (type);
addr = fold_convert (ptrtype, addr);
@@ -8254,7 +9110,8 @@ get_dllimport_decl (tree decl)
name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
name = targetm.strip_name_encoding (name);
- prefix = name[0] == FASTCALL_PREFIX ? "*__imp_": "*__imp__";
+ prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
+ ? "*__imp_" : "*__imp__";
namelen = strlen (name);
prefixlen = strlen (prefix);
imp_name = (char *) alloca (namelen + prefixlen + 1);
@@ -18129,22 +18986,29 @@ enum ix86_builtins
/* Table for the ix86 builtin decls. */
static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
-/* Add an ix86 target builtin function with CODE, NAME and TYPE. Do so,
- * if the target_flags include one of MASK. Stores the function decl
- * in the ix86_builtins array.
- * Returns the function decl or NULL_TREE, if the builtin was not added. */
+/* Table to record which ISA options the builtin needs. */
+static int ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
+
+/* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
+ * of which isa_flags to use in the ix86_builtins_isa array. Stores the
+ * function decl in the ix86_builtins array. Returns the function decl or
+ * NULL_TREE, if the builtin was not added.
+ *
+ * Record all builtins, even if the instruction set isn't enabled in the current ISA,
+ * in case the user uses function specific options for a different ISA. When
+ * the builtin is expanded, check at that time whether it is valid. */
static inline tree
def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
{
tree decl = NULL_TREE;
- if (mask & ix86_isa_flags
- && (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT))
+ if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
{
decl = add_builtin_function (name, type, code, BUILT_IN_MD,
NULL, NULL_TREE);
ix86_builtins[(int) code] = decl;
+ ix86_builtins_isa[(int) code] = mask;
}
return decl;
@@ -19187,9 +20051,10 @@ static const struct builtin_description bdesc_multi_arg[] =
{ OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueuq", IX86_BUILTIN_PCOMTRUEUQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
};
-/* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
- is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
- builtins. */
+/* Set up all the MMX/SSE builtins, even builtins for instructions that are not
+ in the current target ISA, to allow the user to compile particular modules
+ with target specific options that differ from the command line
+ options. */
static void
ix86_init_mmx_sse_builtins (void)
{
@@ -20128,23 +20993,15 @@ ix86_init_mmx_sse_builtins (void)
def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
/* AES */
- if (TARGET_AES)
- {
- /* Define AES built-in functions only if AES is enabled. */
- def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
- def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
- def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
- def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
- def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
- def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
- }
+ def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
+ def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
+ def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
+ def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
+ def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
+ def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
/* PCLMUL */
- if (TARGET_PCLMUL)
- {
- /* Define PCLMUL built-in function only if PCLMUL is enabled. */
- def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
- }
+ def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
/* Access to the vec_init patterns. */
ftype = build_function_type_list (V2SI_type_node, integer_type_node,
@@ -20399,8 +21256,7 @@ ix86_init_builtins (void)
ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = decl;
TREE_READONLY (decl) = 1;
- if (TARGET_MMX)
- ix86_init_mmx_sse_builtins ();
+ ix86_init_mmx_sse_builtins ();
if (TARGET_64BIT)
ix86_init_builtins_va_builtins_abi ();
}
@@ -21616,6 +22472,28 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
enum machine_mode mode0, mode1, mode2;
unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ /* Determine whether the builtin function is available under the current ISA.
+ Originally the builtin was not created if it wasn't applicable to the
+ current ISA based on the command line switches. With function specific
+ options, we need to check in the context of the function making the call
+ whether it is supported. */
+ if (ix86_builtins_isa[fcode]
+ && !(ix86_builtins_isa[fcode] & ix86_isa_flags))
+ {
+ char *opts = ix86_target_string (ix86_builtins_isa[fcode], 0, NULL,
+ NULL, NULL, false);
+
+ if (!opts)
+ error ("%qE needs unknown isa option", fndecl);
+ else
+ {
+ gcc_assert (opts != NULL);
+ error ("%qE needs isa option %s", fndecl, opts);
+ free (opts);
+ }
+ return const0_rtx;
+ }
+
switch (fcode)
{
case IX86_BUILTIN_MASKMOVQ:
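
A hedged illustration of the new expansion-time check (hypothetical user code; assumes -msse2 but not -maes on the command line): because the AES builtins are now always declared, a call from a function whose ISA lacks AES is diagnosed with the "needs isa option" error above, while a function carrying a matching option attribute compiles.

#include <emmintrin.h>

/* Hypothetical: without the attribute (and without -maes), expanding the
   builtin below would now trigger the "needs isa option" error; with the
   attribute, AES is part of this function's ISA and expansion succeeds.  */
__attribute__((option("aes,sse2"))) __m128i
encrypt_round (__m128i state, __m128i round_key)
{
  return (__m128i) __builtin_ia32_aesenc128 ((__v2di) state,
					     (__v2di) round_key);
}
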
@@ -26436,6 +27314,30 @@ ix86_enum_va_list (int idx, const char **pname, tree *ptree)
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost
+#undef TARGET_SET_CURRENT_FUNCTION
+#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
+
+#undef TARGET_OPTION_VALID_ATTRIBUTE_P
+#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_option_attribute_p
+
+#undef TARGET_OPTION_SAVE
+#define TARGET_OPTION_SAVE ix86_function_specific_save
+
+#undef TARGET_OPTION_RESTORE
+#define TARGET_OPTION_RESTORE ix86_function_specific_restore
+
+#undef TARGET_OPTION_PRINT
+#define TARGET_OPTION_PRINT ix86_function_specific_print
+
+#undef TARGET_OPTION_CAN_INLINE_P
+#define TARGET_OPTION_CAN_INLINE_P ix86_can_inline_p
+
+#undef TARGET_OPTION_COLD_ATTRIBUTE_SETS_OPTIMIZATION
+#define TARGET_OPTION_COLD_ATTRIBUTE_SETS_OPTIMIZATION true
+
+#undef TARGET_OPTION_HOT_ATTRIBUTE_SETS_OPTIMIZATION
+#define TARGET_OPTION_HOT_ATTRIBUTE_SETS_OPTIMIZATION true
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-i386.h"
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index d17e414eb5f..c7d33c7eb05 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -49,6 +49,13 @@ along with GCC; see the file COPYING3. If not see
#define TARGET_SSE4A OPTION_ISA_SSE4A
#define TARGET_SSE5 OPTION_ISA_SSE5
#define TARGET_ROUND OPTION_ISA_ROUND
+#define TARGET_ABM OPTION_ISA_ABM
+#define TARGET_POPCNT OPTION_ISA_POPCNT
+#define TARGET_SAHF OPTION_ISA_SAHF
+#define TARGET_AES OPTION_ISA_AES
+#define TARGET_PCLMUL OPTION_ISA_PCLMUL
+#define TARGET_CMPXCHG16B OPTION_ISA_CX16
+
/* SSE5 and SSE4.1 define the same round instructions */
#define OPTION_MASK_ISA_ROUND (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE5)
@@ -286,7 +293,7 @@ enum ix86_tune_indices {
X86_TUNE_LAST
};
-extern unsigned int ix86_tune_features[X86_TUNE_LAST];
+extern unsigned char ix86_tune_features[X86_TUNE_LAST];
#define TARGET_USE_LEAVE ix86_tune_features[X86_TUNE_USE_LEAVE]
#define TARGET_PUSH_MEMORY ix86_tune_features[X86_TUNE_PUSH_MEMORY]
@@ -380,7 +387,7 @@ enum ix86_arch_indices {
X86_ARCH_LAST
};
-extern unsigned int ix86_arch_features[X86_ARCH_LAST];
+extern unsigned char ix86_arch_features[X86_ARCH_LAST];
#define TARGET_CMOVE ix86_arch_features[X86_ARCH_CMOVE]
#define TARGET_CMPXCHG ix86_arch_features[X86_ARCH_CMPXCHG]
@@ -392,15 +399,7 @@ extern unsigned int ix86_arch_features[X86_ARCH_LAST];
extern int x86_prefetch_sse;
-#define TARGET_ABM x86_abm
-#define TARGET_CMPXCHG16B x86_cmpxchg16b
-#define TARGET_POPCNT x86_popcnt
#define TARGET_PREFETCH_SSE x86_prefetch_sse
-#define TARGET_SAHF x86_sahf
-#define TARGET_RECIP x86_recip
-#define TARGET_FUSED_MADD x86_fused_muladd
-#define TARGET_AES (TARGET_SSE2 && x86_aes)
-#define TARGET_PCLMUL (TARGET_SSE2 && x86_pclmul)
#define ASSEMBLER_DIALECT (ix86_asm_dialect)
@@ -475,7 +474,7 @@ enum calling_abi
Don't use this macro to turn on various extra optimizations for
`-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
-#define OVERRIDE_OPTIONS override_options ()
+#define OVERRIDE_OPTIONS override_options (true)
/* Define this to change the optimizations performed by default. */
#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) \
@@ -537,196 +536,10 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
#endif
/* Target CPU builtins. */
-#define TARGET_CPU_CPP_BUILTINS() \
- do \
- { \
- size_t arch_len = strlen (ix86_arch_string); \
- size_t tune_len = strlen (ix86_tune_string); \
- int last_arch_char = ix86_arch_string[arch_len - 1]; \
- int last_tune_char = ix86_tune_string[tune_len - 1]; \
- \
- if (TARGET_64BIT) \
- { \
- builtin_assert ("cpu=x86_64"); \
- builtin_assert ("machine=x86_64"); \
- builtin_define ("__amd64"); \
- builtin_define ("__amd64__"); \
- builtin_define ("__x86_64"); \
- builtin_define ("__x86_64__"); \
- } \
- else \
- { \
- builtin_assert ("cpu=i386"); \
- builtin_assert ("machine=i386"); \
- builtin_define_std ("i386"); \
- } \
- \
- /* Built-ins based on -march=. */ \
- switch (ix86_arch) \
- { \
- case PROCESSOR_I386: \
- break; \
- case PROCESSOR_I486: \
- builtin_define ("__i486"); \
- builtin_define ("__i486__"); \
- break; \
- case PROCESSOR_PENTIUM: \
- builtin_define ("__i586"); \
- builtin_define ("__i586__"); \
- builtin_define ("__pentium"); \
- builtin_define ("__pentium__"); \
- if (last_arch_char == 'x') \
- builtin_define ("__pentium_mmx__"); \
- break; \
- case PROCESSOR_PENTIUMPRO: \
- builtin_define ("__i686"); \
- builtin_define ("__i686__"); \
- builtin_define ("__pentiumpro"); \
- builtin_define ("__pentiumpro__"); \
- break; \
- case PROCESSOR_GEODE: \
- builtin_define ("__geode"); \
- builtin_define ("__geode__"); \
- break; \
- case PROCESSOR_K6: \
- builtin_define ("__k6"); \
- builtin_define ("__k6__"); \
- if (last_arch_char == '2') \
- builtin_define ("__k6_2__"); \
- else if (last_arch_char == '3') \
- builtin_define ("__k6_3__"); \
- break; \
- case PROCESSOR_ATHLON: \
- builtin_define ("__athlon"); \
- builtin_define ("__athlon__"); \
- /* Only plain "athlon" lacks SSE. */ \
- if (last_arch_char != 'n') \
- builtin_define ("__athlon_sse__"); \
- break; \
- case PROCESSOR_K8: \
- builtin_define ("__k8"); \
- builtin_define ("__k8__"); \
- break; \
- case PROCESSOR_AMDFAM10: \
- builtin_define ("__amdfam10"); \
- builtin_define ("__amdfam10__"); \
- break; \
- case PROCESSOR_PENTIUM4: \
- builtin_define ("__pentium4"); \
- builtin_define ("__pentium4__"); \
- break; \
- case PROCESSOR_NOCONA: \
- builtin_define ("__nocona"); \
- builtin_define ("__nocona__"); \
- break; \
- case PROCESSOR_CORE2: \
- builtin_define ("__core2"); \
- builtin_define ("__core2__"); \
- break; \
- case PROCESSOR_GENERIC32: \
- case PROCESSOR_GENERIC64: \
- case PROCESSOR_max: \
- gcc_unreachable (); \
- } \
- \
- /* Built-ins based on -mtune=. */ \
- switch (ix86_tune) \
- { \
- case PROCESSOR_I386: \
- builtin_define ("__tune_i386__"); \
- break; \
- case PROCESSOR_I486: \
- builtin_define ("__tune_i486__"); \
- break; \
- case PROCESSOR_PENTIUM: \
- builtin_define ("__tune_i586__"); \
- builtin_define ("__tune_pentium__"); \
- if (last_tune_char == 'x') \
- builtin_define ("__tune_pentium_mmx__"); \
- break; \
- case PROCESSOR_PENTIUMPRO: \
- builtin_define ("__tune_i686__"); \
- builtin_define ("__tune_pentiumpro__"); \
- switch (last_tune_char) \
- { \
- case '3': \
- builtin_define ("__tune_pentium3__"); \
- /* FALLTHRU */ \
- case '2': \
- builtin_define ("__tune_pentium2__"); \
- break; \
- } \
- break; \
- case PROCESSOR_GEODE: \
- builtin_define ("__tune_geode__"); \
- break; \
- case PROCESSOR_K6: \
- builtin_define ("__tune_k6__"); \
- if (last_tune_char == '2') \
- builtin_define ("__tune_k6_2__"); \
- else if (last_tune_char == '3') \
- builtin_define ("__tune_k6_3__"); \
- break; \
- case PROCESSOR_ATHLON: \
- builtin_define ("__tune_athlon__"); \
- /* Only plain "athlon" lacks SSE. */ \
- if (last_tune_char != 'n') \
- builtin_define ("__tune_athlon_sse__"); \
- break; \
- case PROCESSOR_K8: \
- builtin_define ("__tune_k8__"); \
- break; \
- case PROCESSOR_AMDFAM10: \
- builtin_define ("__tune_amdfam10__"); \
- break; \
- case PROCESSOR_PENTIUM4: \
- builtin_define ("__tune_pentium4__"); \
- break; \
- case PROCESSOR_NOCONA: \
- builtin_define ("__tune_nocona__"); \
- break; \
- case PROCESSOR_CORE2: \
- builtin_define ("__tune_core2__"); \
- break; \
- case PROCESSOR_GENERIC32: \
- case PROCESSOR_GENERIC64: \
- break; \
- case PROCESSOR_max: \
- gcc_unreachable (); \
- } \
- \
- if (TARGET_MMX) \
- builtin_define ("__MMX__"); \
- if (TARGET_3DNOW) \
- builtin_define ("__3dNOW__"); \
- if (TARGET_3DNOW_A) \
- builtin_define ("__3dNOW_A__"); \
- if (TARGET_SSE) \
- builtin_define ("__SSE__"); \
- if (TARGET_SSE2) \
- builtin_define ("__SSE2__"); \
- if (TARGET_SSE3) \
- builtin_define ("__SSE3__"); \
- if (TARGET_SSSE3) \
- builtin_define ("__SSSE3__"); \
- if (TARGET_SSE4_1) \
- builtin_define ("__SSE4_1__"); \
- if (TARGET_SSE4_2) \
- builtin_define ("__SSE4_2__"); \
- if (TARGET_AES) \
- builtin_define ("__AES__"); \
- if (TARGET_PCLMUL) \
- builtin_define ("__PCLMUL__"); \
- if (TARGET_SSE4A) \
- builtin_define ("__SSE4A__"); \
- if (TARGET_SSE5) \
- builtin_define ("__SSE5__"); \
- if (TARGET_SSE_MATH && TARGET_SSE) \
- builtin_define ("__SSE_MATH__"); \
- if (TARGET_SSE_MATH && TARGET_SSE2) \
- builtin_define ("__SSE2_MATH__"); \
- } \
- while (0)
+#define TARGET_CPU_CPP_BUILTINS() ix86_target_macros ()
+
+/* Target Pragmas. */
+#define REGISTER_TARGET_PRAGMAS() ix86_register_pragmas ()
enum target_cpu_default
{
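
Only the location of the macro logic changes here: the ISA macros listed above are now emitted by ix86_target_macros in i386-c.c, so user code that tests them is unaffected. A minimal sketch, assuming nothing beyond the macros shown above:

/* Works the same before and after this change; __SSE2__ is simply defined
   by ix86_target_macros instead of by the macro that used to live here.  */
static inline int
have_sse2_codepath (void)
{
#ifdef __SSE2__
  return 1;
#else
  return 0;
#endif
}
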
diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
index 75c94ba771e..d5c0978dde5 100644
--- a/gcc/config/i386/i386.opt
+++ b/gcc/config/i386/i386.opt
@@ -18,24 +18,58 @@
; along with GCC; see the file COPYING3. If not see
; <http://www.gnu.org/licenses/>.
+;; Definitions to add to the cl_target_option structure
+;; -march= processor
+TargetSave
+unsigned char arch
+
+;; -mtune= processor
+TargetSave
+unsigned char tune
+
+;; -mfpmath=
+TargetSave
+unsigned char fpmath
+
+;; branch cost
+TargetSave
+unsigned char branch_cost
+
+;; which flags were passed by the user
+TargetSave
+int ix86_isa_flags_explicit
+
+;; which flags were passed by the user
+TargetSave
+int target_flags_explicit
+
+;; whether -mtune was not specified
+TargetSave
+unsigned char tune_defaulted
+
+;; whether -march was specified
+TargetSave
+unsigned char arch_specified
+
+;; x86 options
m128bit-long-double
-Target RejectNegative Report Mask(128BIT_LONG_DOUBLE)
+Target RejectNegative Report Mask(128BIT_LONG_DOUBLE) Save
sizeof(long double) is 16
m80387
-Target Report Mask(80387)
+Target Report Mask(80387) Save
Use hardware fp
m96bit-long-double
-Target RejectNegative Report InverseMask(128BIT_LONG_DOUBLE)
+Target RejectNegative Report InverseMask(128BIT_LONG_DOUBLE) Save
sizeof(long double) is 12
maccumulate-outgoing-args
-Target Report Mask(ACCUMULATE_OUTGOING_ARGS)
+Target Report Mask(ACCUMULATE_OUTGOING_ARGS) Save
Reserve space for outgoing arguments in the function prologue
malign-double
-Target Report Mask(ALIGN_DOUBLE)
+Target Report Mask(ALIGN_DOUBLE) Save
Align some doubles on dword boundary
malign-functions=
@@ -51,7 +85,7 @@ Target RejectNegative Joined Var(ix86_align_loops_string)
Loop code aligned to this power of 2
malign-stringops
-Target RejectNegative Report InverseMask(NO_ALIGN_STRINGOPS, ALIGN_STRINGOPS)
+Target RejectNegative Report InverseMask(NO_ALIGN_STRINGOPS, ALIGN_STRINGOPS) Save
Align destination of the string operations
march=
@@ -75,11 +109,11 @@ Target RejectNegative Joined Var(ix86_cmodel_string)
Use given x86-64 code model
mfancy-math-387
-Target RejectNegative Report InverseMask(NO_FANCY_MATH_387, USE_FANCY_MATH_387)
+Target RejectNegative Report InverseMask(NO_FANCY_MATH_387, USE_FANCY_MATH_387) Save
Generate sin, cos, sqrt for FPU
mfp-ret-in-387
-Target Report Mask(FLOAT_RETURNS)
+Target Report Mask(FLOAT_RETURNS) Save
Return values of functions in FPU registers
mfpmath=
@@ -87,19 +121,19 @@ Target RejectNegative Joined Var(ix86_fpmath_string)
Generate floating point mathematics using given instruction set
mhard-float
-Target RejectNegative Mask(80387) MaskExists
+Target RejectNegative Mask(80387) MaskExists Save
Use hardware fp
mieee-fp
-Target Report Mask(IEEE_FP)
+Target Report Mask(IEEE_FP) Save
Use IEEE math for fp comparisons
minline-all-stringops
-Target Report Mask(INLINE_ALL_STRINGOPS)
+Target Report Mask(INLINE_ALL_STRINGOPS) Save
Inline all known string operations
minline-stringops-dynamically
-Target Report Mask(INLINE_STRINGOPS_DYNAMICALLY)
+Target Report Mask(INLINE_STRINGOPS_DYNAMICALLY) Save
Inline memset/memcpy string operations, but perform inline version only for small blocks
mintel-syntax
@@ -107,23 +141,23 @@ Target Undocumented
;; Deprecated
mms-bitfields
-Target Report Mask(MS_BITFIELD_LAYOUT)
+Target Report Mask(MS_BITFIELD_LAYOUT) Save
Use native (MS) bitfield layout
mno-align-stringops
-Target RejectNegative Report Mask(NO_ALIGN_STRINGOPS) Undocumented
+Target RejectNegative Report Mask(NO_ALIGN_STRINGOPS) Undocumented Save
mno-fancy-math-387
-Target RejectNegative Report Mask(NO_FANCY_MATH_387) Undocumented
+Target RejectNegative Report Mask(NO_FANCY_MATH_387) Undocumented Save
mno-push-args
-Target RejectNegative Report Mask(NO_PUSH_ARGS) Undocumented
+Target RejectNegative Report Mask(NO_PUSH_ARGS) Undocumented Save
mno-red-zone
-Target RejectNegative Report Mask(NO_RED_ZONE) Undocumented
+Target RejectNegative Report Mask(NO_RED_ZONE) Undocumented Save
momit-leaf-frame-pointer
-Target Report Mask(OMIT_LEAF_FRAME_POINTER)
+Target Report Mask(OMIT_LEAF_FRAME_POINTER) Save
Omit the frame pointer in leaf functions
mpc
@@ -135,11 +169,11 @@ Target RejectNegative Joined Var(ix86_preferred_stack_boundary_string)
Attempt to keep stack aligned to this power of 2
mpush-args
-Target Report InverseMask(NO_PUSH_ARGS, PUSH_ARGS)
+Target Report InverseMask(NO_PUSH_ARGS, PUSH_ARGS) Save
Use push instructions to save outgoing arguments
mred-zone
-Target RejectNegative Report InverseMask(NO_RED_ZONE, RED_ZONE)
+Target RejectNegative Report InverseMask(NO_RED_ZONE, RED_ZONE) Save
Use red-zone in the x86-64 code
mregparm=
@@ -147,15 +181,15 @@ Target RejectNegative Joined Var(ix86_regparm_string)
Number of registers used to pass integer arguments
mrtd
-Target Report Mask(RTD)
+Target Report Mask(RTD) Save
Alternate calling convention
msoft-float
-Target InverseMask(80387)
+Target InverseMask(80387) Save
Do not use hardware fp
msseregparm
-Target RejectNegative Mask(SSEREGPARM)
+Target RejectNegative Mask(SSEREGPARM) Save
Use SSE register passing conventions for SF and DF mode
mstackrealign
@@ -163,7 +197,7 @@ Target Report Var(ix86_force_align_arg_pointer)
Realign stack in prologue
mstack-arg-probe
-Target Report Mask(STACK_PROBE)
+Target Report Mask(STACK_PROBE) Save
Enable stack probing
mstringop-strategy=
@@ -186,104 +220,105 @@ mveclibabi=
Target RejectNegative Joined Var(ix86_veclibabi_string)
Vector library ABI to use
+mrecip
+Target Report Mask(RECIP) Save
+Generate reciprocals instead of divss and sqrtss.
+
+mcld
+Target Report Mask(CLD) Save
+Generate cld instruction in the function prologue.
+
+mno-fused-madd
+Target RejectNegative Report Mask(NO_FUSED_MADD) Undocumented Save
+
+mfused-madd
+Target Report InverseMask(NO_FUSED_MADD, FUSED_MADD) Save
+Enable automatic generation of fused floating point multiply-add instructions
+if the ISA supports such instructions. The -mfused-madd option is on by
+default.
+
;; ISA support
m32
-Target RejectNegative Negative(m64) Report InverseMask(ISA_64BIT) Var(ix86_isa_flags) VarExists
+Target RejectNegative Negative(m64) Report InverseMask(ISA_64BIT) Var(ix86_isa_flags) VarExists Save
Generate 32bit i386 code
m64
-Target RejectNegative Negative(m32) Report Mask(ISA_64BIT) Var(ix86_isa_flags) VarExists
+Target RejectNegative Negative(m32) Report Mask(ISA_64BIT) Var(ix86_isa_flags) VarExists Save
Generate 64bit x86-64 code
mmmx
-Target Report Mask(ISA_MMX) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_MMX) Var(ix86_isa_flags) VarExists Save
Support MMX built-in functions
m3dnow
-Target Report Mask(ISA_3DNOW) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_3DNOW) Var(ix86_isa_flags) VarExists Save
Support 3DNow! built-in functions
m3dnowa
-Target Undocumented Mask(ISA_3DNOW_A) Var(ix86_isa_flags) VarExists
+Target Undocumented Mask(ISA_3DNOW_A) Var(ix86_isa_flags) VarExists Save
Support Athlon 3Dnow! built-in functions
msse
-Target Report Mask(ISA_SSE) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_SSE) Var(ix86_isa_flags) VarExists Save
Support MMX and SSE built-in functions and code generation
msse2
-Target Report Mask(ISA_SSE2) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_SSE2) Var(ix86_isa_flags) VarExists Save
Support MMX, SSE and SSE2 built-in functions and code generation
msse3
-Target Report Mask(ISA_SSE3) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_SSE3) Var(ix86_isa_flags) VarExists Save
Support MMX, SSE, SSE2 and SSE3 built-in functions and code generation
mssse3
-Target Report Mask(ISA_SSSE3) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_SSSE3) Var(ix86_isa_flags) VarExists Save
Support MMX, SSE, SSE2, SSE3 and SSSE3 built-in functions and code generation
msse4.1
-Target Report Mask(ISA_SSE4_1) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_SSE4_1) Var(ix86_isa_flags) VarExists Save
Support MMX, SSE, SSE2, SSE3, SSSE3 and SSE4.1 built-in functions and code generation
msse4.2
-Target Report Mask(ISA_SSE4_2) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_SSE4_2) Var(ix86_isa_flags) VarExists Save
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1 and SSE4.2 built-in functions and code generation
msse4
-Target RejectNegative Report Mask(ISA_SSE4_2) MaskExists Var(ix86_isa_flags) VarExists
+Target RejectNegative Report Mask(ISA_SSE4_2) MaskExists Var(ix86_isa_flags) VarExists Save
Support MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1 and SSE4.2 built-in functions and code generation
mno-sse4
-Target RejectNegative Report InverseMask(ISA_SSE4_1) MaskExists Var(ix86_isa_flags) VarExists
+Target RejectNegative Report InverseMask(ISA_SSE4_1) MaskExists Var(ix86_isa_flags) VarExists Save
Do not support SSE4.1 and SSE4.2 built-in functions and code generation
msse4a
-Target Report Mask(ISA_SSE4A) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_SSE4A) Var(ix86_isa_flags) VarExists Save
Support MMX, SSE, SSE2, SSE3 and SSE4A built-in functions and code generation
msse5
-Target Report Mask(ISA_SSE5) Var(ix86_isa_flags) VarExists
+Target Report Mask(ISA_SSE5) Var(ix86_isa_flags) VarExists Save
Support SSE5 built-in functions and code generation
-;; Instruction support
-
-mcld
-Target Report Mask(CLD)
-Generate cld instruction in the function prologue.
-
mabm
-Target Report RejectNegative Var(x86_abm)
+Target Report Mask(ISA_ABM) Var(ix86_isa_flags) VarExists Save
Support code generation of Advanced Bit Manipulation (ABM) instructions.
-mcx16
-Target Report RejectNegative Var(x86_cmpxchg16b)
-Support code generation of cmpxchg16b instruction.
-
mpopcnt
-Target Report RejectNegative Var(x86_popcnt)
+Target Report Mask(ISA_POPCNT) Var(ix86_isa_flags) VarExists Save
Support code generation of popcnt instruction.
+mcx16
+Target Report Mask(ISA_CX16) Var(ix86_isa_flags) VarExists Save
+Support code generation of cmpxchg16b instruction.
+
msahf
-Target Report RejectNegative Var(x86_sahf)
+Target Report Mask(ISA_SAHF) Var(ix86_isa_flags) VarExists Save
Support code generation of sahf instruction in 64bit x86-64 code.
-mrecip
-Target Report RejectNegative Var(x86_recip)
-Generate reciprocals instead of divss and sqrtss.
-
-mfused-madd
-Target Report Var(x86_fused_muladd) Init(1)
-Enable automatic generation of fused floating point multiply-add instructions
-if the ISA supports such instructions. The -mfused-madd option is on by
-default.
-
maes
-Target Report RejectNegative Var(x86_aes)
+Target Report Mask(ISA_AES) Var(ix86_isa_flags) VarExists Save
Support AES built-in functions and code generation
mpclmul
-Target Report RejectNegative Var(x86_pclmul)
+Target Report Mask(ISA_PCLMUL) Var(ix86_isa_flags) VarExists Save
Support PCLMUL built-in functions and code generation
diff --git a/gcc/config/i386/t-i386 b/gcc/config/i386/t-i386
new file mode 100644
index 00000000000..4c0c046dae6
--- /dev/null
+++ b/gcc/config/i386/t-i386
@@ -0,0 +1,13 @@
+i386.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(RTL_H) $(TREE_H) $(TM_P_H) $(REGS_H) hard-reg-set.h \
+ $(REAL_H) insn-config.h conditions.h output.h insn-codes.h \
+ $(INSN_ATTR_H) $(FLAGS_H) $(C_COMMON_H) except.h $(FUNCTION_H) \
+ $(RECOG_H) $(EXPR_H) $(OPTABS_H) toplev.h $(BASIC_BLOCK_H) \
+ $(GGC_H) $(TARGET_H) $(TARGET_DEF_H) langhooks.h $(CGRAPH_H) \
+ $(TREE_GIMPLE_H) dwarf2.h $(DF_H) tm-constrs.h $(PARAMS_H)
+
+i386-c.o: $(srcdir)/config/i386/i386-c.c \
+ $(srcdir)/config/i386/i386-protos.h $(CONFIG_H) $(SYSTEM_H) coretypes.h \
+ $(TM_H) $(RTL_H) $(TREE_H) $(TM_P_H) $(FLAGS_H) $(C_COMMON_H) $(GGC_H) \
+ $(TARGET_H) $(TARGET_DEF_H) $(CPPLIB_H) $(C_PRAGMA_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/i386/i386-c.c
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index e93ae31b0bd..c07b21d9d53 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -50,7 +50,7 @@ along with GCC; see the file COPYING3. If not see
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "intl.h"
#include "df.h"
#include "debug.h"
@@ -275,7 +275,7 @@ static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
-static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
+static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
static bool ia64_vector_mode_supported_p (enum machine_mode mode);
static bool ia64_cannot_force_const_mem (rtx);
@@ -493,6 +493,12 @@ static const struct attribute_spec ia64_attribute_table[] =
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
+#undef TARGET_OPTION_COLD_ATTRIBUTE_SETS_OPTIMIZATION
+#define TARGET_OPTION_COLD_ATTRIBUTE_SETS_OPTIMIZATION true
+
+#undef TARGET_OPTION_HOT_ATTRIBUTE_SETS_OPTIMIZATION
+#define TARGET_OPTION_HOT_ATTRIBUTE_SETS_OPTIMIZATION true
+
struct gcc_target targetm = TARGET_INITIALIZER;
typedef enum
@@ -4336,7 +4342,8 @@ ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
/* Implement va_arg. */
static tree
-ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
/* Variable sized types are passed by reference. */
if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
@@ -4359,8 +4366,7 @@ ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
size_int (-2 * UNITS_PER_WORD));
t = fold_convert (TREE_TYPE (valist), t);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (unshare_expr (valist), t, pre_p);
}
return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
@@ -5232,9 +5238,6 @@ ia64_override_options (void)
TARGET_INLINE_SQRT = INL_MAX_THR;
}
- ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
- flag_schedule_insns_after_reload = 0;
-
ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
init_machine_status = ia64_init_machine_status;
@@ -9927,6 +9930,13 @@ void
ia64_optimization_options (int level ATTRIBUTE_UNUSED,
int size ATTRIBUTE_UNUSED)
{
+ /* Disable the second machine independent scheduling pass and use one for the
+ IA-64. This needs to be here instead of in OVERRIDE_OPTIONS because this
+ is done whenever the optimization is changed via #pragma GCC optimize or
+ attribute((optimize(...))). */
+ ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
+ flag_schedule_insns_after_reload = 0;
+
/* Let the scheduler form additional regions. */
set_param_value ("max-sched-extend-regions-iters", 2);
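
Context for the move above, with a hypothetical example: ia64_optimization_options is re-run whenever the optimization level changes per function, so keeping the scheduling-pass adjustment there (rather than in OVERRIDE_OPTIONS) re-applies it for cases like the one sketched below.

/* Hypothetical: changing the optimization level for one function re-runs
   the optimization_options hook, which again replaces the generic second
   scheduling pass with the IA-64 specific one.  */
__attribute__((optimize("O0")))
void
debug_helper (void)
{
  /* Left unoptimized for easier debugging.  */
}
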
diff --git a/gcc/config/m32c/m32c-protos.h b/gcc/config/m32c/m32c-protos.h
index a63191c21ab..ec98d81f93c 100644
--- a/gcc/config/m32c/m32c-protos.h
+++ b/gcc/config/m32c/m32c-protos.h
@@ -1,5 +1,5 @@
/* Target Prototypes for R8C/M16C/M32C
- Copyright (C) 2005, 2007
+ Copyright (C) 2005, 2007, 2008
Free Software Foundation, Inc.
Contributed by Red Hat.
@@ -108,7 +108,7 @@ int m32c_split_psi_p (rtx *);
#ifdef TREE_CODE
void m32c_function_arg_advance (CUMULATIVE_ARGS *, MM, tree, int);
-tree m32c_gimplify_va_arg_expr (tree, tree, tree *, tree *);
+tree m32c_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
void m32c_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree, int);
bool m32c_promote_function_return (const_tree);
int m32c_special_page_vector_p (tree);
diff --git a/gcc/config/m32c/m32c.c b/gcc/config/m32c/m32c.c
index b0733dd8364..0e03c9e6bf2 100644
--- a/gcc/config/m32c/m32c.c
+++ b/gcc/config/m32c/m32c.c
@@ -47,7 +47,7 @@
#include "target-def.h"
#include "tm_p.h"
#include "langhooks.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "df.h"
/* Prototypes */
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index d1def425576..913acc71cf1 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -56,7 +56,7 @@ along with GCC; see the file COPYING3. If not see
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "bitmap.h"
#include "diagnostic.h"
@@ -4959,12 +4959,12 @@ mips_va_start (tree valist, rtx nextarg)
if (cum->stack_words > 0)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
size_int (cum->stack_words * UNITS_PER_WORD));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Emit code to initialize GTOP, the top of the GPR save area. */
t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Emit code to initialize FTOP, the top of the FPR save area.
@@ -4976,18 +4976,18 @@ mips_va_start (tree valist, rtx nextarg)
if (fpr_offset)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
size_int (-fpr_offset));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Emit code to initialize GOFF, the offset from GTOP of the
next GPR argument. */
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Likewise emit code to initialize FOFF, the offset from FTOP
of the next FPR argument. */
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
@@ -5001,7 +5001,8 @@ mips_va_start (tree valist, rtx nextarg)
/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
static tree
-mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
+mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
tree addr;
bool indirect_p;
@@ -5100,8 +5101,7 @@ mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
/* [1] Emit code for: off &= -rsize. */
t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
build_int_cst (NULL_TREE, -rsize));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (off, t, pre_p);
}
osize = rsize;
}
@@ -5137,7 +5137,7 @@ mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
u = size_int (-osize);
t = build2 (BIT_AND_EXPR, sizetype, t, u);
t = fold_convert (TREE_TYPE (ovfl), t);
- align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
+ align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
}
else
align = NULL;
diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c
index 238d35276e4..76d84bad092 100644
--- a/gcc/config/pa/pa.c
+++ b/gcc/config/pa/pa.c
@@ -1,6 +1,6 @@
/* Subroutines for insn-output.c for HPPA.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
- 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
This file is part of GCC.
@@ -125,7 +125,7 @@ static void pa_asm_out_destructor (rtx, int);
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
-static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
+static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
@@ -5998,7 +5998,8 @@ hppa_va_start (tree valist, rtx nextarg)
}
static tree
-hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
+hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
if (TARGET_64BIT)
{
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index ce1ec4b9665..76c92352b57 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -85,12 +85,12 @@ rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
#define builtin_assert(TXT) cpp_assert (pfile, TXT)
/* Keep the AltiVec keywords handy for fast comparisons. */
-static tree __vector_keyword;
-static tree vector_keyword;
-static tree __pixel_keyword;
-static tree pixel_keyword;
-static tree __bool_keyword;
-static tree bool_keyword;
+static GTY(()) tree __vector_keyword;
+static GTY(()) tree vector_keyword;
+static GTY(()) tree __pixel_keyword;
+static GTY(()) tree pixel_keyword;
+static GTY(()) tree __bool_keyword;
+static GTY(()) tree bool_keyword;
/* Preserved across calls. */
static tree expand_bool_pixel;
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 5e2f7ac38a1..0e03be0fe2a 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -52,7 +52,7 @@
#include "reload.h"
#include "cfglayout.h"
#include "sched-int.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
@@ -958,7 +958,7 @@ static void rs6000_darwin_file_start (void);
static tree rs6000_build_builtin_va_list (void);
static void rs6000_va_start (tree, rtx);
-static tree rs6000_gimplify_va_arg (tree, tree, tree *, tree *);
+static tree rs6000_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
static bool rs6000_scalar_mode_supported_p (enum machine_mode);
static bool rs6000_vector_mode_supported_p (enum machine_mode);
@@ -6713,9 +6713,12 @@ rs6000_va_start (tree valist, rtx nextarg)
valist = build_va_arg_indirect_ref (valist);
gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
- fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
- sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
+ f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
+ f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
+ f_sav, NULL_TREE);
/* Count number of gp and fp argument registers used. */
words = crtl->args.info.words;
@@ -6731,7 +6734,7 @@ rs6000_va_start (tree valist, rtx nextarg)
if (cfun->va_list_gpr_size)
{
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
build_int_cst (NULL_TREE, n_gpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -6739,7 +6742,7 @@ rs6000_va_start (tree valist, rtx nextarg)
if (cfun->va_list_fpr_size)
{
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
build_int_cst (NULL_TREE, n_fpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -6750,7 +6753,7 @@ rs6000_va_start (tree valist, rtx nextarg)
if (words != 0)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t,
size_int (words * UNITS_PER_WORD));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -6767,7 +6770,7 @@ rs6000_va_start (tree valist, rtx nextarg)
if (cfun->machine->varargs_save_offset)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
size_int (cfun->machine->varargs_save_offset));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (sav), sav, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
@@ -6775,7 +6778,8 @@ rs6000_va_start (tree valist, rtx nextarg)
/* Implement va_arg. */
tree
-rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
tree gpr, fpr, ovf, sav, reg, t, u;
@@ -6784,6 +6788,7 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
int align;
tree ptrtype = build_pointer_type (type);
int regalign = 0;
+ gimple stmt;
if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
{
@@ -6802,14 +6807,14 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
if (elem_size < UNITS_PER_WORD)
{
tree real_part, imag_part;
- tree post = NULL_TREE;
+ gimple_seq post = NULL;
real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
&post);
/* Copy the value into a temporary, lest the formal temporary
be reused out from under us. */
real_part = get_initialized_tmp_var (real_part, pre_p, &post);
- append_to_statement_list (post, pre_p);
+ gimple_seq_add_seq (pre_p, post);
imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
post_p);
@@ -6829,9 +6834,12 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
valist = build_va_arg_indirect_ref (valist);
gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
- fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
- sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
+ f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
+ f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
+ f_sav, NULL_TREE);
size = int_size_in_bytes (type);
rsize = (size + 3) / 4;
@@ -6885,18 +6893,19 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
if (n_reg == 2 && reg == gpr)
{
regalign = 1;
- u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), reg,
+ u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
build_int_cst (TREE_TYPE (reg), n_reg - 1));
- u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, u);
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
+ unshare_expr (reg), u);
}
/* _Decimal128 is passed in even/odd fpr pairs; the stored
reg number is 0 for f1, so we want to make it odd. */
else if (reg == fpr && TYPE_MODE (type) == TDmode)
{
regalign = 1;
- t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), reg,
+ t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
build_int_cst (TREE_TYPE (reg), 1));
- u = build2 (MODIFY_EXPR, void_type_node, reg, t);
+ u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
}
t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
@@ -6909,7 +6918,7 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
if (sav_ofs)
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));
- u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg,
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
build_int_cst (TREE_TYPE (reg), n_reg));
u = fold_convert (sizetype, u);
u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
@@ -6922,22 +6931,18 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
&& TYPE_MODE (type) == SDmode)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (addr, t, pre_p);
- t = build1 (GOTO_EXPR, void_type_node, lab_over);
- gimplify_and_add (t, pre_p);
+ gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
- t = build1 (LABEL_EXPR, void_type_node, lab_false);
- append_to_statement_list (t, pre_p);
+ stmt = gimple_build_label (lab_false);
+ gimple_seq_add_stmt (pre_p, stmt);
if ((n_reg == 2 && !regalign) || n_reg > 2)
{
/* Ensure that we don't find any more args in regs.
Alignment has taken care of for special cases. */
- t = build_gimple_modify_stmt (reg,
- build_int_cst (TREE_TYPE (reg), 8));
- gimplify_and_add (t, pre_p);
+ gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
}
}
@@ -6955,17 +6960,15 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
}
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
- u = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
- gimplify_and_add (u, pre_p);
+ gimplify_assign (unshare_expr (addr), t, pre_p);
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (unshare_expr (ovf), t, pre_p);
if (lab_over)
{
- t = build1 (LABEL_EXPR, void_type_node, lab_over);
- append_to_statement_list (t, pre_p);
+ stmt = gimple_build_label (lab_over);
+ gimple_seq_add_stmt (pre_p, stmt);
}
if (STRICT_ALIGNMENT
@@ -11321,15 +11324,14 @@ rs6000_alloc_sdmode_stack_slot (void)
{
tree t;
basic_block bb;
- block_stmt_iterator bsi;
+ gimple_stmt_iterator gsi;
gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
FOR_EACH_BB (bb)
- for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- tree ret = walk_tree_without_duplicates (bsi_stmt_ptr (bsi),
- rs6000_check_sdmode, NULL);
+ tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
if (ret)
{
rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
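
The rs6000 hunks above, and the back-end hunks that follow, apply one and the
same mechanical conversion for the tuples work: a GIMPLE_MODIFY_STMT built
with build2 and queued through gimplify_and_add becomes a direct
gimplify_assign, and gotos/labels are emitted as GIMPLE statements into the
pre_p sequence. A minimal sketch of the old and new idioms, assuming GCC's
internal tree.h/gimple.h declarations (not a standalone translation unit):

  /* Old idiom: build GENERIC statements, then gimplify them into *pre_p.  */
  t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
  gimplify_and_add (t, pre_p);
  t = build1 (GOTO_EXPR, void_type_node, lab_over);
  gimplify_and_add (t, pre_p);

  /* New idiom: pre_p is now a gimple_seq *, so the assignment is gimplified
     directly and the jump/label are built as GIMPLE statements.  */
  gimplify_assign (addr, t, pre_p);
  gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
  gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
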
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index 936e0a0ead2..3c7d92b3c8d 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -50,7 +50,7 @@ along with GCC; see the file COPYING3. If not see
#include "debug.h"
#include "langhooks.h"
#include "optabs.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "df.h"
@@ -8424,15 +8424,15 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
if (cfun->va_list_gpr_size)
{
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr,
- build_int_cst (NULL_TREE, n_gpr));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
+ build_int_cst (NULL_TREE, n_gpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
if (cfun->va_list_fpr_size)
{
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
build_int_cst (NULL_TREE, n_fpr));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -8452,7 +8452,7 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
@@ -8465,7 +8465,7 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
size_int (-RETURN_REGNUM * UNITS_PER_WORD));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (sav), sav, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
@@ -8496,8 +8496,8 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
} */
static tree
-s390_gimplify_va_arg (tree valist, tree type, tree *pre_p,
- tree *post_p ATTRIBUTE_UNUSED)
+s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p ATTRIBUTE_UNUSED)
{
tree f_gpr, f_fpr, f_ovf, f_sav;
tree gpr, fpr, ovf, sav, reg, t, u;
@@ -8512,9 +8512,13 @@ s390_gimplify_va_arg (tree valist, tree type, tree *pre_p,
valist = build_va_arg_indirect_ref (valist);
gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
- ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
+ /* The tree for args* cannot be shared between gpr/fpr and ovf since
+ both appear on a lhs. */
+ valist = unshare_expr (valist);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
+
size = int_size_in_bytes (type);
if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
@@ -8598,14 +8602,11 @@ s390_gimplify_va_arg (tree valist, tree type, tree *pre_p,
fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (addr, t, pre_p);
- t = build1 (GOTO_EXPR, void_type_node, lab_over);
- gimplify_and_add (t, pre_p);
+ gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
- t = build1 (LABEL_EXPR, void_type_node, lab_false);
- append_to_statement_list (t, pre_p);
+ gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
/* ... Otherwise out of the overflow area. */
@@ -8617,16 +8618,13 @@ s390_gimplify_va_arg (tree valist, tree type, tree *pre_p,
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
- u = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
- gimplify_and_add (u, pre_p);
+ gimplify_assign (addr, t, pre_p);
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
size_int (size));
- t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, ovf, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (ovf, t, pre_p);
- t = build1 (LABEL_EXPR, void_type_node, lab_over);
- append_to_statement_list (t, pre_p);
+ gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
/* Increment register save count. */
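
The unshare_expr calls introduced here (and in the rs6000 and spu hunks)
serve the same purpose: a GENERIC tree that is gimplified more than once, or
that ends up on more than one left-hand side, must not share structure,
because gimplification may rewrite it in place. A short sketch of the idiom,
reusing the names from the s390 hunk above (GCC-internal declarations
assumed):

  /* Give the overflow-area reference its own copy of VALIST; gpr/fpr and
     ovf are all written during va_arg expansion.  */
  valist = unshare_expr (valist);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
  /* ... and unshare again each time the same tree reappears on a lhs.  */
  gimplify_assign (unshare_expr (ovf), t, pre_p);
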
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index e311362de6c..c7b8f582062 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -51,7 +51,7 @@ along with GCC; see the file COPYING3. If not see
#include "intl.h"
#include "sched-int.h"
#include "ggc.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "cfgloop.h"
#include "alloc-pool.h"
#include "tm-constrs.h"
@@ -262,7 +262,7 @@ static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
static tree sh_build_builtin_va_list (void);
static tree sh_canonical_va_list_type (tree);
static void sh_va_start (tree, rtx);
-static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
+static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
const_tree, bool);
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
@@ -7200,7 +7200,7 @@ sh_va_start (tree valist, rtx nextarg)
/* Call __builtin_saveregs. */
u = make_tree (sizetype, expand_builtin_saveregs ());
u = fold_convert (ptr_type_node, u);
- t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -7211,11 +7211,11 @@ sh_va_start (tree valist, rtx nextarg)
nfp = 0;
u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
size_int (UNITS_PER_WORD * nfp));
- t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_limit, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
- t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -7226,12 +7226,12 @@ sh_va_start (tree valist, rtx nextarg)
nint = 0;
u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
size_int (UNITS_PER_WORD * nint));
- t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_o_limit, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
u = make_tree (ptr_type_node, nextarg);
- t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_stack, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
@@ -7260,8 +7260,8 @@ find_sole_member (tree type)
/* Implement `va_arg'. */
static tree
-sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
- tree *post_p ATTRIBUTE_UNUSED)
+sh_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p ATTRIBUTE_UNUSED)
{
HOST_WIDE_INT size, rsize;
tree tmp, pptr_type_node;
@@ -7351,11 +7351,9 @@ sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
- tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (addr, tmp, pre_p);
- tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (next_fp_tmp, valist, pre_p);
tmp = next_fp_limit;
if (size > 4 && !is_double)
tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp), tmp,
@@ -7375,9 +7373,7 @@ sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
size_int (UNITS_PER_WORD));
tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
next_fp_tmp, tmp);
- tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
- next_fp_tmp, tmp);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (next_fp_tmp, tmp, pre_p);
}
if (is_double)
gimplify_and_add (cmp, pre_p);
@@ -7409,13 +7405,10 @@ sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
gimplify_and_add (tmp, pre_p);
tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
- tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
- gimplify_and_add (tmp, pre_p);
- tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, next_fp_tmp, valist);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (addr, tmp, pre_p);
+ gimplify_assign (next_fp_tmp, valist, pre_p);
- tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, next_fp_tmp);
- gimplify_and_add (tmp, post_p);
+ gimplify_assign (valist, next_fp_tmp, post_p);
valist = next_fp_tmp;
}
else
@@ -7429,8 +7422,7 @@ sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
gimplify_and_add (tmp, pre_p);
tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
- tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (addr, tmp, pre_p);
tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
gimplify_and_add (tmp, pre_p);
@@ -7439,15 +7431,10 @@ sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
gimplify_and_add (tmp, pre_p);
if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
- {
- tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node,
- next_o, next_o_limit);
- gimplify_and_add (tmp, pre_p);
- }
+ gimplify_assign (next_o, next_o_limit, pre_p);
tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
- tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, tmp);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (addr, tmp, pre_p);
}
if (!result)
@@ -7463,8 +7450,7 @@ sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
if (result)
{
- tmp = build2 (GIMPLE_MODIFY_STMT, void_type_node, result, tmp);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (result, tmp, pre_p);
tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
gimplify_and_add (tmp, pre_p);
diff --git a/gcc/config/sh/sh.h b/gcc/config/sh/sh.h
index 5204847abc5..2305872903d 100644
--- a/gcc/config/sh/sh.h
+++ b/gcc/config/sh/sh.h
@@ -463,7 +463,7 @@ do { \
do { \
if (LEVEL) \
{ \
- flag_omit_frame_pointer = -1; \
+ flag_omit_frame_pointer = 2; \
if (! SIZE) \
sh_div_str = "inv:minlat"; \
} \
@@ -690,7 +690,7 @@ do { \
if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno))) \
sh_additional_register_names[regno][0] = '\0'; \
\
- if (flag_omit_frame_pointer < 0) \
+ if (flag_omit_frame_pointer == 2) \
{ \
/* The debugging information is sufficient, \
but gdb doesn't implement this yet */ \
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index 5e6f5748672..adf28c027b1 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -48,7 +48,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"
@@ -410,7 +410,7 @@ static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
-static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
+static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
enum machine_mode, const_tree, bool);
@@ -5709,7 +5709,8 @@ sparc_va_start (tree valist, rtx nextarg)
/* Implement `va_arg' for stdarg. */
static tree
-sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
+sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
{
HOST_WIDE_INT size, rsize, align;
tree addr, incr;
@@ -5792,8 +5793,7 @@ sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
addr = fold_convert (ptrtype, addr);
incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
- incr = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, incr);
- gimplify_and_add (incr, post_p);
+ gimplify_assign (valist, incr, post_p);
return build_va_arg_indirect_ref (addr);
}
diff --git a/gcc/config/sparc/sparc.h b/gcc/config/sparc/sparc.h
index ef60292cef3..4d180da8285 100644
--- a/gcc/config/sparc/sparc.h
+++ b/gcc/config/sparc/sparc.h
@@ -981,9 +981,12 @@ extern int sparc_mode_class[];
/* Pick a default value we can notice from override_options:
!v9: Default is on.
- v9: Default is off. */
+   v9: Default is off.
+   Originally it was -1, but the container for options later changed to an
+   unsigned byte, so we pick 127 as the default value: unlike 0 or 1, it
+   still marks the setting as not having been specified by the user.  */
-#define DEFAULT_PCC_STRUCT_RETURN -1
+#define DEFAULT_PCC_STRUCT_RETURN 127
/* Functions which return large structures get the address
to place the wanted value at offset 64 from the frame.
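
The sh.h change (flag_omit_frame_pointer: -1 becomes 2) and this sparc.h
change (DEFAULT_PCC_STRUCT_RETURN: -1 becomes 127) both retire a negative
"not yet set" sentinel, apparently for the reason the new sparc.h comment
spells out: the option variables now live in an unsigned byte, where -1
wraps around and a less-than-zero test can never succeed. A small,
self-contained C illustration of the pitfall (plain C, not GCC code):

  #include <stdio.h>

  int main (void)
  {
    unsigned char flag = -1;   /* stored as 255 in an unsigned byte */
    if (flag < 0)              /* promoted to int 255; never true */
      puts ("old sentinel seen");
    else
      puts ("the < 0 test never fires; use an in-range sentinel instead");
    return 0;
  }
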
diff --git a/gcc/config/spu/spu-c.c b/gcc/config/spu/spu-c.c
index 0b0d2e8743a..96fe43e6e94 100644
--- a/gcc/config/spu/spu-c.c
+++ b/gcc/config/spu/spu-c.c
@@ -35,6 +35,64 @@
#include "spu-builtins.h"
+/* Keep the vector keywords handy for fast comparisons. */
+static GTY(()) tree __vector_keyword;
+static GTY(()) tree vector_keyword;
+
+static cpp_hashnode *
+spu_categorize_keyword (const cpp_token *tok)
+{
+ if (tok->type == CPP_NAME)
+ {
+ cpp_hashnode *ident = tok->val.node;
+
+ if (ident == C_CPP_HASHNODE (vector_keyword)
+ || ident == C_CPP_HASHNODE (__vector_keyword))
+ return C_CPP_HASHNODE (__vector_keyword);
+ else
+ return ident;
+ }
+ return 0;
+}
+
+/* Called to decide whether a conditional macro should be expanded.
+   Since we have exactly one such macro (i.e., 'vector'), we do not
+   need to examine the 'tok' parameter. */
+
+static cpp_hashnode *
+spu_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
+{
+ cpp_hashnode *expand_this = tok->val.node;
+ cpp_hashnode *ident;
+
+ ident = spu_categorize_keyword (tok);
+ if (ident == C_CPP_HASHNODE (__vector_keyword))
+ {
+ tok = cpp_peek_token (pfile, 0);
+ ident = spu_categorize_keyword (tok);
+
+ if (ident)
+ {
+ enum rid rid_code = (enum rid)(ident->rid_code);
+ if (ident->type == NT_MACRO)
+ {
+ (void) cpp_get_token (pfile);
+ tok = cpp_peek_token (pfile, 0);
+ ident = spu_categorize_keyword (tok);
+ if (ident)
+ rid_code = (enum rid)(ident->rid_code);
+ }
+
+ if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
+ || rid_code == RID_SHORT || rid_code == RID_SIGNED
+ || rid_code == RID_INT || rid_code == RID_CHAR
+ || rid_code == RID_FLOAT || rid_code == RID_DOUBLE)
+ expand_this = C_CPP_HASHNODE (__vector_keyword);
+ }
+ }
+ return expand_this;
+}
+
/* target hook for resolve_overloaded_builtin(). Returns a function call
RTX if we can resolve the overloaded builtin */
tree
@@ -140,6 +198,22 @@ spu_cpu_cpp_builtins (struct cpp_reader *pfile)
if (spu_arch == PROCESSOR_CELLEDP)
builtin_define_std ("__SPU_EDP__");
builtin_define_std ("__vector=__attribute__((__spu_vector__))");
+
+ if (!flag_iso)
+ {
+ /* Define this when supporting context-sensitive keywords. */
+ cpp_define (pfile, "__VECTOR_KEYWORD_SUPPORTED__");
+ cpp_define (pfile, "vector=vector");
+
+ /* Initialize vector keywords. */
+ __vector_keyword = get_identifier ("__vector");
+ C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;
+ vector_keyword = get_identifier ("vector");
+ C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;
+
+ /* Enable context-sensitive macros. */
+ cpp_get_callbacks (pfile)->macro_to_expand = spu_macro_to_expand;
+ }
}
void
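
From the user's point of view, the conditional macro added above makes
'vector' behave as a keyword only where a vector type is actually being
spelled: spu_macro_to_expand expands it to __vector only when the following
token (or the one after it, if that token is itself a macro) is one of the
type keywords listed in the rid_code check. A hedged sketch of the expected
behaviour in SPU C source (non-ISO mode, where __VECTOR_KEYWORD_SUPPORTED__
is defined):

  #ifdef __VECTOR_KEYWORD_SUPPORTED__
  vector unsigned int v;   /* 'vector' precedes a type keyword, so it expands
                              to __vector, i.e. the SPU vector attribute.  */
  #endif
  int vector = 0;          /* elsewhere 'vector' remains an ordinary
                              identifier.  */
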
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index e645adb2281..83bd9f51315 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+/* Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
@@ -50,7 +50,7 @@
#include "assert.h"
#include "c-common.h"
#include "machmode.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "tm-constrs.h"
#include "spu-builtins.h"
#include "ddg.h"
@@ -118,8 +118,8 @@ static unsigned char spu_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_m
const_tree type, unsigned char named);
static tree spu_build_builtin_va_list (void);
static void spu_va_start (tree, rtx);
-static tree spu_gimplify_va_arg_expr (tree valist, tree type, tree * pre_p,
- tree * post_p);
+static tree spu_gimplify_va_arg_expr (tree valist, tree type,
+ gimple_seq * pre_p, gimple_seq * post_p);
static int regno_aligned_for_load (int regno);
static int store_with_one_insn_p (rtx mem);
static int mem_is_padded_component_ref (rtx x);
@@ -3238,7 +3238,7 @@ spu_va_start (tree valist, rtx nextarg)
if (crtl->args.pretend_args_size > 0)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (args), t,
size_int (-STACK_POINTER_OFFSET));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (args), args, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (args), args, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -3247,7 +3247,7 @@ spu_va_start (tree valist, rtx nextarg)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (skip), t,
size_int (crtl->args.pretend_args_size
- STACK_POINTER_OFFSET));
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (skip), skip, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (skip), skip, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
@@ -3270,8 +3270,8 @@ spu_va_start (tree valist, rtx nextarg)
ret = *(TYPE *)addr;
*/
static tree
-spu_gimplify_va_arg_expr (tree valist, tree type, tree * pre_p,
- tree * post_p ATTRIBUTE_UNUSED)
+spu_gimplify_va_arg_expr (tree valist, tree type, gimple_seq * pre_p,
+ gimple_seq * post_p ATTRIBUTE_UNUSED)
{
tree f_args, f_skip;
tree args, skip;
@@ -3303,22 +3303,21 @@ spu_gimplify_va_arg_expr (tree valist, tree type, tree * pre_p,
/* build conditional expression to calculate addr. The expression
will be gimplified later. */
paddedsize = size_int (rsize);
- tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node, args, paddedsize);
+ tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node, unshare_expr (args), paddedsize);
tmp = build2 (TRUTH_AND_EXPR, boolean_type_node,
- build2 (GT_EXPR, boolean_type_node, tmp, skip),
- build2 (LE_EXPR, boolean_type_node, args, skip));
+ build2 (GT_EXPR, boolean_type_node, tmp, unshare_expr (skip)),
+ build2 (LE_EXPR, boolean_type_node, unshare_expr (args),
+ unshare_expr (skip)));
tmp = build3 (COND_EXPR, ptr_type_node, tmp,
- build2 (POINTER_PLUS_EXPR, ptr_type_node, skip,
- size_int (32)), args);
+ build2 (POINTER_PLUS_EXPR, ptr_type_node, unshare_expr (skip),
+ size_int (32)), unshare_expr (args));
- tmp = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, addr, tmp);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (addr, tmp, pre_p);
/* update VALIST.__args */
tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node, addr, paddedsize);
- tmp = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (args), args, tmp);
- gimplify_and_add (tmp, pre_p);
+ gimplify_assign (unshare_expr (args), tmp, pre_p);
addr = fold_convert (build_pointer_type (type), addr);
diff --git a/gcc/config/stormy16/stormy16.c b/gcc/config/stormy16/stormy16.c
index 6cbe52d5048..64ecec8f5e5 100644
--- a/gcc/config/stormy16/stormy16.c
+++ b/gcc/config/stormy16/stormy16.c
@@ -45,7 +45,7 @@ along with GCC; see the file COPYING3. If not see
#include "target-def.h"
#include "tm_p.h"
#include "langhooks.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "df.h"
#include "ggc.h"
@@ -1350,11 +1350,11 @@ xstormy16_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
u = build_int_cst (NULL_TREE, INCOMING_FRAME_SP_OFFSET);
u = fold_convert (TREE_TYPE (count), u);
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), t, u);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base), base, t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (base), base, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (count), count,
+ t = build2 (MODIFY_EXPR, TREE_TYPE (count), count,
build_int_cst (NULL_TREE,
crtl->args.info * UNITS_PER_WORD));
TREE_SIDE_EFFECTS (t) = 1;
@@ -1366,8 +1366,8 @@ xstormy16_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
Note: This algorithm is documented in stormy-abi. */
static tree
-xstormy16_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
- tree *post_p ATTRIBUTE_UNUSED)
+xstormy16_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p ATTRIBUTE_UNUSED)
{
tree f_base, f_count;
tree base, count;
@@ -1408,8 +1408,7 @@ xstormy16_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
gimplify_and_add (t, pre_p);
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, base, count_tmp);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (addr, t, pre_p);
t = build1 (GOTO_EXPR, void_type_node, lab_gotaddr);
gimplify_and_add (t, pre_p);
@@ -1427,7 +1426,7 @@ xstormy16_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
tree r, u;
r = size_int (NUM_ARGUMENT_REGISTERS * UNITS_PER_WORD);
- u = build2 (GIMPLE_MODIFY_STMT, void_type_node, count_tmp, r);
+ u = build2 (MODIFY_EXPR, TREE_TYPE (count_tmp), count_tmp, r);
t = fold_convert (TREE_TYPE (count), r);
t = build2 (GE_EXPR, boolean_type_node, count_tmp, t);
@@ -1444,16 +1443,14 @@ xstormy16_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
t = fold_convert (TREE_TYPE (t), fold (t));
t = fold_build1 (NEGATE_EXPR, TREE_TYPE (t), t);
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (addr, t, pre_p);
t = build1 (LABEL_EXPR, void_type_node, lab_gotaddr);
gimplify_and_add (t, pre_p);
t = fold_convert (TREE_TYPE (count), size_tree);
t = build2 (PLUS_EXPR, TREE_TYPE (count), count_tmp, t);
- t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (count), count, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (count, t, pre_p);
addr = fold_convert (build_pointer_type (type), addr);
return build_va_arg_indirect_ref (addr);
diff --git a/gcc/config/xtensa/xtensa.c b/gcc/config/xtensa/xtensa.c
index 38a621df0d0..000df14854c 100644
--- a/gcc/config/xtensa/xtensa.c
+++ b/gcc/config/xtensa/xtensa.c
@@ -48,7 +48,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
-#include "tree-gimple.h"
+#include "gimple.h"
#include "df.h"
@@ -142,8 +142,9 @@ static section *xtensa_select_rtx_section (enum machine_mode, rtx,
static bool xtensa_rtx_costs (rtx, int, int, int *);
static tree xtensa_build_builtin_va_list (void);
static bool xtensa_return_in_memory (const_tree, const_tree);
+static tree xtensa_gimplify_va_arg_expr (tree, tree, gimple_seq *,
+ gimple_seq *);
static rtx xtensa_function_value (const_tree, const_tree, bool);
-static tree xtensa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static void xtensa_init_builtins (void);
static tree xtensa_fold_builtin (tree, tree, bool);
static rtx xtensa_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
@@ -2538,14 +2539,14 @@ xtensa_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
/* Call __builtin_saveregs; save the result in __va_reg */
u = make_tree (sizetype, expand_builtin_saveregs ());
u = fold_convert (ptr_type_node, u);
- t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, reg, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, reg, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Set the __va_stk member to ($arg_ptr - 32). */
u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u, size_int (-32));
- t = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, stk, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, stk, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -2554,7 +2555,7 @@ xtensa_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
alignment offset for __va_stk. */
if (arg_words >= MAX_ARGS_IN_REGISTERS)
arg_words += 2;
- t = build2 (GIMPLE_MODIFY_STMT, integer_type_node, ndx,
+ t = build2 (MODIFY_EXPR, integer_type_node, ndx,
build_int_cst (integer_type_node, arg_words * UNITS_PER_WORD));
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -2564,8 +2565,8 @@ xtensa_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
/* Implement `va_arg'. */
static tree
-xtensa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
- tree *post_p ATTRIBUTE_UNUSED)
+xtensa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p ATTRIBUTE_UNUSED)
{
tree f_stk, stk;
tree f_reg, reg;
@@ -2624,8 +2625,7 @@ xtensa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
build_int_cst (integer_type_node, align - 1));
t = build2 (BIT_AND_EXPR, integer_type_node, t,
build_int_cst (integer_type_node, -align));
- t = build2 (GIMPLE_MODIFY_STMT, integer_type_node, orig_ndx, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (orig_ndx, t, pre_p);
}
@@ -2635,8 +2635,7 @@ xtensa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
t = fold_convert (integer_type_node, va_size);
t = build2 (PLUS_EXPR, integer_type_node, orig_ndx, t);
- t = build2 (GIMPLE_MODIFY_STMT, integer_type_node, ndx, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (ndx, t, pre_p);
/* Check if the argument is in registers:
@@ -2661,8 +2660,7 @@ xtensa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
NULL_TREE);
gimplify_and_add (t, pre_p);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, array, reg);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (array, reg, pre_p);
t = build1 (GOTO_EXPR, void_type_node, lab_over);
gimplify_and_add (t, pre_p);
@@ -2694,14 +2692,12 @@ xtensa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
t = size_binop (PLUS_EXPR, va_size, size_int (32));
t = fold_convert (integer_type_node, t);
- t = build2 (GIMPLE_MODIFY_STMT, integer_type_node, ndx, t);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (ndx, t, pre_p);
t = build1 (LABEL_EXPR, void_type_node, lab_false2);
gimplify_and_add (t, pre_p);
- t = build2 (GIMPLE_MODIFY_STMT, void_type_node, array, stk);
- gimplify_and_add (t, pre_p);
+ gimplify_assign (array, stk, pre_p);
if (lab_over)
{