-rw-r--r--  ChangeLog | 8
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  gcc/ChangeLog | 248
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/ada/ChangeLog | 40
-rw-r--r--  gcc/ada/comperr.adb | 14
-rw-r--r--  gcc/ada/comperr.ads | 13
-rw-r--r--  gcc/ada/exp_cg.adb | 6
-rw-r--r--  gcc/ada/exp_dbug.adb | 77
-rw-r--r--  gcc/ada/exp_dbug.ads | 26
-rw-r--r--  gcc/ada/exp_disp.adb | 7
-rw-r--r--  gcc/ada/fe.h | 23
-rw-r--r--  gcc/ada/gcc-interface/Makefile.in | 38
-rw-r--r--  gcc/ada/gcc-interface/decl.c | 20
-rw-r--r--  gcc/ada/gcc-interface/misc.c | 22
-rw-r--r--  gcc/ada/gcc-interface/trans.c | 16
-rw-r--r--  gcc/ada/gcc-interface/utils.c | 1
-rw-r--r--  gcc/ada/types.h | 18
-rw-r--r--  gcc/basic-block.h | 3
-rw-r--r--  gcc/builtins.def | 10
-rw-r--r--  gcc/c-family/ChangeLog | 5
-rw-r--r--  gcc/c-family/c-ubsan.c | 18
-rw-r--r--  gcc/c/ChangeLog | 17
-rw-r--r--  gcc/c/c-typeck.c | 20
-rw-r--r--  gcc/calls.c | 72
-rw-r--r--  gcc/cgraph.c | 4
-rw-r--r--  gcc/config.gcc | 14
-rw-r--r--  gcc/config/aarch64/aarch64-builtins.c | 5
-rw-r--r--  gcc/config/aarch64/aarch64-protos.h | 2
-rw-r--r--  gcc/config/aarch64/aarch64-simd-builtins.def | 4
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md | 84
-rw-r--r--  gcc/config/aarch64/aarch64.c | 33
-rw-r--r--  gcc/config/aarch64/aarch64.h | 3
-rw-r--r--  gcc/config/aarch64/aarch64.md | 22
-rw-r--r--  gcc/config/aarch64/arm_neon.h | 2136
-rw-r--r--  gcc/config/aarch64/iterators.md | 26
-rw-r--r--  gcc/config/arc/arc.c | 22
-rw-r--r--  gcc/config/arc/arc.opt | 2
-rw-r--r--  gcc/config/i386/sol2-bi.h | 109
-rw-r--r--  gcc/config/i386/sol2.h | 101
-rw-r--r--  gcc/config/i386/t-sol2 (renamed from gcc/config/i386/t-sol2-64) | 0
-rw-r--r--  gcc/config/m32c/m32c.c | 2
-rw-r--r--  gcc/config/m32r/m32r.c | 20
-rw-r--r--  gcc/config/msp430/msp430.c | 2
-rw-r--r--  gcc/config/msp430/msp430.md | 4
-rw-r--r--  gcc/config/rs6000/predicates.md | 4
-rw-r--r--  gcc/config/rs6000/rs6000.md | 1
-rw-r--r--  gcc/config/rs6000/sync.md | 105
-rw-r--r--  gcc/config/sol2-10.h | 24
-rw-r--r--  gcc/config/sol2-bi.h | 135
-rw-r--r--  gcc/config/sol2.h | 149
-rw-r--r--  gcc/config/sparc/sol2.h | 32
-rw-r--r--  gcc/config/sparc/t-sol2 (renamed from gcc/config/sparc/t-sol2-64) | 0
-rw-r--r--  gcc/coretypes.h | 3
-rw-r--r--  gcc/cp/ChangeLog | 44
-rw-r--r--  gcc/cp/class.c | 6
-rw-r--r--  gcc/cp/cp-tree.def | 9
-rw-r--r--  gcc/cp/cp-tree.h | 19
-rw-r--r--  gcc/cp/decl.c | 32
-rw-r--r--  gcc/cp/decl2.c | 6
-rw-r--r--  gcc/cp/except.c | 12
-rw-r--r--  gcc/cp/init.c | 68
-rw-r--r--  gcc/cp/method.c | 101
-rw-r--r--  gcc/cp/parser.c | 15
-rw-r--r--  gcc/cp/parser.h | 6
-rw-r--r--  gcc/cp/pt.c | 28
-rw-r--r--  gcc/cp/typeck.c | 21
-rw-r--r--  gcc/cp/typeck2.c | 32
-rw-r--r--  gcc/doc/gimple.texi | 155
-rw-r--r--  gcc/doc/invoke.texi | 2
-rw-r--r--  gcc/dominance.c | 38
-rw-r--r--  gcc/expr.c | 20
-rw-r--r--  gcc/flag-types.h | 1
-rw-r--r--  gcc/gcc.c | 2
-rw-r--r--  gcc/gimple.c | 4
-rw-r--r--  gcc/go/ChangeLog | 9
-rw-r--r--  gcc/go/go-gcc.cc | 219
-rw-r--r--  gcc/go/gofrontend/backend.h | 5
-rw-r--r--  gcc/go/gofrontend/go.cc | 3
-rw-r--r--  gcc/go/gofrontend/gogo-tree.cc | 378
-rw-r--r--  gcc/go/gofrontend/gogo.cc | 60
-rw-r--r--  gcc/go/gofrontend/gogo.h | 25
-rw-r--r--  gcc/go/gofrontend/import-archive.cc | 2
-rw-r--r--  gcc/graphite-scop-detection.c | 2
-rw-r--r--  gcc/graphite-sese-to-poly.c | 2
-rw-r--r--  gcc/lto/ChangeLog | 5
-rw-r--r--  gcc/lto/lto-lang.c | 1
-rw-r--r--  gcc/opts.c | 2
-rw-r--r--  gcc/passes.c | 65
-rw-r--r--  gcc/testsuite/ChangeLog | 163
-rw-r--r--  gcc/testsuite/c-c++-common/torture/pr60971.c | 34
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c | 2
-rw-r--r--  gcc/testsuite/c-c++-common/ubsan/float-div-by-zero-1.c | 26
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/constexpr-51707.C | 14
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/constexpr-aggr1.C | 17
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/defaulted49.C | 15
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/nsdmi-defer6.C | 4
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/nsdmi-dr1397.C | 7
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/nsdmi-eh1.C | 2
-rw-r--r--  gcc/testsuite/gcc.dg/pr60139.c | 14
-rw-r--r--  gcc/testsuite/gcc.dg/pr60351.c | 11
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/alias-32.c | 20
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/pr23401.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/pr27810.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/sra-14.c | 70
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr60092.c | 18
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/fcsel_1.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/simd.exp | 45
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpf32.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpf32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpp16.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpp16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpp8.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpp8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqf32.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqf32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqp16.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqp16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqp8.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqp8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqs16.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqs16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqs32.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqs32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqs8.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqs8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqu16.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqu16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqu32.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqu32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqu8.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpqu8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzps16.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzps16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzps32.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzps32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzps8.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzps8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpu16.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpu16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpu32.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpu32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpu8.x | 26
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vuzpu8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipf32.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipf32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipp16.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipp16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipp8.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipp8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqf32.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqf32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqp16.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqp16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqp8.x | 29
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqp8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqs16.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqs16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqs32.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqs32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqs8.x | 29
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqs8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqu16.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqu16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqu32.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqu32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqu8.x | 29
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipqu8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzips16.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzips16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzips32.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzips32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzips8.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzips8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipu16.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipu16_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipu32.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipu32_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipu8.x | 27
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/simd/vzipu8_1.c | 11
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/simd.exp | 35
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipf32_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipp16_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipp8_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqf32_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqp16_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqp8_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqs16_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqs32_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqs8_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqu16_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqu32_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipqu8_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzips16_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzips32_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzips8_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipu16_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipu32_1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/simd/vzipu8_1.c | 12
-rw-r--r--  gcc/testsuite/gfortran.dg/arrayio_13.f90 | 14
-rw-r--r--  gcc/testsuite/gfortran.dg/vect/pr48329.f90 | 29
-rw-r--r--  gcc/tree-cfg.c | 21
-rw-r--r--  gcc/tree-cfg.h | 2
-rw-r--r--  gcc/tree-eh.c | 2
-rw-r--r--  gcc/tree-pass.h | 4
-rw-r--r--  gcc/tree-sra.c | 177
-rw-r--r--  gcc/tree-ssa-alias.c | 2
-rw-r--r--  gcc/tree-ssa-ccp.c | 19
-rw-r--r--  gcc/tree-ssa-dce.c | 9
-rw-r--r--  gcc/tree-ssa-loop-manip.c | 2
-rw-r--r--  gcc/tree-ssa.c | 4
-rw-r--r--  gcc/tree-ssa.h | 2
-rw-r--r--  gcc/tree-tailcall.c | 16
-rw-r--r--  gcc/tree-vrp.c | 4
-rw-r--r--  include/ChangeLog | 4
-rw-r--r--  include/longlong.h | 2
-rw-r--r--  libgcc/ChangeLog | 7
-rw-r--r--  libgcc/config/i386/cygming-crtbegin.c | 8
218 files changed, 5212 insertions, 2386 deletions
diff --git a/ChangeLog b/ChangeLog
index a0e55567d59..652c06d028b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2014-04-29 Alan Lawrence <alan.lawrence@arm.com>
+
+ * MAINTAINERS (Write After Approval): Put myself in correct order.
+
+2014-04-29 Alan Lawrence <alan.lawrence@arm.com>
+
+ * MAINTAINERS (Write After Approval): Add myself.
+
2014-04-24 Laurynas Biveinis <laurynas.biveinis@gmail.com>
* MAINTAINERS: Move myself from Reviewers to Write After Approval
diff --git a/MAINTAINERS b/MAINTAINERS
index 36979e13785..b79588d3f23 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -441,6 +441,7 @@ Rask Ingemann Lambertsen ccc94453@vip.cybercity.dk
Asher Langton langton2@llnl.gov
Chris Lattner sabre@nondot.org
Terry Laurenzo tlaurenzo@gmail.com
+Alan Lawrence alan.lawrence@arm.com
Georg-Johann Lay avr@gjlay.de
Marc Lehmann pcg@goof.com
James Lemke jwlemke@codesourcery.com
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 67a4645a87a..e3ae8dcfc81 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,251 @@
+2014-04-30 Alan Lawrence <alan.lawrence@arm.com>
+
+ * config/aarch64/arm_neon.h (vuzp1_f32, vuzp1_p8, vuzp1_p16, vuzp1_s8,
+ vuzp1_s16, vuzp1_s32, vuzp1_u8, vuzp1_u16, vuzp1_u32, vuzp1q_f32,
+ vuzp1q_f64, vuzp1q_p8, vuzp1q_p16, vuzp1q_s8, vuzp1q_s16, vuzp1q_s32,
+ vuzp1q_s64, vuzp1q_u8, vuzp1q_u16, vuzp1q_u32, vuzp1q_u64, vuzp2_f32,
+ vuzp2_p8, vuzp2_p16, vuzp2_s8, vuzp2_s16, vuzp2_s32, vuzp2_u8,
+ vuzp2_u16, vuzp2_u32, vuzp2q_f32, vuzp2q_f64, vuzp2q_p8, vuzp2q_p16,
+ vuzp2q_s8, vuzp2q_s16, vuzp2q_s32, vuzp2q_s64, vuzp2q_u8, vuzp2q_u16,
+ vuzp2q_u32, vuzp2q_u64): Replace temporary asm with __builtin_shuffle.
+
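For reference, a minimal sketch of what the rewritten intrinsics look like after this change (the actual arm_neon.h bodies also provide a big-endian variant of the shuffle mask; only the little-endian case is shown here):

    __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
    vuzp1_f32 (float32x2_t __a, float32x2_t __b)
    {
      /* Even-indexed elements of the concatenation of __a and __b, expressed
         as a generic vector shuffle instead of a temporary inline asm.  */
      return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
    }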
+2014-04-30 Joern Rennecke <joern.rennecke@embecosm.com>
+
+ * config/arc/arc.opt (mlra): Move comment above option name
+ to avoid mis-parsing as language options.
+
+2014-04-30 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * config/sol2-10.h (TARGET_LIBC_HAS_FUNCTION): Move ...
+ * config/sol2.h: ... here.
+ * config/sol2-10.h: Remove.
+
+ * config/sol2-bi.h (WCHAR_TYPE, WCHAR_TYPE_SIZE, WINT_TYPE)
+ (WINT_TYPE_SIZE, MULTILIB_DEFAULTS, DEF_ARCH32_SPEC)
+ (DEF_ARCH64_SPEC, ASM_CPU_DEFAULT_SPEC, LINK_ARCH64_SPEC_BASE)
+ (LINK_ARCH64_SPEC, ARCH_DEFAULT_EMULATION, TARGET_LD_EMULATION)
+ (LINK_ARCH_SPEC, SUBTARGET_EXTRA_SPECS): Move ...
+ * config/sol2.h: ... here.
+ (SECTION_NAME_FORMAT): Don't redefine.
+ (STARTFILE_ARCH32_SPEC): Rename to ...
+ (STARTFILE_ARCH_SPEC): ... this.
+ (ASM_OUTPUT_ALIGNED_COMMON): Move ...
+ * config/sparc/sol2.h: ... here.
+ (SECTION_NAME_FORMAT): Don't undef.
+ * config/i386/sol2.h (ASM_CPU_DEFAULT_SPEC)
+ (SUBTARGET_EXTRA_SPECS): Remove.
+ * config/sparc/sol2.h (ASM_CPU_DEFAULT_SPEC): Remove.
+
+ * config/i386/sol2-bi.h (TARGET_SUBTARGET_DEFAULT)
+ (MD_STARTFILE_PREFIX): Remove.
+ (SUBTARGET_OPTIMIZATION_OPTIONS, ASM_CPU32_DEFAULT_SPEC)
+ (ASM_CPU64_DEFAULT_SPEC, ASM_CPU_SPEC, ASM_SPEC, DEFAULT_ARCH32_P)
+ (ARCH64_SUBDIR, ARCH32_EMULATION, ARCH64_EMULATION)
+ (ASM_COMMENT_START, JUMP_TABLES_IN_TEXT_SECTION)
+ (ASM_OUTPUT_DWARF_PCREL, ASM_OUTPUT_ALIGNED_COMMON)
+ (USE_IX86_FRAME_POINTER, USE_X86_64_FRAME_POINTER): Move ...
+ * config/i386/sol2.h: ... here.
+ (TARGET_SUBTARGET_DEFAULT, SIZE_TYPE, PTRDIFF_TYPE): Remove.
+ * config/i386/sol2-bi.h: Remove.
+ * config/sol2.h (MD_STARTFILE_PREFIX): Remove.
+ (LINK_ARCH32_SPEC_BASE): Remove /usr/ccs/lib/libp, /usr/ccs/lib.
+
+ * config/i386/t-sol2-64: Rename to ...
+ * config/i386/t-sol2: ... this.
+ * config/sparc/t-sol2-64: Rename to ...
+ * config/sparc/t-sol2: ... this.
+
+ * config.gcc (*-*-solaris2*): Split sol2_tm_file into
+ sol2_tm_file_head, sol2_tm_file_tail.
+ Include ${cpu_type}/sol2.h before sol2.h.
+ Remove sol2-10.h.
+ (i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*): Include
+ i386/x86-64.h between sol2_tm_file_head and sol2_tm_file_tail.
+ Remove i386/sol2-bi.h, sol2-bi.h from tm_file.
+ Reflect i386/t-sol2-64 renaming.
+ (sparc*-*-solaris2*): Remove sol2-bi.h from tm_file.
+ Reflect sparc/t-sol2-64 renaming.
+
+2014-04-30 Richard Biener <rguenther@suse.de>
+
+ * passes.c (execute_function_todo): Move TODO_verify_stmts
+ and TODO_verify_ssa under the TODO_verify_il umbrella.
+ * tree-ssa.h (verify_ssa): Adjust prototype.
+ * tree-ssa.c (verify_ssa): Add parameter to tell whether
+ we should verify SSA operands.
+ * tree-cfg.h (verify_gimple_in_cfg): Adjust prototype.
+ * tree-cfg.c (verify_gimple_in_cfg): Add parameter to tell
+ whether we should verify whether not throwing stmts have EH info.
+ * graphite-scop-detection.c (create_sese_edges): Adjust.
+ * tree-ssa-loop-manip.c (verify_loop_closed_ssa): Likewise.
+ * tree-eh.c (lower_try_finally_switch): Do not add the
+ default case label twice.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * gcc.c (sanitize_spec_function): Handle SANITIZE_FLOAT_DIVIDE.
+ * builtins.def: Initialize builtins even for SANITIZE_FLOAT_DIVIDE.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_FLOAT_DIVIDE.
+ * opts.c (common_handle_option): Add -fsanitize=float-divide-by-zero.
+
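A minimal illustration of the new option (file and function names are made up for the example): compiled with -fsanitize=float-divide-by-zero, the division below is instrumented and reported by the ubsan runtime, whereas the integer-only -fsanitize=integer-divide-by-zero leaves it alone.

    /* fdiv.c -- compile with: gcc -fsanitize=float-divide-by-zero fdiv.c */
    float
    f (float x)
    {
      return x / 0.0f;   /* flagged at run time by the new sanitizer */
    }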
+2014-04-29 Alan Lawrence <alan.lawrence@arm.com>
+
+ * config/aarch64/arm_neon.h (vzip1_f32, vzip1_p8, vzip1_p16, vzip1_s8,
+ vzip1_s16, vzip1_s32, vzip1_u8, vzip1_u16, vzip1_u32, vzip1q_f32,
+ vzip1q_f64, vzip1q_p8, vzip1q_p16, vzip1q_s8, vzip1q_s16, vzip1q_s32,
+ vzip1q_s64, vzip1q_u8, vzip1q_u16, vzip1q_u32, vzip1q_u64, vzip2_f32,
+ vzip2_p8, vzip2_p16, vzip2_s8, vzip2_s16, vzip2_s32, vzip2_u8,
+ vzip2_u16, vzip2_u32, vzip2q_f32, vzip2q_f64, vzip2q_p8, vzip2q_p16,
+ vzip2q_s8, vzip2q_s16, vzip2q_s32, vzip2q_s64, vzip2q_u8, vzip2q_u16,
+ vzip2q_u32, vzip2q_u64): Replace inline __asm__ with __builtin_shuffle.
+
+2014-04-29 David Malcolm <dmalcolm@redhat.com>
+
+ * tree-cfg.c (dump_function_to_file): Dump the return type of
+ functions, on a line by itself before the function body, mimicking
+ the layout of a C function.
+
+2014-04-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/60971
+ * tree-tailcall.c (process_assignment): Reject conversions which
+ reduce precision.
+
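A hedged sketch of the kind of pattern now rejected (this is not the actual pr60971.c testcase): the division result is computed in int and narrowed back to unsigned char on return, and that precision-reducing conversion makes the accumulator-based tail-call transformation unsafe.

    unsigned char
    f (unsigned char b, int n)
    {
      if (n == 0)
        return b;
      /* The int result of the division is truncated to unsigned char here;
         process_assignment no longer treats this as a tail call.  */
      return f (b, n - 1) / 3;
    }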
+2014-04-29 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * calls.c (initialize_argument_information): Always treat
+ PUSH_ARGS_REVERSED as 1, simplify code accordingly.
+ (expand_call): Likewise.
+ (emit_library_call_value_1): Likewise.
+ * expr.c (PUSH_ARGS_REVERSED): Do not define.
+ (emit_push_insn): Always treat PUSH_ARGS_REVERSED as 1, simplify
+ code accordingly.
+
+2014-04-29 Nick Clifton <nickc@redhat.com>
+
+ * config/msp430/msp430.md (umulsidi): Fix typo.
+ (mulhisi3): Enable even inside interrupt handlers.
+ * config/msp430/msp430.c (msp430_print_operand): %O: Allow for the
+ bigger return address pushed in large mode.
+
+2014-04-29 Nick Clifton <nickc@redhat.com>
+
+ * config/arc/arc.c (arc_select_cc_mode): Fix parentheses.
+ (arc_init_reg_tables): Use a machine_mode enum to iterate over
+ available modes.
+ * config/m32r/m32r.c (init_reg_tables): Likewise.
+ * config/m32c/m32c.c (m32c_illegal_subreg_p): Use a machine_mode
+ enum to hold the modes.
+
+2014-04-29 Richard Biener <rguenther@suse.de>
+
+ * dominance.c (free_dominance_info): Add overload with
+ function parameter.
+ (dom_info_state): Likewise.
+ (dom_info_available_p): Likewise.
+ * basic-block.h (free_dominance_info, dom_info_state,
+ dom_info_available_p): Declare overloads.
+ * passes.c (execute_function_todo): Verify that verifiers
+ don't change dominator info state. Drop dominator info
+ for IPA pass invocations.
+ * cgraph.c (release_function_body): Restore asserts that
+ dominator information is released.
+
+2014-04-29 Patrick Palka <patrick@parcs.ath.cx>
+
+ * doc/invoke.texi: Fix typo.
+ * tree-vrp.c: Fix typos.
+ * gimple.c (infer_nonnull_range): Reorder operands of an &&
+ condition.
+
+2014-04-29 Zhenqiang Chen <zhenqiang.chen@linaro.org>
+
+ * config/aarch64/aarch64.md (mov<mode>cc): New for GPF.
+
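With the new mov<mode>cc expander for the general floating-point modes, a select between two FP values can be emitted as fcsel instead of a branch. A small example along the lines of the new fcsel_1.c test (exact test contents assumed):

    /* Compile with -O2 for aarch64; expect fcmp + fcsel, no branch.  */
    double
    pick (double a, double b, double c, double d)
    {
      return a > b ? c : d;
    }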
+2014-04-28 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/aarch64/aarch64-builtins.c
+ (aarch64_types_storestruct_lane_qualifiers): New.
+ (TYPES_STORESTRUCT_LANE): Likewise.
+ * config/aarch64/aarch64-simd-builtins.def (st2_lane): New.
+ (st3_lane): Likewise.
+ (st4_lane): Likewise.
+ * config/aarch64/aarch64-simd.md (vec_store_lanesoi_lane<mode>): New.
+ (vec_store_lanesci_lane<mode>): Likewise.
+ (vec_store_lanesxi_lane<mode>): Likewise.
+ (aarch64_st2_lane<VQ:mode>): Likewise.
+ (aarch64_st3_lane<VQ:mode>): Likewise.
+ (aarch64_st4_lane<VQ:mode>): Likewise.
+ * config/aarch64/aarch64.md (unspec): Add UNSPEC_ST{2,3,4}_LANE.
+ * config/aarch64/arm_neon.h
+ (__ST2_LANE_FUNC): Rewrite using builtins, update use points to
+ use new macro arguments.
+ (__ST3_LANE_FUNC): Likewise.
+ (__ST4_LANE_FUNC): Likewise.
+ * config/aarch64/iterators.md (V_TWO_ELEM): New.
+ (V_THREE_ELEM): Likewise.
+ (V_FOUR_ELEM): Likewise.
+
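A usage sketch of the intrinsics affected by the new store-lane builtins: the standard arm_neon.h call below now expands through the st2_lane builtin and the aarch64_st2_lane pattern rather than inline asm.

    #include <arm_neon.h>

    void
    store_second_lane (float32_t *p, float32x4x2_t v)
    {
      /* Store lane 1 of each of the two vectors, interleaved, at p.  */
      vst2q_lane_f32 (p, v, 1);
    }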
+2014-04-28 David Malcolm <dmalcolm@redhat.com>
+
+ * doc/gimple.texi: Replace the description of the now-defunct
+ union gimple_statement_d with a diagram showing the
+ gimple_statement_base class hierarchy and its relationships to
+ the GSS_ and GIMPLE_ enums.
+
+2014-04-28 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/aarch64/aarch64-protos.h (aarch64_modes_tieable_p): New.
+ * config/aarch64/aarch64.c
+ (aarch64_cannot_change_mode_class): Weaken conditions.
+ (aarch64_modes_tieable_p): New.
+ * config/aarch64/aarch64.h (MODES_TIEABLE_P): Use it.
+
+2014-04-28 Pat Haugen <pthaugen@us.ibm.com>
+
+ * config/rs6000/sync.md (AINT mode_iterator): Move definition.
+ (loadsync_<mode>): Change mode.
+ (load_quadpti, store_quadpti): New.
+ (atomic_load<mode>, atomic_store<mode>): Add support for TI mode.
+ * config/rs6000/rs6000.md (unspec enum): Add UNSPEC_LSQ.
+
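A hedged example of what the TImode support enables: on a target with quad-word atomics (e.g. -mcpu=power8), a 128-bit sequentially consistent atomic load like the one below can now be expanded inline via the new quadword load and sync patterns instead of a library call; exact code generation depends on the target options.

    __int128
    load128 (__int128 *p)
    {
      return __atomic_load_n (p, __ATOMIC_SEQ_CST);
    }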
+2014-04-28 Martin Jambor <mjambor@suse.cz>
+
+ * tree-sra.c (sra_modify_expr): Generate new memory accesses with
+ same alias type as the original statement.
+ (subreplacement_assignment_data): New type.
+ (handle_unscalarized_data_in_subtree): New type of parameter,
+ generate new memory accesses with same alias type as the original
+ statement.
+ (load_assign_lhs_subreplacements): Likewise.
+ (sra_modify_constructor_assign): Generate new memory accesses with
+ same alias type as the original statement.
+
+2014-04-28 Richard Biener <rguenther@suse.de>
+
+ * tree-pass.h (TODO_verify_il): Define.
+ (TODO_verify_all): Complete properly.
+ * passes.c (execute_function_todo): Move existing loop-closed
+ SSA verification under TODO_verify_il.
+ (execute_one_pass): Trigger TODO_verify_il at todo-after time.
+ * graphite-sese-to-poly.c (rewrite_cross_bb_scalar_deps):
+ Fix tree sharing issue.
+
+2014-04-28 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/60092
+ * builtins.def (DEF_C11_BUILTIN): Add.
+ (BUILT_IN_ALIGNED_ALLOC): Likewise.
+ * coretypes.h (enum function_class): Add function_c11_misc.
+ * tree-ssa-alias.c (ref_maybe_used_by_call_p_1): Handle
+ BUILT_IN_ALIGNED_ALLOC like BUILT_IN_MALLOC.
+ (call_may_clobber_ref_p_1): Likewise.
+ * tree-ssa-dce.c (mark_stmt_if_obviously_necessary): Likewise.
+ (mark_all_reaching_defs_necessary_1): Likewise.
+ (propagate_necessity): Likewise.
+ (eliminate_unnecessary_stmts): Likewise.
+ * tree-ssa-ccp.c (evaluate_stmt): Handle BUILT_IN_ALIGNED_ALLOC.
+
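A small sketch of the aliasing payoff (compiled as C11 so that aligned_alloc is declared): because aligned_alloc is now known to behave like malloc, the store through p below cannot clobber *q.

    #include <stdlib.h>

    int
    f (int *q)
    {
      int *p = aligned_alloc (16, 32);
      *p = 1;          /* freshly allocated memory, cannot alias *q */
      return *q;
    }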
2014-04-28 Richard Biener <rguenther@suse.de>
* tree-vrp.c (vrp_var_may_overflow): Remove.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 2eee80b2cf1..5907e7a3156 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20140428
+20140430
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index a31cd3c2f11..fb57af5c0d4 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,43 @@
+2014-04-28 Richard Henderson <rth@redhat.com>
+
+ * gcc-interface/Makefile.in: Support aarch64-linux.
+
+2014-04-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_dbug.ads (Get_External_Name): Add 'False' default to Has_Suffix,
+ add 'Suffix' parameter and adjust comment.
+ (Get_External_Name_With_Suffix): Delete.
+ * exp_dbug.adb (Get_External_Name_With_Suffix): Merge into...
+ (Get_External_Name): ...here. Add 'False' default to Has_Suffix, add
+ 'Suffix' parameter.
+ (Get_Encoded_Name): Remove 2nd argument in call to Get_External_Name.
+ Call Get_External_Name instead of Get_External_Name_With_Suffix.
+ (Get_Secondary_DT_External_Name): Likewise.
+ * exp_cg.adb (Write_Call_Info): Likewise.
+ * exp_disp.adb (Export_DT): Likewise.
+ (Import_DT): Likewise.
+ * comperr.ads (Compiler_Abort): Remove Code parameter and add From_GCC
+ parameter with False default.
+ * comperr.adb (Compiler_Abort): Likewise. Adjust accordingly.
+ * types.h (Fat_Pointer): Rename into...
+ (String_Pointer): ...this. Add comment on interfacing rules.
+ * fe.h (Compiler_Abort): Adjust for above renaming.
+ (Error_Msg_N): Likewise.
+ (Error_Msg_NE): Likewise.
+ (Get_External_Name): Likewise. Add third parameter.
+ (Get_External_Name_With_Suffix): Delete.
+ * gcc-interface/decl.c (STDCALL_PREFIX): Define.
+ (create_concat_name): Adjust call to Get_External_Name, remove call to
+ Get_External_Name_With_Suffix, use STDCALL_PREFIX, adjust for renaming.
+ * gcc-interface/trans.c (post_error): Likewise.
+ (post_error_ne): Likewise.
+ * gcc-interface/misc.c (internal_error_function): Likewise.
+
+2014-04-28 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/60092
+ * gcc-interface/utils.c: Define flag_isoc11.
+
2014-04-26 Eric Botcazou <ebotcazou@adacore.com>
* gnatvsn.ads (Library_Version): Bump to 4.10.
diff --git a/gcc/ada/comperr.adb b/gcc/ada/comperr.adb
index 13646a5c155..7a9d7070cde 100644
--- a/gcc/ada/comperr.adb
+++ b/gcc/ada/comperr.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 1992-2013, Free Software Foundation, Inc. --
+-- Copyright (C) 1992-2014, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -74,8 +74,8 @@ package body Comperr is
procedure Compiler_Abort
(X : String;
- Code : Integer := 0;
- Fallback_Loc : String := "")
+ Fallback_Loc : String := "";
+ From_GCC : Boolean := False)
is
-- The procedures below output a "bug box" with information about
-- the cause of the compiler abort and about the preferred method
@@ -206,7 +206,7 @@ package body Comperr is
Write_Str (") ");
if X'Length + Column > 76 then
- if Code < 0 then
+ if From_GCC then
Write_Str ("GCC error:");
end if;
@@ -235,11 +235,7 @@ package body Comperr is
Write_Str (X);
end if;
- if Code > 0 then
- Write_Str (", Code=");
- Write_Int (Int (Code));
-
- elsif Code = 0 then
+ if not From_GCC then
-- For exception case, get exception message from the TSD. Note
-- that it would be neater and cleaner to pass the exception
diff --git a/gcc/ada/comperr.ads b/gcc/ada/comperr.ads
index ba3cb6b8f66..dccd8ef3433 100644
--- a/gcc/ada/comperr.ads
+++ b/gcc/ada/comperr.ads
@@ -6,7 +6,7 @@
-- --
-- S p e c --
-- --
--- Copyright (C) 1992-2013, Free Software Foundation, Inc. --
+-- Copyright (C) 1992-2014, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -31,8 +31,8 @@ package Comperr is
procedure Compiler_Abort
(X : String;
- Code : Integer := 0;
- Fallback_Loc : String := "");
+ Fallback_Loc : String := "";
+ From_GCC : Boolean := False);
pragma No_Return (Compiler_Abort);
-- Signals an internal compiler error. Never returns control. Depending on
-- processing may end up raising Unrecoverable_Error, or exiting directly.
@@ -46,10 +46,9 @@ package Comperr is
-- Note that this is only used at the outer level (to handle constraint
-- errors or assert errors etc.) In the normal logic of the compiler we
-- always use pragma Assert to check for errors, and if necessary an
- -- explicit abort is achieved by pragma Assert (False). Code is positive
- -- for a gigi abort (giving the gigi abort code), zero for a front
- -- end exception (with possible message stored in TSD.Current_Excep,
- -- and negative (an unused value) for a GCC abort.
+ -- explicit abort is achieved by pragma Assert (False). From_GCC is true
+ -- for a GCC abort and false for a front end exception (with a possible
+ -- message stored in TSD.Current_Excep).
procedure Delete_SCIL_Files;
-- Delete SCIL files associated with the main unit
diff --git a/gcc/ada/exp_cg.adb b/gcc/ada/exp_cg.adb
index d8a7022e504..483f174efc6 100644
--- a/gcc/ada/exp_cg.adb
+++ b/gcc/ada/exp_cg.adb
@@ -6,7 +6,7 @@
-- --
-- B o d y --
-- --
--- Copyright (C) 2010-2013, Free Software Foundation, Inc. --
+-- Copyright (C) 2010-2014, Free Software Foundation, Inc. --
-- --
-- GNAT is free software; you can redistribute it and/or modify it under --
-- terms of the GNU General Public License as published by the Free Soft- --
@@ -437,10 +437,10 @@ package body Exp_CG is
if Nkind (P) = N_Subprogram_Body
and then not Acts_As_Spec (P)
then
- Get_External_Name (Corresponding_Spec (P), Has_Suffix => False);
+ Get_External_Name (Corresponding_Spec (P));
else
- Get_External_Name (Defining_Entity (P), Has_Suffix => False);
+ Get_External_Name (Defining_Entity (P));
end if;
Write_Str (Name_Buffer (1 .. Name_Len));
diff --git a/gcc/ada/exp_dbug.adb b/gcc/ada/exp_dbug.adb
index 7dc4264cc59..13620290815 100644
--- a/gcc/ada/exp_dbug.adb
+++ b/gcc/ada/exp_dbug.adb
@@ -507,8 +507,8 @@ package body Exp_Dbug is
begin
-- If not generating code, there is no need to create encoded names, and
-- problems when the back-end is called to annotate types without full
- -- code generation. See comments in Get_External_Name_With_Suffix for
- -- additional details.
+ -- code generation. See comments in Get_External_Name for additional
+ -- details.
-- However we do create encoded names if the back end is active, even
-- if Operating_Mode got reset. Otherwise any serious error reported
@@ -556,7 +556,7 @@ package body Exp_Dbug is
-- Fixed-point case
if Is_Fixed_Point_Type (E) then
- Get_External_Name_With_Suffix (E, "XF_");
+ Get_External_Name (E, True, "XF_");
Add_Real_To_Buffer (Delta_Value (E));
if Small_Value (E) /= Delta_Value (E) then
@@ -568,14 +568,14 @@ package body Exp_Dbug is
elsif Vax_Float (E) then
if Digits_Value (Base_Type (E)) = 6 then
- Get_External_Name_With_Suffix (E, "XFF");
+ Get_External_Name (E, True, "XFF");
elsif Digits_Value (Base_Type (E)) = 9 then
- Get_External_Name_With_Suffix (E, "XFF");
+ Get_External_Name (E, True, "XFF");
else
pragma Assert (Digits_Value (Base_Type (E)) = 15);
- Get_External_Name_With_Suffix (E, "XFG");
+ Get_External_Name (E, True, "XFG");
end if;
-- Discrete case where bounds do not match size
@@ -607,9 +607,9 @@ package body Exp_Dbug is
begin
if Biased then
- Get_External_Name_With_Suffix (E, "XB");
+ Get_External_Name (E, True, "XB");
else
- Get_External_Name_With_Suffix (E, "XD");
+ Get_External_Name (E, True, "XD");
end if;
if Lo_Encode or Hi_Encode then
@@ -649,7 +649,7 @@ package body Exp_Dbug is
else
Has_Suffix := False;
- Get_External_Name (E, Has_Suffix);
+ Get_External_Name (E);
end if;
if Debug_Flag_B and then Has_Suffix then
@@ -667,7 +667,11 @@ package body Exp_Dbug is
-- Get_External_Name --
-----------------------
- procedure Get_External_Name (Entity : Entity_Id; Has_Suffix : Boolean) is
+ procedure Get_External_Name
+ (Entity : Entity_Id;
+ Has_Suffix : Boolean := False;
+ Suffix : String := "")
+ is
E : Entity_Id := Entity;
Kind : Entity_Kind;
@@ -704,6 +708,20 @@ package body Exp_Dbug is
-- Start of processing for Get_External_Name
begin
+ -- If we are not in code generation mode, this procedure may still be
+ -- called from Back_End (more specifically - from gigi for doing type
+ -- representation annotation or some representation-specific checks).
+ -- But in this mode there is no need to mess with external names.
+
+ -- Furthermore, the call causes difficulties in this case because the
+ -- string representing the homonym number is not correctly reset as a
+ -- part of the call to Output_Homonym_Numbers_Suffix (which is not
+ -- called in gigi).
+
+ if Operating_Mode /= Generate_Code then
+ return;
+ end if;
+
Reset_Buffers;
-- If this is a child unit, we want the child
@@ -762,42 +780,13 @@ package body Exp_Dbug is
Get_Qualified_Name_And_Append (E);
end if;
- Name_Buffer (Name_Len + 1) := ASCII.NUL;
- end Get_External_Name;
-
- -----------------------------------
- -- Get_External_Name_With_Suffix --
- -----------------------------------
-
- procedure Get_External_Name_With_Suffix
- (Entity : Entity_Id;
- Suffix : String)
- is
- Has_Suffix : constant Boolean := (Suffix /= "");
-
- begin
- -- If we are not in code generation mode, this procedure may still be
- -- called from Back_End (more specifically - from gigi for doing type
- -- representation annotation or some representation-specific checks).
- -- But in this mode there is no need to mess with external names.
-
- -- Furthermore, the call causes difficulties in this case because the
- -- string representing the homonym number is not correctly reset as a
- -- part of the call to Output_Homonym_Numbers_Suffix (which is not
- -- called in gigi).
-
- if Operating_Mode /= Generate_Code then
- return;
- end if;
-
- Get_External_Name (Entity, Has_Suffix);
-
if Has_Suffix then
Add_Str_To_Name_Buffer ("___");
Add_Str_To_Name_Buffer (Suffix);
- Name_Buffer (Name_Len + 1) := ASCII.NUL;
end if;
- end Get_External_Name_With_Suffix;
+
+ Name_Buffer (Name_Len + 1) := ASCII.NUL;
+ end Get_External_Name;
--------------------------
-- Get_Variant_Encoding --
@@ -944,7 +933,7 @@ package body Exp_Dbug is
Suffix_Index : Int)
is
begin
- Get_External_Name (Typ, Has_Suffix => False);
+ Get_External_Name (Typ);
if Ancestor_Typ /= Typ then
declare
@@ -952,7 +941,7 @@ package body Exp_Dbug is
Save_Str : constant String (1 .. Name_Len)
:= Name_Buffer (1 .. Name_Len);
begin
- Get_External_Name (Ancestor_Typ, Has_Suffix => False);
+ Get_External_Name (Ancestor_Typ);
-- Append the extended name of the ancestor to the
-- extended name of Typ
diff --git a/gcc/ada/exp_dbug.ads b/gcc/ada/exp_dbug.ads
index 86099f66f75..6f27bfe0e3d 100644
--- a/gcc/ada/exp_dbug.ads
+++ b/gcc/ada/exp_dbug.ads
@@ -413,10 +413,11 @@ package Exp_Dbug is
procedure Get_External_Name
(Entity : Entity_Id;
- Has_Suffix : Boolean);
- -- Set Name_Buffer and Name_Len to the external name of entity E. The
+ Has_Suffix : Boolean := False;
+ Suffix : String := "");
+ -- Set Name_Buffer and Name_Len to the external name of the entity. The
-- external name is the Interface_Name, if specified, unless the entity
- -- has an address clause or a suffix.
+ -- has an address clause or Has_Suffix is true.
--
-- If the Interface is not present, or not used, the external name is the
-- concatenation of:
@@ -428,26 +429,11 @@ package Exp_Dbug is
-- - the string "$" (or "__" if target does not allow "$"), followed
-- by homonym suffix, if the entity is an overloaded subprogram
-- or is defined within an overloaded subprogram.
-
- procedure Get_External_Name_With_Suffix
- (Entity : Entity_Id;
- Suffix : String);
- -- Set Name_Buffer and Name_Len to the external name of entity E. If
- -- Suffix is the empty string the external name is as above, otherwise
- -- the external name is the concatenation of:
- --
- -- - the string "_ada_", if the entity is a library subprogram,
- -- - the names of any enclosing scopes, each followed by "__",
- -- or "X_" if the next entity is a subunit)
- -- - the name of the entity
- -- - the string "$" (or "__" if target does not allow "$"), followed
- -- by homonym suffix, if the entity is an overloaded subprogram
- -- or is defined within an overloaded subprogram.
- -- - the string "___" followed by Suffix
+ -- - the string "___" followed by Suffix if Has_Suffix is true.
--
-- Note that a call to this procedure has no effect if we are not
-- generating code, since the necessary information for computing the
- -- proper encoded name is not available in this case.
+ -- proper external name is not available in this case.
--------------------------------------------
-- Subprograms for Handling Qualification --
diff --git a/gcc/ada/exp_disp.adb b/gcc/ada/exp_disp.adb
index 8ed3b3956c2..da2b55d3d9a 100644
--- a/gcc/ada/exp_disp.adb
+++ b/gcc/ada/exp_disp.adb
@@ -3913,10 +3913,7 @@ package body Exp_Disp is
pragma Assert (Related_Type (Node (Elmt)) = Typ);
- Get_External_Name
- (Entity => Node (Elmt),
- Has_Suffix => True);
-
+ Get_External_Name (Node (Elmt));
Set_Interface_Name (DT,
Make_String_Literal (Loc,
Strval => String_From_Name_Buffer));
@@ -7088,7 +7085,7 @@ package body Exp_Disp is
Set_Scope (DT, Current_Scope);
- Get_External_Name (DT, True);
+ Get_External_Name (DT);
Set_Interface_Name (DT,
Make_String_Literal (Loc, Strval => String_From_Name_Buffer));
diff --git a/gcc/ada/fe.h b/gcc/ada/fe.h
index d9fe48b5baa..3d670dc7bc3 100644
--- a/gcc/ada/fe.h
+++ b/gcc/ada/fe.h
@@ -29,17 +29,20 @@
* *
****************************************************************************/
-/* This file contains definitions to access front-end functions and
- variables used by gigi. */
+/* This file contains declarations to access front-end functions and variables
+ used by gigi.
+
+ WARNING: functions taking String_Pointer parameters must abide by the rule
+ documented alongside the definition of String_Pointer in types.h. */
#ifdef __cplusplus
extern "C" {
#endif
-/* comperr: */
+/* comperr: */
#define Compiler_Abort comperr__compiler_abort
-extern int Compiler_Abort (Fat_Pointer, int, Fat_Pointer) ATTRIBUTE_NORETURN;
+extern int Compiler_Abort (String_Pointer, String_Pointer, Boolean) ATTRIBUTE_NORETURN;
/* csets: */
@@ -72,8 +75,6 @@ extern void Set_Mechanism (Entity_Id, Mechanism_Type);
extern void Set_RM_Size (Entity_Id, Uint);
extern void Set_Present_Expr (Node_Id, Uint);
-/* Test if the node N is the name of an entity (i.e. is an identifier,
- expanded name, or an attribute reference that returns an entity). */
#define Is_Entity_Name einfo__is_entity_name
extern Boolean Is_Entity_Name (Node_Id);
@@ -90,8 +91,8 @@ extern Node_Id Get_Attribute_Definition_Clause (Entity_Id, char);
#define Error_Msg_NE errout__error_msg_ne
#define Set_Identifier_Casing errout__set_identifier_casing
-extern void Error_Msg_N (Fat_Pointer, Node_Id);
-extern void Error_Msg_NE (Fat_Pointer, Node_Id, Entity_Id);
+extern void Error_Msg_N (String_Pointer, Node_Id);
+extern void Error_Msg_NE (String_Pointer, Node_Id, Entity_Id);
extern void Set_Identifier_Casing (Char *, const Char *);
/* err_vars: */
@@ -147,11 +148,9 @@ extern void Setup_Asm_Outputs (Node_Id);
#define Get_Encoded_Name exp_dbug__get_encoded_name
#define Get_External_Name exp_dbug__get_external_name
-#define Get_External_Name_With_Suffix exp_dbug__get_external_name_with_suffix
-extern void Get_Encoded_Name (Entity_Id);
-extern void Get_External_Name (Entity_Id, Boolean);
-extern void Get_External_Name_With_Suffix (Entity_Id, Fat_Pointer);
+extern void Get_Encoded_Name (Entity_Id);
+extern void Get_External_Name (Entity_Id, Boolean, String_Pointer);
/* exp_util: */
diff --git a/gcc/ada/gcc-interface/Makefile.in b/gcc/ada/gcc-interface/Makefile.in
index 9af1967ce9f..5c36962ef3b 100644
--- a/gcc/ada/gcc-interface/Makefile.in
+++ b/gcc/ada/gcc-interface/Makefile.in
@@ -1988,6 +1988,44 @@ ifeq ($(strip $(filter-out arm% linux-gnueabi%,$(target_cpu) $(target_os))),)
LIBRARY_VERSION := $(LIB_VERSION)
endif
+# AArch64 Linux
+ifeq ($(strip $(filter-out aarch64% linux%,$(target_cpu) $(target_os))),)
+ LIBGNAT_TARGET_PAIRS = \
+ a-exetim.adb<a-exetim-posix.adb \
+ a-exetim.ads<a-exetim-default.ads \
+ a-intnam.ads<a-intnam-linux.ads \
+ a-synbar.adb<a-synbar-posix.adb \
+ a-synbar.ads<a-synbar-posix.ads \
+ s-inmaop.adb<s-inmaop-posix.adb \
+ s-intman.adb<s-intman-posix.adb \
+ s-linux.ads<s-linux.ads \
+ s-mudido.adb<s-mudido-affinity.adb \
+ s-osinte.ads<s-osinte-linux.ads \
+ s-osinte.adb<s-osinte-posix.adb \
+ s-osprim.adb<s-osprim-posix.adb \
+ s-taprop.adb<s-taprop-linux.adb \
+ s-tasinf.ads<s-tasinf-linux.ads \
+ s-tasinf.adb<s-tasinf-linux.adb \
+ s-tpopsp.adb<s-tpopsp-tls.adb \
+ s-taspri.ads<s-taspri-posix.ads \
+ g-sercom.adb<g-sercom-linux.adb \
+ $(ATOMICS_TARGET_PAIRS) \
+ $(ATOMICS_BUILTINS_TARGET_PAIRS) \
+ system.ads<system-linux-x86_64.ads
+ ## ^^ Note the above is a pretty-close placeholder.
+
+ TOOLS_TARGET_PAIRS = \
+ mlib-tgt-specific.adb<mlib-tgt-specific-linux.adb \
+ indepsw.adb<indepsw-gnu.adb
+
+ EXTRA_GNATRTL_TASKING_OBJS=s-linux.o a-exetim.o
+ EH_MECHANISM=-gcc
+ THREADSLIB=-lpthread -lrt
+ GNATLIB_SHARED=gnatlib-shared-dual
+ GMEM_LIB = gmemlib
+ LIBRARY_VERSION := $(LIB_VERSION)
+endif
+
# Sparc Linux
ifeq ($(strip $(filter-out sparc% linux%,$(target_cpu) $(target_os))),)
LIBGNAT_TARGET_PAIRS_COMMON = \
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index ac2af06c45c..6d0b8b25038 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -72,6 +72,8 @@
#define Has_Thiscall_Convention(E) 0
#endif
+#define STDCALL_PREFIX "_imp__"
+
/* Stack realignment is necessary for functions with foreign conventions when
the ABI doesn't mandate as much as what the compiler assumes - that is, up
to PREFERRED_STACK_BOUNDARY.
@@ -8854,16 +8856,12 @@ get_entity_name (Entity_Id gnat_entity)
tree
create_concat_name (Entity_Id gnat_entity, const char *suffix)
{
- Entity_Kind kind = Ekind (gnat_entity);
+ const Entity_Kind kind = Ekind (gnat_entity);
+ const bool has_suffix = (suffix != NULL);
+ String_Template temp = {1, has_suffix ? strlen (suffix) : 0};
+ String_Pointer sp = {suffix, &temp};
- if (suffix)
- {
- String_Template temp = {1, (int) strlen (suffix)};
- Fat_Pointer fp = {suffix, &temp};
- Get_External_Name_With_Suffix (gnat_entity, fp);
- }
- else
- Get_External_Name (gnat_entity, 0);
+ Get_External_Name (gnat_entity, has_suffix, sp);
/* A variable using the Stdcall convention lives in a DLL. We adjust
its name to use the jump table, the _imp__NAME contains the address
@@ -8871,9 +8869,9 @@ create_concat_name (Entity_Id gnat_entity, const char *suffix)
if ((kind == E_Variable || kind == E_Constant)
&& Has_Stdcall_Convention (gnat_entity))
{
- const int len = 6 + Name_Len;
+ const int len = strlen (STDCALL_PREFIX) + Name_Len;
char *new_name = (char *) alloca (len + 1);
- strcpy (new_name, "_imp__");
+ strcpy (new_name, STDCALL_PREFIX);
strcat (new_name, Name_Buffer);
return get_identifier_with_length (new_name, len);
}
diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c
index a5f2881d697..fe44c6d5b3f 100644
--- a/gcc/ada/gcc-interface/misc.c
+++ b/gcc/ada/gcc-interface/misc.c
@@ -283,8 +283,8 @@ internal_error_function (diagnostic_context *context,
text_info tinfo;
char *buffer, *p, *loc;
String_Template temp, temp_loc;
- Fat_Pointer fp, fp_loc;
- expanded_location s;
+ String_Pointer sp, sp_loc;
+ expanded_location xloc;
/* Warn if plugins present. */
warn_if_plugins ();
@@ -311,21 +311,21 @@ internal_error_function (diagnostic_context *context,
temp.Low_Bound = 1;
temp.High_Bound = p - buffer;
- fp.Bounds = &temp;
- fp.Array = buffer;
+ sp.Bounds = &temp;
+ sp.Array = buffer;
- s = expand_location (input_location);
- if (context->show_column && s.column != 0)
- asprintf (&loc, "%s:%d:%d", s.file, s.line, s.column);
+ xloc = expand_location (input_location);
+ if (context->show_column && xloc.column != 0)
+ asprintf (&loc, "%s:%d:%d", xloc.file, xloc.line, xloc.column);
else
- asprintf (&loc, "%s:%d", s.file, s.line);
+ asprintf (&loc, "%s:%d", xloc.file, xloc.line);
temp_loc.Low_Bound = 1;
temp_loc.High_Bound = strlen (loc);
- fp_loc.Bounds = &temp_loc;
- fp_loc.Array = loc;
+ sp_loc.Bounds = &temp_loc;
+ sp_loc.Array = loc;
Current_Error_Node = error_gnat_node;
- Compiler_Abort (fp, -1, fp_loc);
+ Compiler_Abort (sp, sp_loc, true);
}
/* Perform all the initialization steps that are language-specific. */
diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index 813a1dce6f8..11b89825c23 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -9356,16 +9356,16 @@ void
post_error (const char *msg, Node_Id node)
{
String_Template temp;
- Fat_Pointer fp;
+ String_Pointer sp;
if (No (node))
return;
temp.Low_Bound = 1;
temp.High_Bound = strlen (msg);
- fp.Bounds = &temp;
- fp.Array = msg;
- Error_Msg_N (fp, node);
+ sp.Bounds = &temp;
+ sp.Array = msg;
+ Error_Msg_N (sp, node);
}
/* Similar to post_error, but NODE is the node at which to post the error and
@@ -9375,16 +9375,16 @@ void
post_error_ne (const char *msg, Node_Id node, Entity_Id ent)
{
String_Template temp;
- Fat_Pointer fp;
+ String_Pointer sp;
if (No (node))
return;
temp.Low_Bound = 1;
temp.High_Bound = strlen (msg);
- fp.Bounds = &temp;
- fp.Array = msg;
- Error_Msg_NE (fp, node, ent);
+ sp.Bounds = &temp;
+ sp.Array = msg;
+ Error_Msg_NE (sp, node, ent);
}
/* Similar to post_error_ne, but NUM is the number to use for the '^'. */
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index 9d7438c4c0b..b9b9dc1c52d 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -6518,6 +6518,7 @@ def_builtin_1 (enum built_in_function fncode,
static int flag_isoc94 = 0;
static int flag_isoc99 = 0;
+static int flag_isoc11 = 0;
/* Install what the common builtins.def offers. */
diff --git a/gcc/ada/types.h b/gcc/ada/types.h
index dd049db908a..1330730b71b 100644
--- a/gcc/ada/types.h
+++ b/gcc/ada/types.h
@@ -76,11 +76,19 @@ typedef Char *Str;
/* Pointer to string of Chars */
typedef Char *Str_Ptr;
-/* Types for the fat pointer used for strings and the template it
- points to. */
-typedef struct {int Low_Bound, High_Bound; } String_Template;
-typedef struct {const char *Array; String_Template *Bounds; }
- __attribute ((aligned (sizeof (char *) * 2))) Fat_Pointer;
+/* Types for the fat pointer used for strings and the template it points to.
+ The fat pointer is conceptually a couple of pointers, but it is wrapped
+ up in a special record type. On the Ada side, the record is naturally
+ aligned (i.e. given pointer alignment) on regular platforms, but it is
+ given twice this alignment on strict-alignment platforms for performance
+ reasons. On the C side, for the sake of portability and simplicity, we
+ overalign it on all platforms (so the machine mode is always the same as
+ on the Ada side) but arrange to pass it in an even scalar position as a
+ parameter to functions (so the scalar parameter alignment is always the
+ same as on the Ada side). */
+typedef struct { int Low_Bound, High_Bound; } String_Template;
+typedef struct { const char *Array; String_Template *Bounds; }
+ __attribute ((aligned (sizeof (char *) * 2))) String_Pointer;
/* Types for Node/Entity Kinds: */
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 82729b4c810..0bf6e877145 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -826,10 +826,13 @@ enum cdi_direction
CDI_POST_DOMINATORS = 2
};
+extern enum dom_state dom_info_state (function *, enum cdi_direction);
extern enum dom_state dom_info_state (enum cdi_direction);
extern void set_dom_info_availability (enum cdi_direction, enum dom_state);
+extern bool dom_info_available_p (function *, enum cdi_direction);
extern bool dom_info_available_p (enum cdi_direction);
extern void calculate_dominance_info (enum cdi_direction);
+extern void free_dominance_info (function *, enum cdi_direction);
extern void free_dominance_info (enum cdi_direction);
extern basic_block nearest_common_dominator (enum cdi_direction,
basic_block, basic_block);
diff --git a/gcc/builtins.def b/gcc/builtins.def
index 5a76ba3291e..d400ecb4e8f 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -111,6 +111,13 @@ along with GCC; see the file COPYING3. If not see
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, !flag_isoc99, ATTRS, targetm.libc_has_function (function_c99_misc), true)
+/* Like DEF_LIB_BUILTIN, except that the function is only a part of
+ the standard in C11 or above. */
+#undef DEF_C11_BUILTIN
+#define DEF_C11_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, !flag_isoc11, ATTRS, targetm.libc_has_function (function_c11_misc), true)
+
/* Like DEF_C99_BUILTIN, but for complex math functions. */
#undef DEF_C99_COMPL_BUILTIN
#define DEF_C99_COMPL_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
@@ -169,7 +176,7 @@ along with GCC; see the file COPYING3. If not see
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, true, ATTRS, true, \
(flag_sanitize & (SANITIZE_ADDRESS | SANITIZE_THREAD \
- | SANITIZE_UNDEFINED)))
+ | SANITIZE_UNDEFINED | SANITIZE_FLOAT_DIVIDE)))
#undef DEF_CILKPLUS_BUILTIN
#define DEF_CILKPLUS_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
@@ -223,6 +230,7 @@ DEF_C99_BUILTIN (BUILT_IN_ACOSH, "acosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHF
DEF_C99_BUILTIN (BUILT_IN_ACOSHF, "acoshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
DEF_C99_BUILTIN (BUILT_IN_ACOSHL, "acoshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSL, "acosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C11_BUILTIN (BUILT_IN_ALIGNED_ALLOC, "aligned_alloc", BT_FN_PTR_SIZE_SIZE, ATTR_MALLOC_NOTHROW_LIST)
DEF_LIB_BUILTIN (BUILT_IN_ASIN, "asin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINF, "asinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
DEF_C99_BUILTIN (BUILT_IN_ASINH, "asinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index fb0d102eac3..47bb11438ad 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,8 @@
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_division): Handle REAL_TYPEs. Perform
+ INT_MIN / -1 sanitization only for integer types.
+
2014-04-25 Marek Polacek <polacek@redhat.com>
PR c/18079
diff --git a/gcc/c-family/c-ubsan.c b/gcc/c-family/c-ubsan.c
index e4f6f327277..a0397925fe7 100644
--- a/gcc/c-family/c-ubsan.c
+++ b/gcc/c-family/c-ubsan.c
@@ -46,15 +46,21 @@ ubsan_instrument_division (location_t loc, tree op0, tree op1)
gcc_assert (TYPE_MAIN_VARIANT (TREE_TYPE (op0))
== TYPE_MAIN_VARIANT (TREE_TYPE (op1)));
- /* TODO: REAL_TYPE is not supported yet. */
- if (TREE_CODE (type) != INTEGER_TYPE)
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && (flag_sanitize & SANITIZE_DIVIDE))
+ t = fold_build2 (EQ_EXPR, boolean_type_node,
+ op1, build_int_cst (type, 0));
+ else if (TREE_CODE (type) == REAL_TYPE
+ && (flag_sanitize & SANITIZE_FLOAT_DIVIDE))
+ t = fold_build2 (EQ_EXPR, boolean_type_node,
+ op1, build_real (type, dconst0));
+ else
return NULL_TREE;
- t = fold_build2 (EQ_EXPR, boolean_type_node,
- op1, build_int_cst (type, 0));
-
/* We check INT_MIN / -1 only for signed types. */
- if (!TYPE_UNSIGNED (type))
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && (flag_sanitize & SANITIZE_DIVIDE)
+ && !TYPE_UNSIGNED (type))
{
tree x;
tt = fold_build2 (EQ_EXPR, boolean_type_node, op1,
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index 80841af40ee..bf61610e37c 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,20 @@
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * c-typeck.c (build_binary_op): Call ubsan_instrument_division
+ also when SANITIZE_FLOAT_DIVIDE is on.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ PR c/60139
+ * c-typeck.c (output_init_element): Pass OPT_Wpedantic to pedwarn
+ and pedwarn_init. Use loc instead of input_location.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ PR c/60351
+ * c-typeck.c (build_binary_op): Use location when warning about
+ shift count.
+
2014-04-25 Marek Polacek <polacek@redhat.com>
PR c/18079
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index e4293104d7b..11825a9218e 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -8256,12 +8256,12 @@ output_init_element (location_t loc, tree value, tree origtype,
value = error_mark_node;
}
else if (require_constant_elements)
- pedwarn (input_location, 0,
+ pedwarn (loc, OPT_Wpedantic,
"initializer element is not computable at load time");
}
else if (!maybe_const
&& (require_constant_value || require_constant_elements))
- pedwarn_init (input_location, 0,
+ pedwarn_init (loc, OPT_Wpedantic,
"initializer element is not a constant expression");
/* Issue -Wc++-compat warnings about initializing a bitfield with
@@ -10403,7 +10403,7 @@ build_binary_op (location_t location, enum tree_code code,
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
- warning (0, "right shift count is negative");
+ warning_at (location, 0, "right shift count is negative");
}
else
{
@@ -10414,7 +10414,8 @@ build_binary_op (location_t location, enum tree_code code,
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
- warning (0, "right shift count >= width of type");
+ warning_at (location, 0, "right shift count >= width "
+ "of type");
}
}
}
@@ -10456,14 +10457,15 @@ build_binary_op (location_t location, enum tree_code code,
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
- warning (0, "left shift count is negative");
+ warning_at (location, 0, "left shift count is negative");
}
else if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
- warning (0, "left shift count >= width of type");
+ warning_at (location, 0, "left shift count >= width of "
+ "type");
}
}
@@ -10996,7 +10998,8 @@ build_binary_op (location_t location, enum tree_code code,
return error_mark_node;
}
- if ((flag_sanitize & (SANITIZE_SHIFT | SANITIZE_DIVIDE))
+ if ((flag_sanitize & (SANITIZE_SHIFT | SANITIZE_DIVIDE
+ | SANITIZE_FLOAT_DIVIDE))
&& current_function_decl != 0
&& !lookup_attribute ("no_sanitize_undefined",
DECL_ATTRIBUTES (current_function_decl))
@@ -11007,7 +11010,8 @@ build_binary_op (location_t location, enum tree_code code,
op1 = c_save_expr (op1);
op0 = c_fully_fold (op0, false, NULL);
op1 = c_fully_fold (op1, false, NULL);
- if (doing_div_or_mod && (flag_sanitize & SANITIZE_DIVIDE))
+ if (doing_div_or_mod && (flag_sanitize & (SANITIZE_DIVIDE
+ | SANITIZE_FLOAT_DIVIDE)))
instrument_expr = ubsan_instrument_division (location, op0, op1);
else if (doing_shift && (flag_sanitize & SANITIZE_SHIFT))
instrument_expr = ubsan_instrument_shift (location, code, op0, op1);
diff --git a/gcc/calls.c b/gcc/calls.c
index e798c7a0349..78fe7d8525b 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -1104,8 +1104,6 @@ initialize_argument_information (int num_actuals ATTRIBUTE_UNUSED,
{
CUMULATIVE_ARGS *args_so_far_pnt = get_cumulative_args (args_so_far);
location_t loc = EXPR_LOCATION (exp);
- /* 1 if scanning parms front to back, -1 if scanning back to front. */
- int inc;
/* Count arg position in order args appear. */
int argpos;
@@ -1116,22 +1114,9 @@ initialize_argument_information (int num_actuals ATTRIBUTE_UNUSED,
args_size->var = 0;
/* In this loop, we consider args in the order they are written.
- We fill up ARGS from the front or from the back if necessary
- so that in any case the first arg to be pushed ends up at the front. */
+ We fill up ARGS from the back. */
- if (PUSH_ARGS_REVERSED)
- {
- i = num_actuals - 1, inc = -1;
- /* In this case, must reverse order of args
- so that we compute and push the last arg first. */
- }
- else
- {
- i = 0, inc = 1;
- }
-
- /* First fill in the actual arguments in the ARGS array, splitting
- complex arguments if necessary. */
+ i = num_actuals - 1;
{
int j = i;
call_expr_arg_iterator iter;
@@ -1140,7 +1125,7 @@ initialize_argument_information (int num_actuals ATTRIBUTE_UNUSED,
if (struct_value_addr_value)
{
args[j].tree_value = struct_value_addr_value;
- j += inc;
+ j--;
}
FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
{
@@ -1152,17 +1137,17 @@ initialize_argument_information (int num_actuals ATTRIBUTE_UNUSED,
{
tree subtype = TREE_TYPE (argtype);
args[j].tree_value = build1 (REALPART_EXPR, subtype, arg);
- j += inc;
+ j--;
args[j].tree_value = build1 (IMAGPART_EXPR, subtype, arg);
}
else
args[j].tree_value = arg;
- j += inc;
+ j--;
}
}
/* I counts args in order (to be) pushed; ARGPOS counts in order written. */
- for (argpos = 0; argpos < num_actuals; i += inc, argpos++)
+ for (argpos = 0; argpos < num_actuals; i--, argpos++)
{
tree type = TREE_TYPE (args[i].tree_value);
int unsignedp;
@@ -2952,9 +2937,8 @@ expand_call (tree exp, rtx target, int ignore)
compute_argument_addresses (args, argblock, num_actuals);
- /* If we push args individually in reverse order, perform stack alignment
- before the first push (the last arg). */
- if (PUSH_ARGS_REVERSED && argblock == 0
+ /* Perform stack alignment before the first push (the last arg). */
+ if (argblock == 0
&& adjusted_args_size.constant > reg_parm_stack_space
&& adjusted_args_size.constant != unadjusted_args_size)
{
@@ -3097,12 +3081,6 @@ expand_call (tree exp, rtx target, int ignore)
sibcall_failure = 1;
}
- /* If we pushed args in forward order, perform stack alignment
- after pushing the last arg. */
- if (!PUSH_ARGS_REVERSED && argblock == 0)
- anti_adjust_stack (GEN_INT (adjusted_args_size.constant
- - unadjusted_args_size));
-
/* If register arguments require space on the stack and stack space
was not preallocated, allocate stack space here for arguments
passed in registers. */
@@ -3152,8 +3130,7 @@ expand_call (tree exp, rtx target, int ignore)
if (pass == 1 && (return_flags & ERF_RETURNS_ARG))
{
int arg_nr = return_flags & ERF_RETURN_ARG_MASK;
- if (PUSH_ARGS_REVERSED)
- arg_nr = num_actuals - arg_nr - 1;
+ arg_nr = num_actuals - arg_nr - 1;
if (arg_nr >= 0
&& arg_nr < num_actuals
&& args[arg_nr].reg
@@ -3610,7 +3587,6 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
isn't present here, so we default to native calling abi here. */
tree fndecl ATTRIBUTE_UNUSED = NULL_TREE; /* library calls default to host calling abi ? */
tree fntype ATTRIBUTE_UNUSED = NULL_TREE; /* library calls default to host calling abi ? */
- int inc;
int count;
rtx argblock = 0;
CUMULATIVE_ARGS args_so_far_v;
@@ -3959,22 +3935,13 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
argblock = push_block (GEN_INT (args_size.constant), 0, 0);
}
- /* If we push args individually in reverse order, perform stack alignment
+ /* We push args individually in reverse order; perform stack alignment
before the first push (the last arg). */
- if (argblock == 0 && PUSH_ARGS_REVERSED)
+ if (argblock == 0)
anti_adjust_stack (GEN_INT (args_size.constant
- original_args_size.constant));
- if (PUSH_ARGS_REVERSED)
- {
- inc = -1;
- argnum = nargs - 1;
- }
- else
- {
- inc = 1;
- argnum = 0;
- }
+ argnum = nargs - 1;
#ifdef REG_PARM_STACK_SPACE
if (ACCUMULATE_OUTGOING_ARGS)
@@ -3991,7 +3958,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
/* ARGNUM indexes the ARGVEC array in the order in which the arguments
are to be pushed. */
- for (count = 0; count < nargs; count++, argnum += inc)
+ for (count = 0; count < nargs; count++, argnum--)
{
enum machine_mode mode = argvec[argnum].mode;
rtx val = argvec[argnum].value;
@@ -4093,16 +4060,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
}
}
- /* If we pushed args in forward order, perform stack alignment
- after pushing the last arg. */
- if (argblock == 0 && !PUSH_ARGS_REVERSED)
- anti_adjust_stack (GEN_INT (args_size.constant
- - original_args_size.constant));
-
- if (PUSH_ARGS_REVERSED)
- argnum = nargs - 1;
- else
- argnum = 0;
+ argnum = nargs - 1;
fun = prepare_call_address (NULL, fun, NULL, &call_fusage, 0, 0);
@@ -4110,7 +4068,7 @@ emit_library_call_value_1 (int retval, rtx orgfun, rtx value,
/* ARGNUM indexes the ARGVEC array in the order in which the arguments
are to be pushed. */
- for (count = 0; count < nargs; count++, argnum += inc)
+ for (count = 0; count < nargs; count++, argnum--)
{
enum machine_mode mode = argvec[argnum].mode;
rtx val = argvec[argnum].value;
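
For illustration, a minimal standalone sketch (plain C, not GCC code; all names below are made up) of the back-to-front fill this patch hard-codes now that arguments are always pushed in reverse order: filling from the highest index leaves the last source argument at index 0, so the array ends up in push order.

#include <stdio.h>

int
main (void)
{
  const char *actuals[] = { "a0", "a1", "a2", "a3" };   /* source order */
  int num_actuals = 4;
  const char *args[4];

  /* Mirror "i = num_actuals - 1; ...; i--": fill ARGS from the back.  */
  int i = num_actuals - 1;
  for (int argpos = 0; argpos < num_actuals; i--, argpos++)
    args[i] = actuals[argpos];

  /* args[0] now holds "a3": the last source argument comes first in
     push order, which is what pushing the last argument first means.  */
  for (int k = 0; k < num_actuals; k++)
    printf ("args[%d] = %s\n", k, args[k]);
  return 0;
}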
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index b5df572d039..2b4ce813c90 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -1695,8 +1695,8 @@ release_function_body (tree decl)
}
if (cfun->cfg)
{
- free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
+ gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
+ gcc_assert (!dom_info_available_p (CDI_POST_DOMINATORS));
clear_edges ();
cfun->cfg = NULL;
}
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 92f8a94b3e6..81205ff0c8f 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -808,8 +808,9 @@ case ${target} in
*-*-solaris2*)
# i?86-*-solaris2* needs to insert headers between cpu default and
# Solaris 2 specific ones.
- sol2_tm_file="dbxelf.h elfos.h ${cpu_type}/sysv4.h sol2.h ${cpu_type}/sol2.h"
- sol2_tm_file="${sol2_tm_file} sol2-10.h"
+ sol2_tm_file_head="dbxelf.h elfos.h ${cpu_type}/sysv4.h"
+ sol2_tm_file_tail="${cpu_type}/sol2.h sol2.h"
+ sol2_tm_file="${sol2_tm_file_head} ${sol2_tm_file_tail}"
use_gcc_stdint=wrap
if test x$gnu_ld = xyes; then
tm_file="usegld.h ${tm_file}"
@@ -1515,14 +1516,13 @@ i[34567]86-*-rtems*)
tmake_file="${tmake_file} i386/t-rtems"
;;
i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*)
- tm_file="${tm_file} i386/unix.h i386/att.h ${sol2_tm_file}"
# Set default arch_32 to pentium4, tune_32 to generic like the other
# i386 targets, although config.guess defaults to i386-pc-solaris2*.
with_arch_32=${with_arch_32:-pentium4}
with_tune_32=${with_tune_32:-generic}
- tm_file="${tm_file} i386/x86-64.h i386/sol2-bi.h sol2-bi.h"
+ tm_file="${tm_file} i386/unix.h i386/att.h ${sol2_tm_file_head} i386/x86-64.h ${sol2_tm_file_tail}"
tm_defines="${tm_defines} TARGET_BI_ARCH=1"
- tmake_file="$tmake_file i386/t-sol2-64"
+ tmake_file="$tmake_file i386/t-sol2"
need_64bit_isa=yes
if test x$with_cpu = x; then
if test x$with_cpu_64 = x; then
@@ -2661,7 +2661,7 @@ sparc-*-netbsdelf*)
tmake_file="${tmake_file} sparc/t-sparc"
;;
sparc*-*-solaris2*)
- tm_file="sparc/biarch64.h ${tm_file} ${sol2_tm_file} sol2-bi.h sparc/tso.h"
+ tm_file="sparc/biarch64.h ${tm_file} ${sol2_tm_file} sparc/tso.h"
case ${target} in
sparc64-*-* | sparcv9-*-*)
tm_file="sparc/default-64.h ${tm_file}"
@@ -2670,7 +2670,7 @@ sparc*-*-solaris2*)
test x$with_cpu != x || with_cpu=v9
;;
esac
- tmake_file="${tmake_file} sparc/t-sparc sparc/t-sol2-64"
+ tmake_file="${tmake_file} sparc/t-sparc sparc/t-sol2"
;;
sparc-wrs-vxworks)
tm_file="${tm_file} elfos.h sparc/sysv4.h vx-common.h vxworks.h sparc/vxworks.h"
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 4616ad24c07..a3019828a93 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -246,6 +246,11 @@ aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_void, qualifier_pointer_map_mode, qualifier_none };
#define TYPES_STORE1 (aarch64_types_store1_qualifiers)
#define TYPES_STORESTRUCT (aarch64_types_store1_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_storestruct_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_pointer_map_mode,
+ qualifier_none, qualifier_none };
+#define TYPES_STORESTRUCT_LANE (aarch64_types_storestruct_lane_qualifiers)
#define CF0(N, X) CODE_FOR_aarch64_##N##X
#define CF1(N, X) CODE_FOR_##N##X##1
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 5542f023b33..04cbc780da2 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -175,6 +175,8 @@ bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx);
bool aarch64_is_long_call_p (rtx);
bool aarch64_label_mentioned_p (rtx);
bool aarch64_legitimate_pic_operand_p (rtx);
+bool aarch64_modes_tieable_p (enum machine_mode mode1,
+ enum machine_mode mode2);
bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
bool aarch64_mov_operand_p (rtx, enum aarch64_symbol_context,
enum machine_mode);
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index fa332ae5948..339e8f86a4b 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -118,6 +118,10 @@
BUILTIN_VQ (STORESTRUCT, st3, 0)
BUILTIN_VQ (STORESTRUCT, st4, 0)
+ BUILTIN_VQ (STORESTRUCT_LANE, st2_lane, 0)
+ BUILTIN_VQ (STORESTRUCT_LANE, st3_lane, 0)
+ BUILTIN_VQ (STORESTRUCT_LANE, st4_lane, 0)
+
BUILTIN_VQW (BINOP, saddl2, 0)
BUILTIN_VQW (BINOP, uaddl2, 0)
BUILTIN_VQW (BINOP, ssubl2, 0)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index c05767b2045..108bc8d8893 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3686,6 +3686,17 @@
[(set_attr "type" "neon_store2_2reg<q>")]
)
+(define_insn "vec_store_lanesoi_lane<mode>"
+ [(set (match_operand:<V_TWO_ELEM> 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:<V_TWO_ELEM> [(match_operand:OI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_ST2_LANE))]
+ "TARGET_SIMD"
+ "st2\\t{%S1.<Vetype> - %T1.<Vetype>}[%2], %0"
+ [(set_attr "type" "neon_store3_one_lane<q>")]
+)
+
(define_insn "vec_load_lanesci<mode>"
[(set (match_operand:CI 0 "register_operand" "=w")
(unspec:CI [(match_operand:CI 1 "aarch64_simd_struct_operand" "Utv")
@@ -3706,6 +3717,17 @@
[(set_attr "type" "neon_store3_3reg<q>")]
)
+(define_insn "vec_store_lanesci_lane<mode>"
+ [(set (match_operand:<V_THREE_ELEM> 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:<V_THREE_ELEM> [(match_operand:CI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_ST3_LANE))]
+ "TARGET_SIMD"
+ "st3\\t{%S1.<Vetype> - %U1.<Vetype>}[%2], %0"
+ [(set_attr "type" "neon_store3_one_lane<q>")]
+)
+
(define_insn "vec_load_lanesxi<mode>"
[(set (match_operand:XI 0 "register_operand" "=w")
(unspec:XI [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv")
@@ -3726,6 +3748,17 @@
[(set_attr "type" "neon_store4_4reg<q>")]
)
+(define_insn "vec_store_lanesxi_lane<mode>"
+ [(set (match_operand:<V_FOUR_ELEM> 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:<V_FOUR_ELEM> [(match_operand:XI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_ST4_LANE))]
+ "TARGET_SIMD"
+ "st4\\t{%S1.<Vetype> - %V1.<Vetype>}[%2], %0"
+ [(set_attr "type" "neon_store4_one_lane<q>")]
+)
+
;; Reload patterns for AdvSIMD register list operands.
(define_expand "mov<mode>"
@@ -4220,6 +4253,57 @@
DONE;
})
+(define_expand "aarch64_st2_lane<VQ:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:OI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
+ (match_operand:SI 2 "immediate_operand")]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <V_TWO_ELEM>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[0]);
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+
+ emit_insn (gen_vec_store_lanesoi_lane<VQ:mode> (mem,
+ operands[1],
+ operands[2]));
+ DONE;
+})
+
+(define_expand "aarch64_st3_lane<VQ:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:CI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
+ (match_operand:SI 2 "immediate_operand")]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <V_THREE_ELEM>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[0]);
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+
+ emit_insn (gen_vec_store_lanesci_lane<VQ:mode> (mem,
+ operands[1],
+ operands[2]));
+ DONE;
+})
+
+(define_expand "aarch64_st4_lane<VQ:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:XI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)
+ (match_operand:SI 2 "immediate_operand")]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <V_FOUR_ELEM>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[0]);
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+
+ emit_insn (gen_vec_store_lanesxi_lane<VQ:mode> (mem,
+ operands[1],
+ operands[2]));
+ DONE;
+})
+
(define_expand "aarch64_st1<VALL:mode>"
[(match_operand:DI 0 "register_operand")
(match_operand:VALL 1 "register_operand")]
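
For illustration, a minimal usage sketch (hypothetical user code, not part of the patch): with the UNSPEC_ST2_LANE pattern and the aarch64_st2_lane expander above, a call such as the one below can be selected to a single st2 {...}[lane] store on an aarch64 target.

#include <arm_neon.h>

/* Hypothetical example: store lane 1 of a pair of Q registers.  With the
   new vec_store_lanesoi_lane pattern this can become one "st2" instruction
   instead of the old inline-asm load/store sequence in arm_neon.h.  */
void
store_lane_1 (int32_t *p, int32x4x2_t v)
{
  vst2q_lane_s32 (p, v, 1);
}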
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 1d48108516d..d3d7d1e60d6 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -8316,7 +8316,8 @@ aarch64_cannot_change_mode_class (enum machine_mode from,
/* Limited combinations of subregs are safe on FPREGs. Particularly,
1. Vector Mode to Scalar mode where 1 unit of the vector is accessed.
2. Scalar to Scalar for integer modes or same size float modes.
- 3. Vector to Vector modes. */
+ 3. Vector to Vector modes.
+ 4. On little-endian only, Vector-Structure to Vector modes. */
if (GET_MODE_SIZE (from) > GET_MODE_SIZE (to))
{
if (aarch64_vector_mode_supported_p (from)
@@ -8332,11 +8333,41 @@ aarch64_cannot_change_mode_class (enum machine_mode from,
if (aarch64_vector_mode_supported_p (from)
&& aarch64_vector_mode_supported_p (to))
return false;
+
+  /* Within a vector structure straddling multiple vector registers
+ we are in a mixed-endian representation. As such, we can't
+ easily change modes for BYTES_BIG_ENDIAN. Otherwise, we can
+ switch between vectors and vector structures cheaply. */
+ if (!BYTES_BIG_ENDIAN)
+ if ((aarch64_vector_mode_supported_p (from)
+ && aarch64_vect_struct_mode_p (to))
+ || (aarch64_vector_mode_supported_p (to)
+ && aarch64_vect_struct_mode_p (from)))
+ return false;
}
return true;
}
+/* Implement MODES_TIEABLE_P. */
+
+bool
+aarch64_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
+{
+ if (GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2))
+ return true;
+
+ /* We specifically want to allow elements of "structure" modes to
+ be tieable to the structure. This more general condition allows
+ other rarer situations too. */
+ if (TARGET_SIMD
+ && aarch64_vector_mode_p (mode1)
+ && aarch64_vector_mode_p (mode2))
+ return true;
+
+ return false;
+}
+
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST aarch64_address_cost
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index e2b6c8e2908..c9b30d01865 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -365,8 +365,7 @@ extern unsigned long aarch64_tune_flags;
#define HARD_REGNO_MODE_OK(REGNO, MODE) aarch64_hard_regno_mode_ok (REGNO, MODE)
-#define MODES_TIEABLE_P(MODE1, MODE2) \
- (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+#define MODES_TIEABLE_P(MODE1, MODE2) aarch64_modes_tieable_p (MODE1, MODE2)
#define DWARF2_UNWIND_INFO 1
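
A hedged illustration of the effect (hypothetical user code; the exact code generated depends on the register allocator): with vector and vector-structure modes tieable, and the corresponding subreg allowed by aarch64_cannot_change_mode_class on little-endian, pulling one vector out of a multi-vector structure need not be forced through memory.

#include <arm_neon.h>

/* Hypothetical example: OImode (a pair of Q registers) can now be tied to
   V4SImode, so returning the first half of the pair can stay in registers.  */
int32x4_t
first_of_pair (int32x4x2_t v)
{
  return v.val[0];
}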
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 7965db4c9c7..266d7873a5a 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -98,6 +98,9 @@
UNSPEC_ST2
UNSPEC_ST3
UNSPEC_ST4
+ UNSPEC_ST2_LANE
+ UNSPEC_ST3_LANE
+ UNSPEC_ST4_LANE
UNSPEC_TLS
UNSPEC_TLSDESC
UNSPEC_USHL_2S
@@ -2426,6 +2429,25 @@
}
)
+(define_expand "mov<mode>cc"
+ [(set (match_operand:GPF 0 "register_operand" "")
+ (if_then_else:GPF (match_operand 1 "aarch64_comparison_operator" "")
+ (match_operand:GPF 2 "register_operand" "")
+ (match_operand:GPF 3 "register_operand" "")))]
+ ""
+ {
+ rtx ccreg;
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (code == UNEQ || code == LTGT)
+ FAIL;
+
+ ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
+ XEXP (operands[1], 1));
+ operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+ }
+)
+
(define_insn "*csinc2<mode>_insn"
[(set (match_operand:GPI 0 "register_operand" "=r")
(plus:GPI (match_operator:GPI 2 "aarch64_comparison_operator"
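
For illustration, a usage sketch (hypothetical source, not part of the patch): the new mov<mode>cc expander for the GPF modes lets a floating-point select be expanded as a compare plus FCSEL rather than a branch; for UNEQ and LTGT the expander FAILs and the generic branching path is used instead.

/* Hypothetical example for an aarch64 target: at -O2 this is expected to
   use a floating-point compare followed by fcsel, with no branch.  */
double
fsel (double a, double b, double c, double d)
{
  return a < b ? c : d;
}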
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 9f1fa98e6fb..f6213ce2aea 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -13199,929 +13199,6 @@ vtstq_p16 (poly16x8_t a, poly16x8_t b)
: /* No clobbers */);
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vuzp1_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vuzp1_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vuzp1_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vuzp1_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vuzp1_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vuzp1_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vuzp1_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vuzp1_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vuzp1_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vuzp1q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vuzp1q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vuzp1q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vuzp1q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vuzp1q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vuzp1q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vuzp1q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vuzp1q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vuzp1q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vuzp1q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vuzp1q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vuzp1q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vuzp2_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vuzp2_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vuzp2_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vuzp2_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vuzp2_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vuzp2_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vuzp2_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vuzp2_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vuzp2_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vuzp2q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vuzp2q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vuzp2q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vuzp2q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vuzp2q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vuzp2q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vuzp2q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vuzp2q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vuzp2q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vuzp2q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vuzp2q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vuzp2q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vzip1_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vzip1_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vzip1_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vzip1_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vzip1_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vzip1_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vzip1_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vzip1_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vzip1_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vzip1q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vzip1q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vzip1q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vzip1q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vzip1q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vzip1q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vzip1q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vzip1q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vzip1q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vzip1q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vzip1q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vzip1q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vzip2_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vzip2_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vzip2_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vzip2_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vzip2_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vzip2_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vzip2_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vzip2_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vzip2_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vzip2q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vzip2q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vzip2q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vzip2q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vzip2q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vzip2q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vzip2q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vzip2q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vzip2q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vzip2q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vzip2q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vzip2q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
/* End of temporary inline asm implementations. */
@@ -14452,131 +13529,224 @@ __LD4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
__LD4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
__LD4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
-#define __ST2_LANE_FUNC(intype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- typedef struct { ptrtype __x[2]; } __ST2_LANE_STRUCTURE_##intype; \
- __extension__ static __inline void \
- __attribute__ ((__always_inline__)) \
- vst2 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr, \
- intype b, const int c) \
- { \
- __ST2_LANE_STRUCTURE_##intype *__p = \
- (__ST2_LANE_STRUCTURE_##intype *)ptr; \
- __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
- "st2 {v16." #lnsuffix ", v17." #lnsuffix "}[%2], %0\n\t" \
- : "=Q"(*__p) \
- : "Q"(b), "i"(c) \
- : "v16", "v17"); \
- }
-
-__ST2_LANE_FUNC (int8x8x2_t, int8_t, 8b, b, s8,)
-__ST2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
-__ST2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
-__ST2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
-__ST2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
-__ST2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
-__ST2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
-__ST2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
-__ST2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
-__ST2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
-__ST2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
-__ST2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
-__ST2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
-__ST2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
-__ST2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
-__ST2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
-__ST2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
-__ST2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
-__ST2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
-__ST2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
-__ST2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
-__ST2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
-__ST2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
-__ST2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
-
-#define __ST3_LANE_FUNC(intype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- typedef struct { ptrtype __x[3]; } __ST3_LANE_STRUCTURE_##intype; \
- __extension__ static __inline void \
- __attribute__ ((__always_inline__)) \
- vst3 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr, \
- intype b, const int c) \
- { \
- __ST3_LANE_STRUCTURE_##intype *__p = \
- (__ST3_LANE_STRUCTURE_##intype *)ptr; \
- __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
- "st3 {v16." #lnsuffix " - v18." #lnsuffix "}[%2], %0\n\t" \
- : "=Q"(*__p) \
- : "Q"(b), "i"(c) \
- : "v16", "v17", "v18"); \
- }
-
-__ST3_LANE_FUNC (int8x8x3_t, int8_t, 8b, b, s8,)
-__ST3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
-__ST3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
-__ST3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
-__ST3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
-__ST3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
-__ST3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
-__ST3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
-__ST3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
-__ST3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
-__ST3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
-__ST3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
-__ST3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
-__ST3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
-__ST3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
-__ST3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
-__ST3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
-__ST3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
-__ST3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
-__ST3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
-__ST3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
-__ST3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
-__ST3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
-__ST3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
-
-#define __ST4_LANE_FUNC(intype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- typedef struct { ptrtype __x[4]; } __ST4_LANE_STRUCTURE_##intype; \
- __extension__ static __inline void \
- __attribute__ ((__always_inline__)) \
- vst4 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr, \
- intype b, const int c) \
- { \
- __ST4_LANE_STRUCTURE_##intype *__p = \
- (__ST4_LANE_STRUCTURE_##intype *)ptr; \
- __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
- "st4 {v16." #lnsuffix " - v19." #lnsuffix "}[%2], %0\n\t" \
- : "=Q"(*__p) \
- : "Q"(b), "i"(c) \
- : "v16", "v17", "v18", "v19"); \
- }
-
-__ST4_LANE_FUNC (int8x8x4_t, int8_t, 8b, b, s8,)
-__ST4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
-__ST4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
-__ST4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
-__ST4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
-__ST4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
-__ST4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
-__ST4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
-__ST4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
-__ST4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
-__ST4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
-__ST4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
-__ST4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
-__ST4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
-__ST4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
-__ST4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
-__ST4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
-__ST4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
-__ST4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
-__ST4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
-__ST4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
-__ST4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
-__ST4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
-__ST4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
+#define __ST2_LANE_FUNC(intype, largetype, ptrtype, \
+ mode, ptr_mode, funcsuffix, signedtype) \
+__extension__ static __inline void \
+__attribute__ ((__always_inline__)) \
+vst2_lane_ ## funcsuffix (ptrtype *__ptr, \
+ intype __b, const int __c) \
+{ \
+ __builtin_aarch64_simd_oi __o; \
+ largetype __temp; \
+ __temp.val[0] \
+ = vcombine_##funcsuffix (__b.val[0], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __temp.val[1] \
+ = vcombine_##funcsuffix (__b.val[1], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __o = __builtin_aarch64_set_qregoi##mode (__o, \
+ (signedtype) __temp.val[0], 0); \
+ __o = __builtin_aarch64_set_qregoi##mode (__o, \
+ (signedtype) __temp.val[1], 1); \
+ __builtin_aarch64_st2_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
+ __ptr, __o, __c); \
+}
+
+__ST2_LANE_FUNC (float32x2x2_t, float32x4x2_t, float32_t, v4sf, sf, f32,
+ float32x4_t)
+__ST2_LANE_FUNC (float64x1x2_t, float64x2x2_t, float64_t, v2df, df, f64,
+ float64x2_t)
+__ST2_LANE_FUNC (poly8x8x2_t, poly8x16x2_t, poly8_t, v16qi, qi, p8, int8x16_t)
+__ST2_LANE_FUNC (poly16x4x2_t, poly16x8x2_t, poly16_t, v8hi, hi, p16,
+ int16x8_t)
+__ST2_LANE_FUNC (int8x8x2_t, int8x16x2_t, int8_t, v16qi, qi, s8, int8x16_t)
+__ST2_LANE_FUNC (int16x4x2_t, int16x8x2_t, int16_t, v8hi, hi, s16, int16x8_t)
+__ST2_LANE_FUNC (int32x2x2_t, int32x4x2_t, int32_t, v4si, si, s32, int32x4_t)
+__ST2_LANE_FUNC (int64x1x2_t, int64x2x2_t, int64_t, v2di, di, s64, int64x2_t)
+__ST2_LANE_FUNC (uint8x8x2_t, uint8x16x2_t, uint8_t, v16qi, qi, u8, int8x16_t)
+__ST2_LANE_FUNC (uint16x4x2_t, uint16x8x2_t, uint16_t, v8hi, hi, u16,
+ int16x8_t)
+__ST2_LANE_FUNC (uint32x2x2_t, uint32x4x2_t, uint32_t, v4si, si, u32,
+ int32x4_t)
+__ST2_LANE_FUNC (uint64x1x2_t, uint64x2x2_t, uint64_t, v2di, di, u64,
+ int64x2_t)
+
+#undef __ST2_LANE_FUNC
+#define __ST2_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
+__extension__ static __inline void \
+__attribute__ ((__always_inline__)) \
+vst2q_lane_ ## funcsuffix (ptrtype *__ptr, \
+ intype __b, const int __c) \
+{ \
+ union { intype __i; \
+ __builtin_aarch64_simd_oi __o; } __temp = { __b }; \
+ __builtin_aarch64_st2_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
+ __ptr, __temp.__o, __c); \
+}
+
+__ST2_LANE_FUNC (float32x4x2_t, float32_t, v4sf, sf, f32)
+__ST2_LANE_FUNC (float64x2x2_t, float64_t, v2df, df, f64)
+__ST2_LANE_FUNC (poly8x16x2_t, poly8_t, v16qi, qi, p8)
+__ST2_LANE_FUNC (poly16x8x2_t, poly16_t, v8hi, hi, p16)
+__ST2_LANE_FUNC (int8x16x2_t, int8_t, v16qi, qi, s8)
+__ST2_LANE_FUNC (int16x8x2_t, int16_t, v8hi, hi, s16)
+__ST2_LANE_FUNC (int32x4x2_t, int32_t, v4si, si, s32)
+__ST2_LANE_FUNC (int64x2x2_t, int64_t, v2di, di, s64)
+__ST2_LANE_FUNC (uint8x16x2_t, uint8_t, v16qi, qi, u8)
+__ST2_LANE_FUNC (uint16x8x2_t, uint16_t, v8hi, hi, u16)
+__ST2_LANE_FUNC (uint32x4x2_t, uint32_t, v4si, si, u32)
+__ST2_LANE_FUNC (uint64x2x2_t, uint64_t, v2di, di, u64)
+
+#define __ST3_LANE_FUNC(intype, largetype, ptrtype, \
+ mode, ptr_mode, funcsuffix, signedtype) \
+__extension__ static __inline void \
+__attribute__ ((__always_inline__)) \
+vst3_lane_ ## funcsuffix (ptrtype *__ptr, \
+ intype __b, const int __c) \
+{ \
+ __builtin_aarch64_simd_ci __o; \
+ largetype __temp; \
+ __temp.val[0] \
+ = vcombine_##funcsuffix (__b.val[0], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __temp.val[1] \
+ = vcombine_##funcsuffix (__b.val[1], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __temp.val[2] \
+ = vcombine_##funcsuffix (__b.val[2], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __o = __builtin_aarch64_set_qregci##mode (__o, \
+ (signedtype) __temp.val[0], 0); \
+ __o = __builtin_aarch64_set_qregci##mode (__o, \
+ (signedtype) __temp.val[1], 1); \
+ __o = __builtin_aarch64_set_qregci##mode (__o, \
+ (signedtype) __temp.val[2], 2); \
+ __builtin_aarch64_st3_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
+ __ptr, __o, __c); \
+}
+
+__ST3_LANE_FUNC (float32x2x3_t, float32x4x3_t, float32_t, v4sf, sf, f32,
+ float32x4_t)
+__ST3_LANE_FUNC (float64x1x3_t, float64x2x3_t, float64_t, v2df, df, f64,
+ float64x2_t)
+__ST3_LANE_FUNC (poly8x8x3_t, poly8x16x3_t, poly8_t, v16qi, qi, p8, int8x16_t)
+__ST3_LANE_FUNC (poly16x4x3_t, poly16x8x3_t, poly16_t, v8hi, hi, p16,
+ int16x8_t)
+__ST3_LANE_FUNC (int8x8x3_t, int8x16x3_t, int8_t, v16qi, qi, s8, int8x16_t)
+__ST3_LANE_FUNC (int16x4x3_t, int16x8x3_t, int16_t, v8hi, hi, s16, int16x8_t)
+__ST3_LANE_FUNC (int32x2x3_t, int32x4x3_t, int32_t, v4si, si, s32, int32x4_t)
+__ST3_LANE_FUNC (int64x1x3_t, int64x2x3_t, int64_t, v2di, di, s64, int64x2_t)
+__ST3_LANE_FUNC (uint8x8x3_t, uint8x16x3_t, uint8_t, v16qi, qi, u8, int8x16_t)
+__ST3_LANE_FUNC (uint16x4x3_t, uint16x8x3_t, uint16_t, v8hi, hi, u16,
+ int16x8_t)
+__ST3_LANE_FUNC (uint32x2x3_t, uint32x4x3_t, uint32_t, v4si, si, u32,
+ int32x4_t)
+__ST3_LANE_FUNC (uint64x1x3_t, uint64x2x3_t, uint64_t, v2di, di, u64,
+ int64x2_t)
+
+#undef __ST3_LANE_FUNC
+#define __ST3_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
+__extension__ static __inline void \
+__attribute__ ((__always_inline__)) \
+vst3q_lane_ ## funcsuffix (ptrtype *__ptr, \
+ intype __b, const int __c) \
+{ \
+ union { intype __i; \
+ __builtin_aarch64_simd_ci __o; } __temp = { __b }; \
+ __builtin_aarch64_st3_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
+ __ptr, __temp.__o, __c); \
+}
+
+__ST3_LANE_FUNC (float32x4x3_t, float32_t, v4sf, sf, f32)
+__ST3_LANE_FUNC (float64x2x3_t, float64_t, v2df, df, f64)
+__ST3_LANE_FUNC (poly8x16x3_t, poly8_t, v16qi, qi, p8)
+__ST3_LANE_FUNC (poly16x8x3_t, poly16_t, v8hi, hi, p16)
+__ST3_LANE_FUNC (int8x16x3_t, int8_t, v16qi, qi, s8)
+__ST3_LANE_FUNC (int16x8x3_t, int16_t, v8hi, hi, s16)
+__ST3_LANE_FUNC (int32x4x3_t, int32_t, v4si, si, s32)
+__ST3_LANE_FUNC (int64x2x3_t, int64_t, v2di, di, s64)
+__ST3_LANE_FUNC (uint8x16x3_t, uint8_t, v16qi, qi, u8)
+__ST3_LANE_FUNC (uint16x8x3_t, uint16_t, v8hi, hi, u16)
+__ST3_LANE_FUNC (uint32x4x3_t, uint32_t, v4si, si, u32)
+__ST3_LANE_FUNC (uint64x2x3_t, uint64_t, v2di, di, u64)
+
+#define __ST4_LANE_FUNC(intype, largetype, ptrtype, \
+ mode, ptr_mode, funcsuffix, signedtype) \
+__extension__ static __inline void \
+__attribute__ ((__always_inline__)) \
+vst4_lane_ ## funcsuffix (ptrtype *__ptr, \
+ intype __b, const int __c) \
+{ \
+ __builtin_aarch64_simd_xi __o; \
+ largetype __temp; \
+ __temp.val[0] \
+ = vcombine_##funcsuffix (__b.val[0], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __temp.val[1] \
+ = vcombine_##funcsuffix (__b.val[1], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __temp.val[2] \
+ = vcombine_##funcsuffix (__b.val[2], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __temp.val[3] \
+ = vcombine_##funcsuffix (__b.val[3], \
+ vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
+ __o = __builtin_aarch64_set_qregxi##mode (__o, \
+ (signedtype) __temp.val[0], 0); \
+ __o = __builtin_aarch64_set_qregxi##mode (__o, \
+ (signedtype) __temp.val[1], 1); \
+ __o = __builtin_aarch64_set_qregxi##mode (__o, \
+ (signedtype) __temp.val[2], 2); \
+ __o = __builtin_aarch64_set_qregxi##mode (__o, \
+ (signedtype) __temp.val[3], 3); \
+ __builtin_aarch64_st4_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
+ __ptr, __o, __c); \
+}
+
+__ST4_LANE_FUNC (float32x2x4_t, float32x4x4_t, float32_t, v4sf, sf, f32,
+ float32x4_t)
+__ST4_LANE_FUNC (float64x1x4_t, float64x2x4_t, float64_t, v2df, df, f64,
+ float64x2_t)
+__ST4_LANE_FUNC (poly8x8x4_t, poly8x16x4_t, poly8_t, v16qi, qi, p8, int8x16_t)
+__ST4_LANE_FUNC (poly16x4x4_t, poly16x8x4_t, poly16_t, v8hi, hi, p16,
+ int16x8_t)
+__ST4_LANE_FUNC (int8x8x4_t, int8x16x4_t, int8_t, v16qi, qi, s8, int8x16_t)
+__ST4_LANE_FUNC (int16x4x4_t, int16x8x4_t, int16_t, v8hi, hi, s16, int16x8_t)
+__ST4_LANE_FUNC (int32x2x4_t, int32x4x4_t, int32_t, v4si, si, s32, int32x4_t)
+__ST4_LANE_FUNC (int64x1x4_t, int64x2x4_t, int64_t, v2di, di, s64, int64x2_t)
+__ST4_LANE_FUNC (uint8x8x4_t, uint8x16x4_t, uint8_t, v16qi, qi, u8, int8x16_t)
+__ST4_LANE_FUNC (uint16x4x4_t, uint16x8x4_t, uint16_t, v8hi, hi, u16,
+ int16x8_t)
+__ST4_LANE_FUNC (uint32x2x4_t, uint32x4x4_t, uint32_t, v4si, si, u32,
+ int32x4_t)
+__ST4_LANE_FUNC (uint64x1x4_t, uint64x2x4_t, uint64_t, v2di, di, u64,
+ int64x2_t)
+
+#undef __ST4_LANE_FUNC
+#define __ST4_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
+__extension__ static __inline void \
+__attribute__ ((__always_inline__)) \
+vst4q_lane_ ## funcsuffix (ptrtype *__ptr, \
+ intype __b, const int __c) \
+{ \
+ union { intype __i; \
+ __builtin_aarch64_simd_xi __o; } __temp = { __b }; \
+ __builtin_aarch64_st4_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
+ __ptr, __temp.__o, __c); \
+}
+
+__ST4_LANE_FUNC (float32x4x4_t, float32_t, v4sf, sf, f32)
+__ST4_LANE_FUNC (float64x2x4_t, float64_t, v2df, df, f64)
+__ST4_LANE_FUNC (poly8x16x4_t, poly8_t, v16qi, qi, p8)
+__ST4_LANE_FUNC (poly16x8x4_t, poly16_t, v8hi, hi, p16)
+__ST4_LANE_FUNC (int8x16x4_t, int8_t, v16qi, qi, s8)
+__ST4_LANE_FUNC (int16x8x4_t, int16_t, v8hi, hi, s16)
+__ST4_LANE_FUNC (int32x4x4_t, int32_t, v4si, si, s32)
+__ST4_LANE_FUNC (int64x2x4_t, int64_t, v2di, di, s64)
+__ST4_LANE_FUNC (uint8x16x4_t, uint8_t, v16qi, qi, u8)
+__ST4_LANE_FUNC (uint16x8x4_t, uint16_t, v8hi, hi, u16)
+__ST4_LANE_FUNC (uint32x4x4_t, uint32_t, v4si, si, u32)
+__ST4_LANE_FUNC (uint64x2x4_t, uint64_t, v2di, di, u64)
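
For illustration, a usage sketch of the 64-bit ("d") forms (hypothetical user code): the first macro above widens each half of the pair with vcombine_f32 and vcreate_f32, packs the OI register tuple, and calls the st2_lane builtin, while the 128-bit ("q") forms reuse the tuple directly through the union-based macro.

#include <arm_neon.h>

/* Hypothetical example: store lane 1 of a pair of 64-bit vectors.  Only the
   two selected elements are written to p; the zero-filled upper halves
   created by vcombine and vcreate never reach memory.  */
void
store_d_lane (float32_t *p, float32x2x2_t v)
{
  vst2_lane_f32 (p, v, 1);
}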
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vaddlv_s32 (int32x2_t a)
@@ -25614,10 +24784,880 @@ vuqaddd_s64 (int64x1_t __a, uint64x1_t __b)
/* vuzp */
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vuzp1_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vuzp1_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
+#endif
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vuzp1_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
+#endif
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuzp1_s8 (int8x8_t __a, int8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
+#endif
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuzp1_s16 (int16x4_t __a, int16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
+#endif
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuzp1_s32 (int32x2_t __a, int32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vuzp1_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
+#endif
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vuzp1_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
+#endif
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vuzp1_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vuzp1q_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
+#endif
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vuzp1q_f64 (float64x2_t __a, float64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vuzp1q_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
+#endif
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vuzp1q_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
+#endif
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuzp1q_s8 (int8x16_t __a, int8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b,
+ (uint8x16_t) {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
+#else
+ return __builtin_shuffle (__a, __b,
+ (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
+#endif
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuzp1q_s16 (int16x8_t __a, int16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
+#endif
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuzp1q_s32 (int32x4_t __a, int32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
+#endif
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuzp1q_s64 (int64x2_t __a, int64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vuzp1q_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b,
+ (uint8x16_t) {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
+#else
+ return __builtin_shuffle (__a, __b,
+ (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
+#endif
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vuzp1q_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
+#endif
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vuzp1q_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
+#endif
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vuzp1q_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vuzp2_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vuzp2_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
+#endif
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vuzp2_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
+#endif
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuzp2_s8 (int8x8_t __a, int8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
+#endif
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuzp2_s16 (int16x4_t __a, int16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
+#endif
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuzp2_s32 (int32x2_t __a, int32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vuzp2_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
+#endif
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vuzp2_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
+#endif
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vuzp2_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vuzp2q_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
+#endif
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vuzp2q_f64 (float64x2_t __a, float64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vuzp2q_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b,
+ (uint8x16_t) {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
+#else
+ return __builtin_shuffle (__a, __b,
+ (uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
+#endif
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vuzp2q_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
+#endif
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuzp2q_s8 (int8x16_t __a, int8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b,
+ (uint8x16_t) {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
+#else
+ return __builtin_shuffle (__a, __b,
+ (uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
+#endif
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuzp2q_s16 (int16x8_t __a, int16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
+#endif
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuzp2q_s32 (int32x4_t __a, int32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
+#endif
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuzp2q_s64 (int64x2_t __a, int64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vuzp2q_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
+#endif
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vuzp2q_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
+#endif
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vuzp2q_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
+#endif
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vuzp2q_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
+#endif
+}
+
__INTERLEAVE_LIST (uzp)
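
A minimal usage sketch of the new vuzp1/vuzp2 intrinsics (illustrative only;
it assumes the arm_neon.h from this patch on an AArch64 target).  vuzp1
gathers the even-indexed elements of the concatenation of its operands and
vuzp2 the odd-indexed ones; the alternative index vectors under
__AARCH64EB__ merely compensate for GCC's reversed internal lane numbering
on big-endian, so the architectural result is the same either way.

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  int32_t buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  int32x4_t a = vld1q_s32 (buf);        /* a = {0, 1, 2, 3} */
  int32x4_t b = vld1q_s32 (buf + 4);    /* b = {4, 5, 6, 7} */
  int32x4_t even = vuzp1q_s32 (a, b);   /* {0, 2, 4, 6} */
  int32x4_t odd = vuzp2q_s32 (a, b);    /* {1, 3, 5, 7} */
  int32_t out[8];
  vst1q_s32 (out, even);
  vst1q_s32 (out + 4, odd);
  for (int i = 0; i < 8; i++)
    printf ("%d ", out[i]);             /* prints: 0 2 4 6 1 3 5 7 */
  printf ("\n");
  return 0;
}
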
/* vzip */
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vzip1_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vzip1_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {12, 4, 13, 5, 14, 6, 15, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
+#endif
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vzip1_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {6, 2, 7, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5});
+#endif
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vzip1_s8 (int8x8_t __a, int8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {12, 4, 13, 5, 14, 6, 15, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
+#endif
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vzip1_s16 (int16x4_t __a, int16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {6, 2, 7, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5});
+#endif
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vzip1_s32 (int32x2_t __a, int32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vzip1_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {12, 4, 13, 5, 14, 6, 15, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
+#endif
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vzip1_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {6, 2, 7, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5});
+#endif
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vzip1_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vzip1q_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {6, 2, 7, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5});
+#endif
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vzip1q_f64 (float64x2_t __a, float64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vzip1q_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
+#endif
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vzip1q_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t)
+ {12, 4, 13, 5, 14, 6, 15, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
+#endif
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vzip1q_s8 (int8x16_t __a, int8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
+#endif
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vzip1q_s16 (int16x8_t __a, int16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t)
+ {12, 4, 13, 5, 14, 6, 15, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
+#endif
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vzip1q_s32 (int32x4_t __a, int32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {6, 2, 7, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5});
+#endif
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vzip1q_s64 (int64x2_t __a, int64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vzip1q_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
+#endif
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vzip1q_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t)
+ {12, 4, 13, 5, 14, 6, 15, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
+#endif
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vzip1q_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {6, 2, 7, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5});
+#endif
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vzip1q_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
+#endif
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vzip2_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vzip2_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15});
+#endif
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vzip2_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 5, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7});
+#endif
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vzip2_s8 (int8x8_t __a, int8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15});
+#endif
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vzip2_s16 (int16x4_t __a, int16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 5, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7});
+#endif
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vzip2_s32 (int32x2_t __a, int32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vzip2_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15});
+#endif
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vzip2_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 5, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7});
+#endif
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vzip2_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vzip2q_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 5, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {2, 6, 3, 7});
+#endif
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vzip2q_f64 (float64x2_t __a, float64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vzip2q_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31});
+#endif
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vzip2q_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t)
+ {4, 12, 5, 13, 6, 14, 7, 15});
+#endif
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vzip2q_s8 (int8x16_t __a, int8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31});
+#endif
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vzip2q_s16 (int16x8_t __a, int16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t)
+ {4, 12, 5, 13, 6, 14, 7, 15});
+#endif
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vzip2q_s32 (int32x4_t __a, int32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 5, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {2, 6, 3, 7});
+#endif
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vzip2q_s64 (int64x2_t __a, int64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
+#endif
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vzip2q_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint8x16_t)
+ {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31});
+#endif
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vzip2q_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t)
+ {4, 12, 5, 13, 6, 14, 7, 15});
+#endif
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vzip2q_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 5, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint32x4_t) {2, 6, 3, 7});
+#endif
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vzip2q_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
+#endif
+}
+
__INTERLEAVE_LIST (zip)
#undef __INTERLEAVE_LIST
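
Likewise, a small sketch of the vzip1/vzip2 counterparts added above (same
assumptions as the vuzp example; values are illustrative).  vzip1
interleaves the low halves of its two operands, vzip2 the high halves.

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  int32x4_t a = { 0, 1, 2, 3 };
  int32x4_t b = { 4, 5, 6, 7 };
  int32x4_t lo = vzip1q_s32 (a, b);     /* {a0, b0, a1, b1} = {0, 4, 1, 5} */
  int32x4_t hi = vzip2q_s32 (a, b);     /* {a2, b2, a3, b3} = {2, 6, 3, 7} */
  printf ("%d %d %d %d  %d %d %d %d\n",
          vgetq_lane_s32 (lo, 0), vgetq_lane_s32 (lo, 1),
          vgetq_lane_s32 (lo, 2), vgetq_lane_s32 (lo, 3),
          vgetq_lane_s32 (hi, 0), vgetq_lane_s32 (hi, 1),
          vgetq_lane_s32 (hi, 2), vgetq_lane_s32 (hi, 3));
  return 0;
}
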
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index fd1eb482f0f..c537c3780ee 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -564,6 +564,32 @@
(define_mode_attr VSTRUCT_DREG [(OI "TI") (CI "EI") (XI "OI")])
+;; Mode of pair of elements for each vector mode, to define transfer
+;; size for structure lane/dup loads and stores.
+(define_mode_attr V_TWO_ELEM [(V8QI "HI") (V16QI "HI")
+ (V4HI "SI") (V8HI "SI")
+ (V2SI "V2SI") (V4SI "V2SI")
+ (DI "V2DI") (V2DI "V2DI")
+ (V2SF "V2SF") (V4SF "V2SF")
+ (DF "V2DI") (V2DF "V2DI")])
+
+;; Similar, for three elements.
+(define_mode_attr V_THREE_ELEM [(V8QI "BLK") (V16QI "BLK")
+ (V4HI "BLK") (V8HI "BLK")
+ (V2SI "BLK") (V4SI "BLK")
+ (DI "EI") (V2DI "EI")
+ (V2SF "BLK") (V4SF "BLK")
+ (DF "EI") (V2DF "EI")])
+
+;; Similar, for four elements.
+(define_mode_attr V_FOUR_ELEM [(V8QI "SI") (V16QI "SI")
+ (V4HI "V4HI") (V8HI "V4HI")
+ (V2SI "V4SI") (V4SI "V4SI")
+ (DI "OI") (V2DI "OI")
+ (V2SF "V4SF") (V4SF "V4SF")
+ (DF "OI") (V2DF "OI")])
+
+
;; Mode for atomic operation suffixes
(define_mode_attr atomic_sfx
[(QI "b") (HI "h") (SI "") (DI "")])
diff --git a/gcc/config/arc/arc.c b/gcc/config/arc/arc.c
index 987a7c9a55a..58d95d832c5 100644
--- a/gcc/config/arc/arc.c
+++ b/gcc/config/arc/arc.c
@@ -996,7 +996,7 @@ arc_select_cc_mode (enum rtx_code op, rtx x, rtx y)
if (GET_MODE_CLASS (mode) == MODE_INT
&& y == const0_rtx
&& (op == EQ || op == NE
- || ((op == LT || op == GE) && GET_MODE_SIZE (GET_MODE (x) <= 4))))
+ || ((op == LT || op == GE) && GET_MODE_SIZE (GET_MODE (x)) <= 4))))
return CC_ZNmode;
/* add.f for if (a+b) */
@@ -1135,31 +1135,33 @@ arc_init_reg_tables (void)
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
- switch (GET_MODE_CLASS (i))
+ enum machine_mode m = (enum machine_mode) i;
+
+ switch (GET_MODE_CLASS (m))
{
case MODE_INT:
case MODE_PARTIAL_INT:
case MODE_COMPLEX_INT:
- if (GET_MODE_SIZE (i) <= 4)
+ if (GET_MODE_SIZE (m) <= 4)
arc_mode_class[i] = 1 << (int) S_MODE;
- else if (GET_MODE_SIZE (i) == 8)
+ else if (GET_MODE_SIZE (m) == 8)
arc_mode_class[i] = 1 << (int) D_MODE;
- else if (GET_MODE_SIZE (i) == 16)
+ else if (GET_MODE_SIZE (m) == 16)
arc_mode_class[i] = 1 << (int) T_MODE;
- else if (GET_MODE_SIZE (i) == 32)
+ else if (GET_MODE_SIZE (m) == 32)
arc_mode_class[i] = 1 << (int) O_MODE;
else
arc_mode_class[i] = 0;
break;
case MODE_FLOAT:
case MODE_COMPLEX_FLOAT:
- if (GET_MODE_SIZE (i) <= 4)
+ if (GET_MODE_SIZE (m) <= 4)
arc_mode_class[i] = 1 << (int) SF_MODE;
- else if (GET_MODE_SIZE (i) == 8)
+ else if (GET_MODE_SIZE (m) == 8)
arc_mode_class[i] = 1 << (int) DF_MODE;
- else if (GET_MODE_SIZE (i) == 16)
+ else if (GET_MODE_SIZE (m) == 16)
arc_mode_class[i] = 1 << (int) TF_MODE;
- else if (GET_MODE_SIZE (i) == 32)
+ else if (GET_MODE_SIZE (m) == 32)
arc_mode_class[i] = 1 << (int) OF_MODE;
else
arc_mode_class[i] = 0;
diff --git a/gcc/config/arc/arc.opt b/gcc/config/arc/arc.opt
index 2deb9e77e13..1e98db97095 100644
--- a/gcc/config/arc/arc.opt
+++ b/gcc/config/arc/arc.opt
@@ -340,9 +340,9 @@ Pass -marclinux_prof option through to linker.
;; lra is still unproven for ARC, so allow to fall back to reload with -mno-lra.
;Target InverseMask(NO_LRA)
-mlra
; lra still won't allow to configure libgcc; see PR rtl-optimization/55464.
; so don't enable by default.
+mlra
Target Mask(LRA)
Enable lra
diff --git a/gcc/config/i386/sol2-bi.h b/gcc/config/i386/sol2-bi.h
deleted file mode 100644
index 66d17801f03..00000000000
--- a/gcc/config/i386/sol2-bi.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* Definitions of target machine for GCC, for bi-arch Solaris 2/x86.
- Copyright (C) 2004-2014 Free Software Foundation, Inc.
- Contributed by CodeSourcery, LLC.
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 3, or (at your option)
-any later version.
-
-GCC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-/* Override i386/sol2.h version: return 8-byte vectors in MMX registers if
- possible, matching Sun Studio 12 Update 1+ compilers and other x86
- targets. */
-#undef TARGET_SUBTARGET_DEFAULT
-#define TARGET_SUBTARGET_DEFAULT \
- (MASK_80387 | MASK_IEEE_FP | MASK_FLOAT_RETURNS)
-
-#define SUBTARGET_OPTIMIZATION_OPTIONS \
- { OPT_LEVELS_1_PLUS, OPT_momit_leaf_frame_pointer, NULL, 1 }
-
-/* GNU as understands --32 and --64, but the native Solaris
- assembler requires -xarch=generic or -xarch=generic64 instead. */
-#ifdef USE_GAS
-#define ASM_CPU32_DEFAULT_SPEC "--32"
-#define ASM_CPU64_DEFAULT_SPEC "--64"
-#else
-#define ASM_CPU32_DEFAULT_SPEC "-xarch=generic"
-#define ASM_CPU64_DEFAULT_SPEC "-xarch=generic64"
-#endif
-
-#undef ASM_CPU_SPEC
-#define ASM_CPU_SPEC "%(asm_cpu_default)"
-
-/* Don't let i386/x86-64.h override i386/sol2.h version. Still cannot use
- -K PIC with the Solaris 10+ assembler, it gives many warnings:
- Absolute relocation is used for symbol "<symbol>" */
-#undef ASM_SPEC
-#define ASM_SPEC ASM_SPEC_BASE
-
-/* We do not need to search a special directory for startup files. */
-#undef MD_STARTFILE_PREFIX
-
-#define DEFAULT_ARCH32_P !TARGET_64BIT_DEFAULT
-
-#define ARCH64_SUBDIR "amd64"
-
-#ifdef USE_GLD
-/* Since binutils 2.21, GNU ld supports new *_sol2 emulations to strictly
- follow the Solaris 2 ABI. Prefer them if present. */
-#ifdef HAVE_LD_SOL2_EMULATION
-#define ARCH32_EMULATION "elf_i386_sol2"
-#define ARCH64_EMULATION "elf_x86_64_sol2"
-#else
-#define ARCH32_EMULATION "elf_i386"
-#define ARCH64_EMULATION "elf_x86_64"
-#endif
-#endif
-
-#undef ASM_COMMENT_START
-#define ASM_COMMENT_START "/"
-
-/* The native Solaris assembler can't calculate the difference between
- symbols in different sections, which causes problems for -fPIC jump
- tables in .rodata. */
-#ifndef HAVE_AS_IX86_DIFF_SECT_DELTA
-#undef JUMP_TABLES_IN_TEXT_SECTION
-#define JUMP_TABLES_IN_TEXT_SECTION 1
-
-/* The native Solaris assembler cannot handle the SYMBOL-. syntax, but
- requires SYMBOL@rel/@rel64 instead. */
-#define ASM_OUTPUT_DWARF_PCREL(FILE, SIZE, LABEL) \
- do { \
- fputs (integer_asm_op (SIZE, FALSE), FILE); \
- assemble_name (FILE, LABEL); \
- fputs (SIZE == 8 ? "@rel64" : "@rel", FILE); \
- } while (0)
-#endif
-
-/* As in sol2.h, override the default from i386/x86-64.h to work around
- Sun as TLS bug. */
-#undef ASM_OUTPUT_ALIGNED_COMMON
-#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
- do \
- { \
- if (TARGET_SUN_TLS \
- && in_section \
- && ((in_section->common.flags & SECTION_TLS) == SECTION_TLS)) \
- switch_to_section (bss_section); \
- x86_elf_aligned_common (FILE, NAME, SIZE, ALIGN); \
- } \
- while (0)
-
-#define USE_IX86_FRAME_POINTER 1
-#define USE_X86_64_FRAME_POINTER 1
-
-#undef NO_PROFILE_COUNTERS
-
-#undef MCOUNT_NAME
-#define MCOUNT_NAME "_mcount"
diff --git a/gcc/config/i386/sol2.h b/gcc/config/i386/sol2.h
index 6676941f67a..9c3a6f49662 100644
--- a/gcc/config/i386/sol2.h
+++ b/gcc/config/i386/sol2.h
@@ -18,12 +18,8 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* Augment i386/unix.h version to return 8-byte vectors in memory, matching
- Sun Studio compilers until version 12, the only ones supported on
- Solaris 9. */
-#undef TARGET_SUBTARGET_DEFAULT
-#define TARGET_SUBTARGET_DEFAULT \
- (MASK_80387 | MASK_IEEE_FP | MASK_FLOAT_RETURNS | MASK_VECT8_RETURNS)
+#define SUBTARGET_OPTIMIZATION_OPTIONS \
+ { OPT_LEVELS_1_PLUS, OPT_momit_leaf_frame_pointer, NULL, 1 }
/* Old versions of the Solaris assembler can not handle the difference of
labels in different sections, so force DW_EH_PE_datarel if so. */
@@ -50,27 +46,46 @@ along with GCC; see the file COPYING3. If not see
#undef TARGET_SUN_TLS
#define TARGET_SUN_TLS 1
-#undef SIZE_TYPE
-#define SIZE_TYPE "unsigned int"
-
-#undef PTRDIFF_TYPE
-#define PTRDIFF_TYPE "int"
-
/* Solaris 2/Intel as chokes on #line directives before Solaris 10. */
#undef CPP_SPEC
#define CPP_SPEC "%{,assembler-with-cpp:-P} %(cpp_subtarget)"
-#define ASM_CPU_DEFAULT_SPEC ""
+/* GNU as understands --32 and --64, but the native Solaris
+ assembler requires -xarch=generic or -xarch=generic64 instead. */
+#ifdef USE_GAS
+#define ASM_CPU32_DEFAULT_SPEC "--32"
+#define ASM_CPU64_DEFAULT_SPEC "--64"
+#else
+#define ASM_CPU32_DEFAULT_SPEC "-xarch=generic"
+#define ASM_CPU64_DEFAULT_SPEC "-xarch=generic64"
+#endif
+
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC "%(asm_cpu_default)"
-#define ASM_CPU_SPEC ""
-
-/* Don't include ASM_PIC_SPEC. While the Solaris 9 assembler accepts
- -K PIC, it gives many warnings:
- R_386_32 relocation is used for symbol "<symbol>"
+/* Don't include ASM_PIC_SPEC. While the Solaris 10+ assembler accepts -K PIC,
+ it gives many warnings:
+ Absolute relocation is used for symbol "<symbol>"
GNU as doesn't recognize -K at all. */
#undef ASM_SPEC
#define ASM_SPEC ASM_SPEC_BASE
+#define DEFAULT_ARCH32_P !TARGET_64BIT_DEFAULT
+
+#define ARCH64_SUBDIR "amd64"
+
+#ifdef USE_GLD
+/* Since binutils 2.21, GNU ld supports new *_sol2 emulations to strictly
+ follow the Solaris 2 ABI. Prefer them if present. */
+#ifdef HAVE_LD_SOL2_EMULATION
+#define ARCH32_EMULATION "elf_i386_sol2"
+#define ARCH64_EMULATION "elf_x86_64_sol2"
+#else
+#define ARCH32_EMULATION "elf_i386"
+#define ARCH64_EMULATION "elf_x86_64"
+#endif
+#endif
+
#undef ENDFILE_SPEC
#define ENDFILE_SPEC \
"%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
@@ -84,23 +99,39 @@ along with GCC; see the file COPYING3. If not see
{ "asm_cpu", ASM_CPU_SPEC }, \
{ "asm_cpu_default", ASM_CPU_DEFAULT_SPEC }, \
-#undef SUBTARGET_EXTRA_SPECS
-#define SUBTARGET_EXTRA_SPECS \
- { "startfile_arch", STARTFILE_ARCH_SPEC }, \
- { "link_arch", LINK_ARCH_SPEC }, \
- SUBTARGET_CPU_EXTRA_SPECS
-
/* Register the Solaris-specific #pragma directives. */
#define REGISTER_SUBTARGET_PRAGMAS() solaris_register_pragmas ()
#undef LOCAL_LABEL_PREFIX
#define LOCAL_LABEL_PREFIX "."
+/* The Solaris 10 FCS as doesn't accept "#" comments, while later versions
+ do. */
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START "/"
+
/* The 32-bit Solaris assembler does not support .quad. Do not use it. */
#ifndef HAVE_AS_IX86_QUAD
#undef ASM_QUAD
#endif
+/* The native Solaris assembler can't calculate the difference between
+ symbols in different sections, which causes problems for -fPIC jump
+ tables in .rodata. */
+#ifndef HAVE_AS_IX86_DIFF_SECT_DELTA
+#undef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* The native Solaris assembler cannot handle the SYMBOL-. syntax, but
+ requires SYMBOL@rel/@rel64 instead. */
+#define ASM_OUTPUT_DWARF_PCREL(FILE, SIZE, LABEL) \
+ do { \
+ fputs (integer_asm_op (SIZE, FALSE), FILE); \
+ assemble_name (FILE, LABEL); \
+ fputs (SIZE == 8 ? "@rel64" : "@rel", FILE); \
+ } while (0)
+#endif
+
/* The Solaris assembler wants a .local for non-exported aliases. */
#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL, TARGET) \
do { \
@@ -148,6 +179,20 @@ along with GCC; see the file COPYING3. If not see
while (0)
#endif /* !USE_GAS */
+/* As in sparc/sol2.h, override the default from i386/x86-64.h to work
+ around Sun as TLS bug. */
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (TARGET_SUN_TLS \
+ && in_section \
+ && ((in_section->common.flags & SECTION_TLS) == SECTION_TLS)) \
+ switch_to_section (bss_section); \
+ x86_elf_aligned_common (FILE, NAME, SIZE, ALIGN); \
+ } \
+ while (0)
+
/* Output a simple call for .init/.fini. */
#define ASM_OUTPUT_CALL(FILE, FN) \
do \
@@ -174,6 +219,14 @@ along with GCC; see the file COPYING3. If not see
#define DTORS_SECTION_ASM_OP "\t.section\t.dtors, \"aw\""
#endif
+#define USE_IX86_FRAME_POINTER 1
+#define USE_X86_64_FRAME_POINTER 1
+
+#undef NO_PROFILE_COUNTERS
+
+#undef MCOUNT_NAME
+#define MCOUNT_NAME "_mcount"
+
/* We do not need NT_VERSION notes. */
#undef X86_FILE_START_VERSION_DIRECTIVE
#define X86_FILE_START_VERSION_DIRECTIVE false
diff --git a/gcc/config/i386/t-sol2-64 b/gcc/config/i386/t-sol2
index 4e70f0bed27..4e70f0bed27 100644
--- a/gcc/config/i386/t-sol2-64
+++ b/gcc/config/i386/t-sol2
diff --git a/gcc/config/m32c/m32c.c b/gcc/config/m32c/m32c.c
index 57cfb20ee16..837c22b2d77 100644
--- a/gcc/config/m32c/m32c.c
+++ b/gcc/config/m32c/m32c.c
@@ -3159,7 +3159,7 @@ m32c_illegal_subreg_p (rtx op)
{
int offset;
unsigned int i;
- int src_mode, dest_mode;
+ enum machine_mode src_mode, dest_mode;
if (GET_CODE (op) == MEM
&& ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
diff --git a/gcc/config/m32r/m32r.c b/gcc/config/m32r/m32r.c
index 83bc3a7bf3a..2b84b0ff1eb 100644
--- a/gcc/config/m32r/m32r.c
+++ b/gcc/config/m32r/m32r.c
@@ -282,31 +282,33 @@ init_reg_tables (void)
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
- switch (GET_MODE_CLASS (i))
+ enum machine_mode m = (enum machine_mode) i;
+
+ switch (GET_MODE_CLASS (m))
{
case MODE_INT:
case MODE_PARTIAL_INT:
case MODE_COMPLEX_INT:
- if (GET_MODE_SIZE (i) <= 4)
+ if (GET_MODE_SIZE (m) <= 4)
m32r_mode_class[i] = 1 << (int) S_MODE;
- else if (GET_MODE_SIZE (i) == 8)
+ else if (GET_MODE_SIZE (m) == 8)
m32r_mode_class[i] = 1 << (int) D_MODE;
- else if (GET_MODE_SIZE (i) == 16)
+ else if (GET_MODE_SIZE (m) == 16)
m32r_mode_class[i] = 1 << (int) T_MODE;
- else if (GET_MODE_SIZE (i) == 32)
+ else if (GET_MODE_SIZE (m) == 32)
m32r_mode_class[i] = 1 << (int) O_MODE;
else
m32r_mode_class[i] = 0;
break;
case MODE_FLOAT:
case MODE_COMPLEX_FLOAT:
- if (GET_MODE_SIZE (i) <= 4)
+ if (GET_MODE_SIZE (m) <= 4)
m32r_mode_class[i] = 1 << (int) SF_MODE;
- else if (GET_MODE_SIZE (i) == 8)
+ else if (GET_MODE_SIZE (m) == 8)
m32r_mode_class[i] = 1 << (int) DF_MODE;
- else if (GET_MODE_SIZE (i) == 16)
+ else if (GET_MODE_SIZE (m) == 16)
m32r_mode_class[i] = 1 << (int) TF_MODE;
- else if (GET_MODE_SIZE (i) == 32)
+ else if (GET_MODE_SIZE (m) == 32)
m32r_mode_class[i] = 1 << (int) OF_MODE;
else
m32r_mode_class[i] = 0;
diff --git a/gcc/config/msp430/msp430.c b/gcc/config/msp430/msp430.c
index a637e27d41b..1ec96526efd 100644
--- a/gcc/config/msp430/msp430.c
+++ b/gcc/config/msp430/msp430.c
@@ -2162,7 +2162,7 @@ msp430_print_operand (FILE * file, rtx op, int letter)
because builtins are expanded before the frame layout is determined. */
fprintf (file, "%d",
msp430_initial_elimination_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM)
- - 2);
+ - (TARGET_LARGE ? 4 : 2));
return;
case 'J':
diff --git a/gcc/config/msp430/msp430.md b/gcc/config/msp430/msp430.md
index 5d930c37901..74a98b48019 100644
--- a/gcc/config/msp430/msp430.md
+++ b/gcc/config/msp430/msp430.md
@@ -1321,7 +1321,7 @@
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%0"))
(sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
- "optimize > 2 && msp430_hwmult_type != NONE && ! msp430_is_interrupt_func ()"
+ "optimize > 2 && msp430_hwmult_type != NONE"
"*
if (msp430_use_f5_series_hwmult ())
return \"PUSH.W sr { DINT { MOV.W %1, &0x04C2 { MOV.W %2, &0x04C8 { MOV.W &0x04CA, %L0 { MOV.W &0x04CC, %H0 { POP.W sr\";
@@ -1365,6 +1365,6 @@
if (msp430_use_f5_series_hwmult ())
return \"PUSH.W sr { DINT { MOV.W %L1, &0x04D0 { MOV.W %H1, &0x04D2 { MOV.W %L2, &0x04E0 { MOV.W %H2, &0x04E2 { MOV.W &0x04E4, %A0 { MOV.W &0x04E6, %B0 { MOV.W &0x04E8, %C0 { MOV.W &0x04EA, %D0 { POP.W sr\";
else
- return \"PUSH.W sr { DINT { MOV.W %L1, &0x0140 { MOV.W %H1, &0x0141 { MOV.W %L2, &0x0150 { MOV.W %H2, &0x0152 { MOV.W &0x0154, %A0 { MOV.W &0x0156, %B0 { MOV.W &0x0158, %C0 { MOV.W &0x015A, %D0 { POP.W sr\";
+ return \"PUSH.W sr { DINT { MOV.W %L1, &0x0140 { MOV.W %H1, &0x0142 { MOV.W %L2, &0x0150 { MOV.W %H2, &0x0152 { MOV.W &0x0154, %A0 { MOV.W &0x0156, %B0 { MOV.W &0x0158, %C0 { MOV.W &0x015A, %D0 { POP.W sr\";
"
)
diff --git a/gcc/config/rs6000/predicates.md b/gcc/config/rs6000/predicates.md
index 1616b888c9c..47050c3d03e 100644
--- a/gcc/config/rs6000/predicates.md
+++ b/gcc/config/rs6000/predicates.md
@@ -629,14 +629,14 @@
(match_test "offsettable_nonstrict_memref_p (op)")))
;; Return 1 if the operand is suitable for load/store quad memory.
-;; This predicate only checks for non-atomic loads/stores.
+;; This predicate only checks for non-atomic loads/stores (not lqarx/stqcx).
(define_predicate "quad_memory_operand"
(match_code "mem")
{
rtx addr, op0, op1;
int ret;
- if (!TARGET_QUAD_MEMORY)
+ if (!TARGET_QUAD_MEMORY && !TARGET_SYNC_TI)
ret = 0;
else if (!memory_operand (op, mode))
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 937eabf3727..f6da9b3a382 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -134,6 +134,7 @@
UNSPEC_DIVEUO
UNSPEC_UNPACK_128BIT
UNSPEC_PACK_128BIT
+ UNSPEC_LSQ
])
;;
diff --git a/gcc/config/rs6000/sync.md b/gcc/config/rs6000/sync.md
index 7db439074cd..63152ed04d2 100644
--- a/gcc/config/rs6000/sync.md
+++ b/gcc/config/rs6000/sync.md
@@ -107,10 +107,17 @@
"isync"
[(set_attr "type" "isync")])
+;; Types that we should provide atomic instructions for.
+(define_mode_iterator AINT [QI
+ HI
+ SI
+ (DI "TARGET_POWERPC64")
+ (TI "TARGET_SYNC_TI")])
+
;; The control dependency used for load dependency described
;; in B.2.3 of the Power ISA 2.06B.
(define_insn "loadsync_<mode>"
- [(unspec_volatile:BLK [(match_operand:INT1 0 "register_operand" "r")]
+ [(unspec_volatile:BLK [(match_operand:AINT 0 "register_operand" "r")]
UNSPECV_ISYNC)
(clobber (match_scratch:CC 1 "=y"))]
""
@@ -118,18 +125,56 @@
[(set_attr "type" "isync")
(set_attr "length" "12")])
+(define_insn "load_quadpti"
+ [(set (match_operand:PTI 0 "quad_int_reg_operand" "=&r")
+ (unspec:PTI
+ [(match_operand:TI 1 "quad_memory_operand" "wQ")] UNSPEC_LSQ))]
+ "TARGET_SYNC_TI
+ && !reg_mentioned_p (operands[0], operands[1])"
+ "lq %0,%1"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
(define_expand "atomic_load<mode>"
- [(set (match_operand:INT1 0 "register_operand" "") ;; output
- (match_operand:INT1 1 "memory_operand" "")) ;; memory
+ [(set (match_operand:AINT 0 "register_operand" "") ;; output
+ (match_operand:AINT 1 "memory_operand" "")) ;; memory
(use (match_operand:SI 2 "const_int_operand" ""))] ;; model
""
{
+ if (<MODE>mode == TImode && !TARGET_SYNC_TI)
+ FAIL;
+
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
if (model == MEMMODEL_SEQ_CST)
emit_insn (gen_hwsync ());
- emit_move_insn (operands[0], operands[1]);
+ if (<MODE>mode != TImode)
+ emit_move_insn (operands[0], operands[1]);
+ else
+ {
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx pti_reg = gen_reg_rtx (PTImode);
+
+ // Can't have indexed address for 'lq'
+ if (indexed_address (XEXP (op1, 0), TImode))
+ {
+ rtx old_addr = XEXP (op1, 0);
+ rtx new_addr = force_reg (Pmode, old_addr);
+ operands[1] = op1 = replace_equiv_address (op1, new_addr);
+ }
+
+ emit_insn (gen_load_quadpti (pti_reg, op1));
+
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (op0, gen_lowpart (TImode, pti_reg));
+ else
+ {
+ emit_move_insn (gen_lowpart (DImode, op0), gen_highpart (DImode, pti_reg));
+ emit_move_insn (gen_highpart (DImode, op0), gen_lowpart (DImode, pti_reg));
+ }
+ }
switch (model)
{
@@ -146,12 +191,24 @@
DONE;
})
+(define_insn "store_quadpti"
+ [(set (match_operand:PTI 0 "quad_memory_operand" "=wQ")
+ (unspec:PTI
+ [(match_operand:PTI 1 "quad_int_reg_operand" "r")] UNSPEC_LSQ))]
+ "TARGET_SYNC_TI"
+ "stq %1,%0"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
(define_expand "atomic_store<mode>"
- [(set (match_operand:INT1 0 "memory_operand" "") ;; memory
- (match_operand:INT1 1 "register_operand" "")) ;; input
+ [(set (match_operand:AINT 0 "memory_operand" "") ;; memory
+ (match_operand:AINT 1 "register_operand" "")) ;; input
(use (match_operand:SI 2 "const_int_operand" ""))] ;; model
""
{
+ if (<MODE>mode == TImode && !TARGET_SYNC_TI)
+ FAIL;
+
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
switch (model)
{
@@ -166,7 +223,33 @@
default:
gcc_unreachable ();
}
- emit_move_insn (operands[0], operands[1]);
+ if (<MODE>mode != TImode)
+ emit_move_insn (operands[0], operands[1]);
+ else
+ {
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx pti_reg = gen_reg_rtx (PTImode);
+
+ // Can't have indexed address for 'stq'
+ if (indexed_address (XEXP (op0, 0), TImode))
+ {
+ rtx old_addr = XEXP (op0, 0);
+ rtx new_addr = force_reg (Pmode, old_addr);
+ operands[0] = op0 = replace_equiv_address (op0, new_addr);
+ }
+
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (pti_reg, gen_lowpart (PTImode, op1));
+ else
+ {
+ emit_move_insn (gen_lowpart (DImode, pti_reg), gen_highpart (DImode, op1));
+ emit_move_insn (gen_highpart (DImode, pti_reg), gen_lowpart (DImode, op1));
+ }
+
+ emit_insn (gen_store_quadpti (gen_lowpart (PTImode, op0), pti_reg));
+ }
+
DONE;
})
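
At the source level these expanders let a 16-byte __atomic access be emitted
inline.  A hedged sketch of the intended use (assumes a compiler with this
patch targeting POWER8, e.g. -mcpu=power8, so that TARGET_SYNC_TI holds; on
other targets the builtins keep going through libatomic):

/* Illustrative only: with -mcpu=power8 these can expand through
   atomic_loadti/atomic_storeti to the new load_quadpti ("lq") and
   store_quadpti ("stq") patterns, bracketed by the usual barriers
   for __ATOMIC_SEQ_CST.  */
__int128 shared_counter;

__int128
read_counter (void)
{
  return __atomic_load_n (&shared_counter, __ATOMIC_SEQ_CST);
}

void
write_counter (__int128 v)
{
  __atomic_store_n (&shared_counter, v, __ATOMIC_SEQ_CST);
}
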
@@ -180,14 +263,6 @@
SI
(DI "TARGET_POWERPC64")])
-;; Types that we should provide atomic instructions for.
-
-(define_mode_iterator AINT [QI
- HI
- SI
- (DI "TARGET_POWERPC64")
- (TI "TARGET_SYNC_TI")])
-
(define_insn "load_locked<mode>"
[(set (match_operand:ATOMIC 0 "int_reg_operand" "=r")
(unspec_volatile:ATOMIC
diff --git a/gcc/config/sol2-10.h b/gcc/config/sol2-10.h
deleted file mode 100644
index 4488a40cba6..00000000000
--- a/gcc/config/sol2-10.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Operating system specific defines to be used when targeting GCC for any
- Solaris 2 system starting from Solaris 10.
- Copyright (C) 2006-2014 Free Software Foundation, Inc.
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 3, or (at your option)
-any later version.
-
-GCC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-/* Solaris 10 has the float and long double forms of math functions.
- We redefine this hook so the version from elfos.h header won't be used. */
-#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION default_libc_has_function
diff --git a/gcc/config/sol2-bi.h b/gcc/config/sol2-bi.h
deleted file mode 100644
index fdb2a28178c..00000000000
--- a/gcc/config/sol2-bi.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* Definitions of target machine for GCC, for bi-arch Solaris 2.
- Copyright (C) 2011-2014 Free Software Foundation, Inc.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it under
- the terms of the GNU General Public License as published by the Free
- Software Foundation; either version 3, or (at your option) any later
- version.
-
- GCC is distributed in the hope that it will be useful, but WITHOUT ANY
- WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for more details.
-
- Under Section 7 of GPL version 3, you are granted additional
- permissions described in the GCC Runtime Library Exception, version
- 3.1, as published by the Free Software Foundation.
-
- You should have received a copy of the GNU General Public License and
- a copy of the GCC Runtime Library Exception along with this program;
- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
- <http://www.gnu.org/licenses/>. */
-
-/* wchar_t is called differently in <wchar.h> for 32 and 64-bit
- compilations. This is called for by SCD 2.4.1, p. 6-83, Figure 6-65
- (32-bit) and p. 6P-10, Figure 6.38 (64-bit). */
-
-#undef WCHAR_TYPE
-#define WCHAR_TYPE (TARGET_64BIT ? "int" : "long int")
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE 32
-
-/* Same for wint_t. See SCD 2.4.1, p. 6-83, Figure 6-66 (32-bit). There's
- no corresponding 64-bit definition, but this is what Solaris 8
- <iso/wchar_iso.h> uses. */
-
-#undef WINT_TYPE
-#define WINT_TYPE (TARGET_64BIT ? "int" : "long int")
-
-#undef WINT_TYPE_SIZE
-#define WINT_TYPE_SIZE 32
-
-#if DEFAULT_ARCH32_P
-#define MULTILIB_DEFAULTS { "m32" }
-#else
-#define MULTILIB_DEFAULTS { "m64" }
-#endif
-
-#if DEFAULT_ARCH32_P
-#define DEF_ARCH32_SPEC(__str) "%{!m64:" __str "}"
-#define DEF_ARCH64_SPEC(__str) "%{m64:" __str "}"
-#else
-#define DEF_ARCH32_SPEC(__str) "%{m32:" __str "}"
-#define DEF_ARCH64_SPEC(__str) "%{!m32:" __str "}"
-#endif
-
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC \
-(DEFAULT_ARCH32_P ? "\
-%{m64:" ASM_CPU64_DEFAULT_SPEC "} \
-%{!m64:" ASM_CPU32_DEFAULT_SPEC "} \
-" : "\
-%{m32:" ASM_CPU32_DEFAULT_SPEC "} \
-%{!m32:" ASM_CPU64_DEFAULT_SPEC "} \
-")
-
-/* This should be the same as LINK_ARCH32_SPEC_BASE, except with
- ARCH64_SUBDIR appended to the paths and /usr/ccs/lib is no longer
- necessary. */
-#undef LINK_ARCH64_SPEC_BASE
-#define LINK_ARCH64_SPEC_BASE \
- "%{G:-G} \
- %{YP,*} \
- %{R*} \
- %{!YP,*:%{p|pg:-Y P,%R/usr/lib/libp/" ARCH64_SUBDIR ":%R/lib/" ARCH64_SUBDIR ":%R/usr/lib/" ARCH64_SUBDIR "} \
- %{!p:%{!pg:-Y P,%R/lib/" ARCH64_SUBDIR ":%R/usr/lib/" ARCH64_SUBDIR "}}}"
-
-#undef LINK_ARCH64_SPEC
-#ifndef USE_GLD
-/* FIXME: Used to be SPARC-only. Not SPARC-specfic but for the model name! */
-#define LINK_ARCH64_SPEC \
- "%{mcmodel=medlow:-M /usr/lib/ld/" ARCH64_SUBDIR "/map.below4G} " \
- LINK_ARCH64_SPEC_BASE
-#else
-#define LINK_ARCH64_SPEC LINK_ARCH64_SPEC_BASE
-#endif
-
-#ifdef USE_GLD
-#if DEFAULT_ARCH32_P
-#define ARCH_DEFAULT_EMULATION ARCH32_EMULATION
-#else
-#define ARCH_DEFAULT_EMULATION ARCH64_EMULATION
-#endif
-#define TARGET_LD_EMULATION "%{m32:-m " ARCH32_EMULATION "}" \
- "%{m64:-m " ARCH64_EMULATION "}" \
- "%{!m32:%{!m64:-m " ARCH_DEFAULT_EMULATION "}} "
-#else
-#define TARGET_LD_EMULATION ""
-#endif
-
-#undef LINK_ARCH_SPEC
-#if DISABLE_MULTILIB
-#if DEFAULT_ARCH32_P
-#define LINK_ARCH_SPEC TARGET_LD_EMULATION " \
-%{m32:%(link_arch32)} \
-%{m64:%edoes not support multilib} \
-%{!m32:%{!m64:%(link_arch_default)}} \
-"
-#else
-#define LINK_ARCH_SPEC TARGET_LD_EMULATION " \
-%{m32:%edoes not support multilib} \
-%{m64:%(link_arch64)} \
-%{!m32:%{!m64:%(link_arch_default)}} \
-"
-#endif
-#else
-#define LINK_ARCH_SPEC TARGET_LD_EMULATION " \
-%{m32:%(link_arch32)} \
-%{m64:%(link_arch64)} \
-%{!m32:%{!m64:%(link_arch_default)}}"
-#endif
-
-#define LINK_ARCH_DEFAULT_SPEC \
-(DEFAULT_ARCH32_P ? LINK_ARCH32_SPEC : LINK_ARCH64_SPEC)
-
-#undef SUBTARGET_EXTRA_SPECS
-#define SUBTARGET_EXTRA_SPECS \
- { "startfile_arch", STARTFILE_ARCH_SPEC }, \
- { "link_arch32", LINK_ARCH32_SPEC }, \
- { "link_arch64", LINK_ARCH64_SPEC }, \
- { "link_arch_default", LINK_ARCH_DEFAULT_SPEC }, \
- { "link_arch", LINK_ARCH_SPEC }, \
- SUBTARGET_CPU_EXTRA_SPECS
diff --git a/gcc/config/sol2.h b/gcc/config/sol2.h
index a21c953b035..d88de37bd2e 100644
--- a/gcc/config/sol2.h
+++ b/gcc/config/sol2.h
@@ -21,20 +21,25 @@ along with GCC; see the file COPYING3. If not see
/* We are compiling for Solaris 2 now. */
#define TARGET_SOLARIS 1
-/* Solaris 2 (at least as of 2.5.1) uses a 32-bit wchar_t. */
+/* wchar_t is called differently in <wchar.h> for 32 and 64-bit
+ compilations. This is called for by SCD 2.4.1, p. 6-83, Figure 6-65
+ (32-bit) and p. 6P-10, Figure 6.38 (64-bit). */
+
#undef WCHAR_TYPE
-#define WCHAR_TYPE "long int"
+#define WCHAR_TYPE (TARGET_64BIT ? "int" : "long int")
#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#define WCHAR_TYPE_SIZE 32
+
+/* Same for wint_t. See SCD 2.4.1, p. 6-83, Figure 6-66 (32-bit). There's
+ no corresponding 64-bit definition, but this is what Solaris 8
+ <iso/wchar_iso.h> uses. */
-/* Solaris 2 uses a wint_t different from the default. This is required
- by the SCD 2.4.1, p. 6-83, Figure 6-66. */
-#undef WINT_TYPE
-#define WINT_TYPE "long int"
+#undef WINT_TYPE
+#define WINT_TYPE (TARGET_64BIT ? "int" : "long int")
-#undef WINT_TYPE_SIZE
-#define WINT_TYPE_SIZE BITS_PER_WORD
+#undef WINT_TYPE_SIZE
+#define WINT_TYPE_SIZE 32
#define SIG_ATOMIC_TYPE "int"
@@ -103,6 +108,19 @@ along with GCC; see the file COPYING3. If not see
solaris_override_options (); \
} while (0)
+#if DEFAULT_ARCH32_P
+#define MULTILIB_DEFAULTS { "m32" }
+#else
+#define MULTILIB_DEFAULTS { "m64" }
+#endif
+
+#if DEFAULT_ARCH32_P
+#define DEF_ARCH32_SPEC(__str) "%{!m64:" __str "}"
+#define DEF_ARCH64_SPEC(__str) "%{m64:" __str "}"
+#else
+#define DEF_ARCH32_SPEC(__str) "%{m32:" __str "}"
+#define DEF_ARCH64_SPEC(__str) "%{!m32:" __str "}"
+#endif
/* It's safe to pass -s always, even if -g is not used. Those options are
handled by both Sun as and GNU as. */
@@ -111,6 +129,16 @@ along with GCC; see the file COPYING3. If not see
#define ASM_PIC_SPEC " %{fpic|fpie|fPIC|fPIE:-K PIC}"
+#undef ASM_CPU_DEFAULT_SPEC
+#define ASM_CPU_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? "\
+%{m64:" ASM_CPU64_DEFAULT_SPEC "} \
+%{!m64:" ASM_CPU32_DEFAULT_SPEC "} \
+" : "\
+%{m32:" ASM_CPU32_DEFAULT_SPEC "} \
+%{!m32:" ASM_CPU64_DEFAULT_SPEC "} \
+")
+
#undef LIB_SPEC
#define LIB_SPEC \
"%{!symbolic:\
@@ -120,17 +148,11 @@ along with GCC; see the file COPYING3. If not see
#ifndef CROSS_DIRECTORY_STRUCTURE
#undef MD_EXEC_PREFIX
#define MD_EXEC_PREFIX "/usr/ccs/bin/"
-
-#undef MD_STARTFILE_PREFIX
-#define MD_STARTFILE_PREFIX "/usr/ccs/lib/"
#endif
-#undef STARTFILE_ARCH32_SPEC
-#define STARTFILE_ARCH32_SPEC "%{ansi:values-Xc.o%s} \
- %{!ansi:values-Xa.o%s}"
-
#undef STARTFILE_ARCH_SPEC
-#define STARTFILE_ARCH_SPEC STARTFILE_ARCH32_SPEC
+#define STARTFILE_ARCH_SPEC "%{ansi:values-Xc.o%s} \
+ %{!ansi:values-Xa.o%s}"
/* We don't use the standard svr4 STARTFILE_SPEC because it's wrong for us. */
#undef STARTFILE_SPEC
@@ -153,14 +175,78 @@ along with GCC; see the file COPYING3. If not see
"%{G:-G} \
%{YP,*} \
%{R*} \
- %{!YP,*:%{p|pg:-Y P,%R/usr/ccs/lib/libp:%R/usr/lib/libp:%R/usr/ccs/lib:%R/lib:%R/usr/lib} \
- %{!p:%{!pg:-Y P,%R/usr/ccs/lib:%R/lib:%R/usr/lib}}}"
+ %{!YP,*:%{p|pg:-Y P,%R/usr/lib/libp:%R/lib:%R/usr/lib} \
+ %{!p:%{!pg:-Y P,%R/lib:%R/usr/lib}}}"
#undef LINK_ARCH32_SPEC
#define LINK_ARCH32_SPEC LINK_ARCH32_SPEC_BASE
+/* This should be the same as LINK_ARCH32_SPEC_BASE, except with
+ ARCH64_SUBDIR appended to the paths. */
+#undef LINK_ARCH64_SPEC_BASE
+#define LINK_ARCH64_SPEC_BASE \
+ "%{G:-G} \
+ %{YP,*} \
+ %{R*} \
+ %{!YP,*:%{p|pg:-Y P,%R/usr/lib/libp/" ARCH64_SUBDIR ":%R/lib/" ARCH64_SUBDIR ":%R/usr/lib/" ARCH64_SUBDIR "} \
+ %{!p:%{!pg:-Y P,%R/lib/" ARCH64_SUBDIR ":%R/usr/lib/" ARCH64_SUBDIR "}}}"
+
+#undef LINK_ARCH64_SPEC
+#ifndef USE_GLD
+/* FIXME: Used to be SPARC-only. Not SPARC-specific but for the model name! */
+#define LINK_ARCH64_SPEC \
+ "%{mcmodel=medlow:-M /usr/lib/ld/" ARCH64_SUBDIR "/map.below4G} " \
+ LINK_ARCH64_SPEC_BASE
+#else
+#define LINK_ARCH64_SPEC LINK_ARCH64_SPEC_BASE
+#endif
+
+#ifdef USE_GLD
+#if DEFAULT_ARCH32_P
+#define ARCH_DEFAULT_EMULATION ARCH32_EMULATION
+#else
+#define ARCH_DEFAULT_EMULATION ARCH64_EMULATION
+#endif
+#define TARGET_LD_EMULATION "%{m32:-m " ARCH32_EMULATION "}" \
+ "%{m64:-m " ARCH64_EMULATION "}" \
+ "%{!m32:%{!m64:-m " ARCH_DEFAULT_EMULATION "}} "
+#else
+#define TARGET_LD_EMULATION ""
+#endif
+
#undef LINK_ARCH_SPEC
-#define LINK_ARCH_SPEC LINK_ARCH32_SPEC
+#if DISABLE_MULTILIB
+#if DEFAULT_ARCH32_P
+#define LINK_ARCH_SPEC TARGET_LD_EMULATION " \
+%{m32:%(link_arch32)} \
+%{m64:%edoes not support multilib} \
+%{!m32:%{!m64:%(link_arch_default)}} \
+"
+#else
+#define LINK_ARCH_SPEC TARGET_LD_EMULATION " \
+%{m32:%edoes not support multilib} \
+%{m64:%(link_arch64)} \
+%{!m32:%{!m64:%(link_arch_default)}} \
+"
+#endif
+#else
+#define LINK_ARCH_SPEC TARGET_LD_EMULATION " \
+%{m32:%(link_arch32)} \
+%{m64:%(link_arch64)} \
+%{!m32:%{!m64:%(link_arch_default)}}"
+#endif
+
+#define LINK_ARCH_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? LINK_ARCH32_SPEC : LINK_ARCH64_SPEC)
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "startfile_arch", STARTFILE_ARCH_SPEC }, \
+ { "link_arch32", LINK_ARCH32_SPEC }, \
+ { "link_arch64", LINK_ARCH64_SPEC }, \
+ { "link_arch_default", LINK_ARCH_DEFAULT_SPEC }, \
+ { "link_arch", LINK_ARCH_SPEC }, \
+ SUBTARGET_CPU_EXTRA_SPECS
/* C++11 programs need -lrt for nanosleep. */
#define TIME_LIBRARY "rt"
@@ -235,7 +321,9 @@ along with GCC; see the file COPYING3. If not see
#define TARGET_CXX_DECL_MANGLING_CONTEXT solaris_cxx_decl_mangling_context
/* Solaris/x86 as and gas support unquoted section names. */
+#ifndef SECTION_NAME_FORMAT
#define SECTION_NAME_FORMAT "%s"
+#endif
/* This is how to declare the size of a function. For Solaris, we output
any .init or .fini entries here. */
@@ -249,23 +337,6 @@ along with GCC; see the file COPYING3. If not see
} \
while (0)
-/* Solaris as has a bug: a .common directive in .tbss or .tdata section
- behaves as .tls_common rather than normal non-TLS .common. */
-#undef ASM_OUTPUT_ALIGNED_COMMON
-#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
- do \
- { \
- if (TARGET_SUN_TLS \
- && in_section \
- && ((in_section->common.flags & SECTION_TLS) == SECTION_TLS)) \
- switch_to_section (bss_section); \
- fprintf ((FILE), "%s", COMMON_ASM_OP); \
- assemble_name ((FILE), (NAME)); \
- fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
- (SIZE), (ALIGN) / BITS_PER_UNIT); \
- } \
- while (0)
-
#ifndef USE_GAS
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY solaris_assemble_visibility
@@ -291,8 +362,10 @@ along with GCC; see the file COPYING3. If not see
#define TARGET_POSIX_IO
+/* Solaris 10 has the float and long double forms of math functions.
+ We redefine this hook so the version from elfos.h header won't be used. */
#undef TARGET_LIBC_HAS_FUNCTION
-#define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function
+#define TARGET_LIBC_HAS_FUNCTION default_libc_has_function
extern GTY(()) tree solaris_pending_aligns;
extern GTY(()) tree solaris_pending_inits;
diff --git a/gcc/config/sparc/sol2.h b/gcc/config/sparc/sol2.h
index b50a937b26f..2fcdc85c81f 100644
--- a/gcc/config/sparc/sol2.h
+++ b/gcc/config/sparc/sol2.h
@@ -109,8 +109,6 @@ along with GCC; see the file COPYING3. If not see
#define CPP_CPU64_DEFAULT_SPEC ""
#undef ASM_CPU32_DEFAULT_SPEC
#define ASM_CPU32_DEFAULT_SPEC "-xarch=v8plus"
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC ASM_CPU32_DEFAULT_SPEC
#endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
@@ -120,8 +118,6 @@ along with GCC; see the file COPYING3. If not see
#define ASM_CPU32_DEFAULT_SPEC "-xarch=v8plusa"
#undef ASM_CPU64_DEFAULT_SPEC
#define ASM_CPU64_DEFAULT_SPEC "-xarch=v9a"
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC ASM_CPU32_DEFAULT_SPEC
#endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc3
@@ -131,8 +127,6 @@ along with GCC; see the file COPYING3. If not see
#define ASM_CPU32_DEFAULT_SPEC "-xarch=v8plusb"
#undef ASM_CPU64_DEFAULT_SPEC
#define ASM_CPU64_DEFAULT_SPEC "-xarch=v9b"
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC ASM_CPU32_DEFAULT_SPEC
#endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_niagara
@@ -142,8 +136,6 @@ along with GCC; see the file COPYING3. If not see
#define ASM_CPU32_DEFAULT_SPEC "-xarch=v8plusb"
#undef ASM_CPU64_DEFAULT_SPEC
#define ASM_CPU64_DEFAULT_SPEC "-xarch=v9b"
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC ASM_CPU32_DEFAULT_SPEC
#endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_niagara2
@@ -153,8 +145,6 @@ along with GCC; see the file COPYING3. If not see
#define ASM_CPU32_DEFAULT_SPEC "-xarch=v8plusb"
#undef ASM_CPU64_DEFAULT_SPEC
#define ASM_CPU64_DEFAULT_SPEC "-xarch=v9b"
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC ASM_CPU32_DEFAULT_SPEC
#endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_niagara3
@@ -164,8 +154,6 @@ along with GCC; see the file COPYING3. If not see
#define ASM_CPU32_DEFAULT_SPEC "-xarch=v8plus" AS_NIAGARA3_FLAG
#undef ASM_CPU64_DEFAULT_SPEC
#define ASM_CPU64_DEFAULT_SPEC "-xarch=v9" AS_NIAGARA3_FLAG
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC ASM_CPU32_DEFAULT_SPEC
#endif
#if TARGET_CPU_DEFAULT == TARGET_CPU_niagara4
@@ -175,8 +163,6 @@ along with GCC; see the file COPYING3. If not see
#define ASM_CPU32_DEFAULT_SPEC AS_SPARC32_FLAG AS_NIAGARA4_FLAG
#undef ASM_CPU64_DEFAULT_SPEC
#define ASM_CPU64_DEFAULT_SPEC AS_SPARC64_FLAG AS_NIAGARA4_FLAG
-#undef ASM_CPU_DEFAULT_SPEC
-#define ASM_CPU_DEFAULT_SPEC ASM_CPU32_DEFAULT_SPEC
#endif
#undef CPP_CPU_SPEC
@@ -361,6 +347,23 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
} \
while (0)
+/* Solaris as has a bug: a .common directive in .tbss or .tdata section
+ behaves as .tls_common rather than normal non-TLS .common. */
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (TARGET_SUN_TLS \
+ && in_section \
+ && ((in_section->common.flags & SECTION_TLS) == SECTION_TLS)) \
+ switch_to_section (bss_section); \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
+ (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+ while (0)
+
#ifndef USE_GAS
/* This is how to output an assembler line that says to advance
the location counter to a multiple of 2**LOG bytes using the
@@ -376,7 +379,6 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
/* Sun as requires doublequoted section names on SPARC. While GNU as
supports that, too, we prefer the standard variant. */
-#undef SECTION_NAME_FORMAT
#define SECTION_NAME_FORMAT "\"%s\""
#endif /* !USE_GAS */
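As a rough illustration of what the relocated ASM_OUTPUT_ALIGNED_COMMON override above guards against (hypothetical input and output, not taken from the patch): an ordinary uninitialized global compiled with -fcommon, say

    int counter;

is emitted through COMMON_ASM_OP as roughly

    .common counter,4,4

but only after switching back to .bss whenever the assembler is currently positioned in .tbss or .tdata, because Sun as would otherwise treat that .common as .tls_common and wrongly give the symbol TLS semantics.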
diff --git a/gcc/config/sparc/t-sol2-64 b/gcc/config/sparc/t-sol2
index ec7e4eba6fd..ec7e4eba6fd 100644
--- a/gcc/config/sparc/t-sol2-64
+++ b/gcc/config/sparc/t-sol2
diff --git a/gcc/coretypes.h b/gcc/coretypes.h
index 58f9af0ec98..41ad1a3b22d 100644
--- a/gcc/coretypes.h
+++ b/gcc/coretypes.h
@@ -197,7 +197,8 @@ enum function_class {
function_c94,
function_c99_misc,
function_c99_math_complex,
- function_sincos
+ function_sincos,
+ function_c11_misc
};
/* Memory model types for the __atomic* builtins.
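The new function_c11_misc classification pairs with the TARGET_LIBC_HAS_FUNCTION hunk above, where no_c99_libc_has_function is replaced by default_libc_has_function because Solaris 10 libc provides the float and long double math functions. A minimal stand-alone sketch of how a libc_has_function-style target hook could use the enum; the hook body and the target it describes are illustrative assumptions, not code from this patch:

    enum function_class {
      function_c94,
      function_c99_misc,
      function_c99_math_complex,
      function_sincos,
      function_c11_misc
    };

    /* Illustrative only: a target whose libc provides the C94, C99 and C11
       library functions answers these queries affirmatively.  */
    static bool
    example_libc_has_function (enum function_class fn_class)
    {
      switch (fn_class)
        {
        case function_c94:
        case function_c99_misc:
        case function_c99_math_complex:
        case function_c11_misc:
          return true;
        default:
          return false;
        }
    }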
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index a5f3829d705..c078e0685cc 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,47 @@
+2014-04-30 Jason Merrill <jason@redhat.com>
+
+ PR c++/60980
+ * init.c (build_value_init): Don't try to call an array constructor.
+
+ PR c++/60951
+ * typeck2.c (massage_init_elt): Use maybe_constant_init.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * typeck.c (cp_build_binary_op): Call ubsan_instrument_division
+ even when SANITIZE_FLOAT_DIVIDE is on. Set doing_div_or_mod even
+ for non-integer types.
+
+2014-04-29 Jason Merrill <jason@redhat.com>
+
+ DR 1351
+ Represent the unevaluated exception specification of an implicitly
+ declared or deleted function with a simple placeholder, not a list
+ of functions.
+ * cp-tree.h (UNEVALUATED_NOEXCEPT_SPEC_P): New.
+ * except.c (unevaluated_noexcept_spec): New.
+ * class.c (deduce_noexcept_on_destructor): Use it.
+ * decl.c (check_redeclaration_exception_specification): Call
+ maybe_instantiate_noexcept.
+ (duplicate_decls): Call it before merge_types.
+ (start_preparsed_function): Call maybe_instantiate_noexcept.
+ * decl2.c (mark_used): Call maybe_instantiate_noexcept earlier.
+ * init.c (get_nsdmi): Factor out of perform_member_init.
+ * method.c (process_subob_fn): Call maybe_instantiate_noexcept.
+ (walk_field_subobs): Consider NSDMI for EH spec.
+ (get_defaulted_eh_spec): New.
+ (implicitly_declare_fn): Use unevaluated_noexcept_spec.
+ (defaulted_late_check): Defer EH checking in non-template classes.
+ (after_nsdmi_defaulted_late_checks): New.
+ * parser.c (cp_parser_class_specifier_1): Use it.
+ (unparsed_classes): New macro.
+ * parser.h (cp_unparsed_functions_entry_d): Add classes field.
+ * pt.c (maybe_instantiate_noexcept): Use get_defaulted_eh_spec.
+ Remove list-of-functions handling.
+ * typeck2.c (merge_exception_specifiers): Remove list-of-functions
+ handling and FN parameter.
+ * typeck.c (merge_types): Adjust.
+
2014-04-28 Paolo Carlini <paolo.carlini@oracle.com>
PR c++/59120
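For context on the DR 1351 entry above: with the resolution this patch implements, the exception specification of an implicitly declared special member function depends on its NSDMIs, so it is represented by a placeholder and only computed when actually needed. A small self-contained C++11 illustration of the semantics (not taken from the patch or its testsuite):

    #include <type_traits>

    int may_throw ();                     // not noexcept

    struct A { int i = may_throw (); };   // NSDMI can throw
    struct B { int i = 42; };             // NSDMI cannot throw

    // Under DR 1351 the implicit A::A() is noexcept(false), while the
    // implicit B::B() is noexcept(true).
    static_assert (!std::is_nothrow_default_constructible<A>::value, "A");
    static_assert ( std::is_nothrow_default_constructible<B>::value, "B");

    int main () { }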
diff --git a/gcc/cp/class.c b/gcc/cp/class.c
index 67413761d8c..c96d79dbc82 100644
--- a/gcc/cp/class.c
+++ b/gcc/cp/class.c
@@ -4726,11 +4726,7 @@ deduce_noexcept_on_destructor (tree dtor)
{
if (!TYPE_RAISES_EXCEPTIONS (TREE_TYPE (dtor)))
{
- tree ctx = DECL_CONTEXT (dtor);
- tree implicit_fn = implicitly_declare_fn (sfk_destructor, ctx,
- /*const_p=*/false,
- NULL, NULL);
- tree eh_spec = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (implicit_fn));
+ tree eh_spec = unevaluated_noexcept_spec ();
TREE_TYPE (dtor) = build_exception_variant (TREE_TYPE (dtor), eh_spec);
}
}
diff --git a/gcc/cp/cp-tree.def b/gcc/cp/cp-tree.def
index 057e7ea5ef7..b4a72d6e30a 100644
--- a/gcc/cp/cp-tree.def
+++ b/gcc/cp/cp-tree.def
@@ -212,9 +212,12 @@ DEFTREECODE (USING_STMT, "using_stmt", tcc_statement, 1)
parsing had occurred. */
DEFTREECODE (DEFAULT_ARG, "default_arg", tcc_exceptional, 0)
-/* An uninstantiated noexcept-specification. DEFERRED_NOEXCEPT_PATTERN is
- the pattern from the template, and DEFERRED_NOEXCEPT_ARGS are the
- template arguments to substitute into the pattern when needed. */
+/* An uninstantiated/unevaluated noexcept-specification. For the
+ uninstantiated case, DEFERRED_NOEXCEPT_PATTERN is the pattern from the
+ template, and DEFERRED_NOEXCEPT_ARGS are the template arguments to
+ substitute into the pattern when needed. For the unevaluated case,
+ those slots are NULL_TREE and we use get_defaulted_eh_spec to find
+ the exception-specification. */
DEFTREECODE (DEFERRED_NOEXCEPT, "deferred_noexcept", tcc_exceptional, 0)
/* A template-id, like foo<int>. The first operand is the template.
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index f459e55bc84..55ecc4e5b5f 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -579,8 +579,10 @@ struct GTY (()) tree_default_arg {
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
- && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT \
- || is_overloaded_fn (TREE_PURPOSE (NODE))))
+ && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT))
+#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \
+ (DEFERRED_NOEXCEPT_SPEC_P (NODE) \
+ && DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE)
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
@@ -4359,8 +4361,6 @@ extern int comparing_specializations;
sizeof can be nested. */
extern int cp_unevaluated_operand;
-extern tree cp_convert_range_for (tree, tree, tree, bool);
-extern bool parsing_nsdmi (void);
/* in pt.c */
@@ -5420,6 +5420,7 @@ extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
+extern tree get_nsdmi (tree, bool);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree throw_bad_array_new_length (void);
@@ -5468,6 +5469,9 @@ extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
+extern tree get_defaulted_eh_spec (tree);
+extern tree unevaluated_noexcept_spec (void);
+extern void after_nsdmi_defaulted_late_checks (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
@@ -5489,6 +5493,11 @@ extern tree implicitly_declare_fn (special_function_kind, tree,
/* In optimize.c */
extern bool maybe_clone_body (tree);
+/* In parser.c */
+extern tree cp_convert_range_for (tree, tree, tree, bool);
+extern bool parsing_nsdmi (void);
+extern void inject_this_parameter (tree, cp_cv_quals);
+
/* in pt.c */
extern bool check_template_shadow (tree);
extern tree get_innermost_template_args (tree, int);
@@ -6162,7 +6171,7 @@ extern tree build_x_arrow (location_t, tree,
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
-extern tree merge_exception_specifiers (tree, tree, tree);
+extern tree merge_exception_specifiers (tree, tree);
/* in mangle.c */
extern void init_mangle (void);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 833570b1e72..bfd5395128a 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -1168,15 +1168,18 @@ static void
check_redeclaration_exception_specification (tree new_decl,
tree old_decl)
{
- tree new_type;
- tree old_type;
- tree new_exceptions;
- tree old_exceptions;
+ tree new_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (new_decl));
+ tree old_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (old_decl));
- new_type = TREE_TYPE (new_decl);
- new_exceptions = TYPE_RAISES_EXCEPTIONS (new_type);
- old_type = TREE_TYPE (old_decl);
- old_exceptions = TYPE_RAISES_EXCEPTIONS (old_type);
+ /* Two default specs are equivalent, don't force evaluation. */
+ if (UNEVALUATED_NOEXCEPT_SPEC_P (new_exceptions)
+ && UNEVALUATED_NOEXCEPT_SPEC_P (old_exceptions))
+ return;
+
+ maybe_instantiate_noexcept (new_decl);
+ maybe_instantiate_noexcept (old_decl);
+ new_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (new_decl));
+ old_exceptions = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (old_decl));
/* [except.spec]
@@ -1916,13 +1919,13 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
if (types_match)
{
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
+ check_redeclaration_exception_specification (newdecl, olddecl);
+
/* Automatically handles default parameters. */
tree oldtype = TREE_TYPE (olddecl);
tree newtype;
- if (TREE_CODE (newdecl) == FUNCTION_DECL)
- maybe_instantiate_noexcept (olddecl);
-
/* For typedefs use the old type, as the new type's DECL_NAME points
at newdecl, which will be ggc_freed. */
if (TREE_CODE (newdecl) == TYPE_DECL)
@@ -1953,10 +1956,6 @@ duplicate_decls (tree newdecl, tree olddecl, bool newdecl_is_friend)
}
}
- /* Do this after calling `merge_types' so that default
- parameters don't confuse us. */
- else if (TREE_CODE (newdecl) == FUNCTION_DECL)
- check_redeclaration_exception_specification (newdecl, olddecl);
TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = newtype;
if (TREE_CODE (newdecl) == FUNCTION_DECL)
@@ -13435,6 +13434,9 @@ start_preparsed_function (tree decl1, tree attrs, int flags)
if (!DECL_CLONED_FUNCTION_P (decl1))
determine_visibility (decl1);
+ if (!processing_template_decl)
+ maybe_instantiate_noexcept (decl1);
+
begin_scope (sk_function_parms, decl1);
++function_depth;
diff --git a/gcc/cp/decl2.c b/gcc/cp/decl2.c
index 8a7a8369f19..918ea2fc6d0 100644
--- a/gcc/cp/decl2.c
+++ b/gcc/cp/decl2.c
@@ -4806,6 +4806,9 @@ mark_used (tree decl, tsubst_flags_t complain)
if (TREE_CODE (decl) == CONST_DECL)
used_types_insert (DECL_CONTEXT (decl));
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ maybe_instantiate_noexcept (decl);
+
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_DELETED_FN (decl))
{
@@ -4860,9 +4863,6 @@ mark_used (tree decl, tsubst_flags_t complain)
return true;
}
- if (TREE_CODE (decl) == FUNCTION_DECL)
- maybe_instantiate_noexcept (decl);
-
/* Normally, we can wait until instantiation-time to synthesize DECL.
However, if DECL is a static data member initialized with a constant
or a constexpr function, we need it right now because a reference to
diff --git a/gcc/cp/except.c b/gcc/cp/except.c
index 221971ac956..ead889c0658 100644
--- a/gcc/cp/except.c
+++ b/gcc/cp/except.c
@@ -1342,6 +1342,18 @@ build_noexcept_spec (tree expr, int complain)
}
}
+/* Returns a noexcept-specifier to be evaluated later, for an
+ implicitly-declared or explicitly defaulted special member function. */
+
+tree
+unevaluated_noexcept_spec (void)
+{
+ static tree spec;
+ if (spec == NULL_TREE)
+ spec = build_noexcept_spec (make_node (DEFERRED_NOEXCEPT), tf_none);
+ return spec;
+}
+
/* Returns a TRY_CATCH_EXPR that will put TRY_LIST and CATCH_LIST in the
TRY and CATCH locations. CATCH_LIST must be a STATEMENT_LIST */
diff --git a/gcc/cp/init.c b/gcc/cp/init.c
index 045044e4737..6838d2aadd0 100644
--- a/gcc/cp/init.c
+++ b/gcc/cp/init.c
@@ -340,7 +340,8 @@ build_value_init (tree type, tsubst_flags_t complain)
gcc_assert (!processing_template_decl
|| (SCALAR_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE));
- if (type_build_ctor_call (type))
+ if (CLASS_TYPE_P (type)
+ && type_build_ctor_call (type))
{
tree ctor = build_aggr_init_expr
(type,
@@ -522,6 +523,45 @@ perform_target_ctor (tree init)
}
}
+/* Return the non-static data member initializer (NSDMI) for FIELD_DECL
+   MEMBER.  */
+
+tree
+get_nsdmi (tree member, bool in_ctor)
+{
+ tree init;
+ tree save_ccp = current_class_ptr;
+ tree save_ccr = current_class_ref;
+ if (!in_ctor)
+ inject_this_parameter (DECL_CONTEXT (member), TYPE_UNQUALIFIED);
+ if (DECL_LANG_SPECIFIC (member) && DECL_TEMPLATE_INFO (member))
+ /* Do deferred instantiation of the NSDMI. */
+ init = (tsubst_copy_and_build
+ (DECL_INITIAL (DECL_TI_TEMPLATE (member)),
+ DECL_TI_ARGS (member),
+ tf_warning_or_error, member, /*function_p=*/false,
+ /*integral_constant_expression_p=*/false));
+ else
+ {
+ init = DECL_INITIAL (member);
+ if (init && TREE_CODE (init) == DEFAULT_ARG)
+ {
+ error ("constructor required before non-static data member "
+ "for %qD has been parsed", member);
+ DECL_INITIAL (member) = error_mark_node;
+ init = NULL_TREE;
+ }
+ /* Strip redundant TARGET_EXPR so we don't need to remap it, and
+ so the aggregate init code below will see a CONSTRUCTOR. */
+ if (init && TREE_CODE (init) == TARGET_EXPR
+ && !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (init))))
+ init = TARGET_EXPR_INITIAL (init);
+ init = break_out_target_exprs (init);
+ }
+ current_class_ptr = save_ccp;
+ current_class_ref = save_ccr;
+ return init;
+}
+
/* Initialize MEMBER, a FIELD_DECL, with INIT, a TREE_LIST of
arguments. If TREE_LIST is void_type_node, an empty initializer
list was given; if NULL_TREE no initializer was given. */
@@ -535,31 +575,7 @@ perform_member_init (tree member, tree init)
/* Use the non-static data member initializer if there was no
mem-initializer for this field. */
if (init == NULL_TREE)
- {
- if (DECL_LANG_SPECIFIC (member) && DECL_TEMPLATE_INFO (member))
- /* Do deferred instantiation of the NSDMI. */
- init = (tsubst_copy_and_build
- (DECL_INITIAL (DECL_TI_TEMPLATE (member)),
- DECL_TI_ARGS (member),
- tf_warning_or_error, member, /*function_p=*/false,
- /*integral_constant_expression_p=*/false));
- else
- {
- init = DECL_INITIAL (member);
- if (init && TREE_CODE (init) == DEFAULT_ARG)
- {
- error ("constructor required before non-static data member "
- "for %qD has been parsed", member);
- init = NULL_TREE;
- }
- /* Strip redundant TARGET_EXPR so we don't need to remap it, and
- so the aggregate init code below will see a CONSTRUCTOR. */
- if (init && TREE_CODE (init) == TARGET_EXPR
- && !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (init))))
- init = TARGET_EXPR_INITIAL (init);
- init = break_out_target_exprs (init);
- }
- }
+ init = get_nsdmi (member, /*ctor*/true);
if (init == error_mark_node)
return;
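The PR c++/60980 hunk at the top of this file adds the CLASS_TYPE_P check so build_value_init no longer tries to build a constructor call on an array type itself; arrays fall through to the element-wise paths instead. A stand-alone shape of code that value-initializes an array of a class with a user-provided constructor (illustrative only; the PR's own testcase is not reproduced here):

    struct A
    {
      A () : i (7) { }
      int i;
    };

    int main ()
    {
      A *p = new A[2] ();   // value-initialization of an array new-expression:
      delete [] p;          // each element is default-constructed, but there is
      return 0;             // no "constructor of A[2]" to call as such
    }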
diff --git a/gcc/cp/method.c b/gcc/cp/method.c
index 11bff7f4587..f8fc01ff531 100644
--- a/gcc/cp/method.c
+++ b/gcc/cp/method.c
@@ -1003,8 +1003,9 @@ process_subob_fn (tree fn, tree *spec_p, bool *trivial_p,
if (spec_p)
{
+ maybe_instantiate_noexcept (fn);
tree raises = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn));
- *spec_p = merge_exception_specifiers (*spec_p, raises, fn);
+ *spec_p = merge_exception_specifiers (*spec_p, raises);
}
if (!trivial_fn_p (fn))
@@ -1090,17 +1091,14 @@ walk_field_subobs (tree fields, tree fnname, special_function_kind sfk,
inform (0, "initializer for %q+#D is invalid", field);
if (trivial_p)
*trivial_p = false;
-#if 0
/* Core 1351: If the field has an NSDMI that could throw, the
- default constructor is noexcept(false). FIXME this is
- broken by deferred parsing and 1360 saying we can't lazily
- declare a non-trivial default constructor. Also this
- needs to do deferred instantiation. Disable until the
- conflict between 1351 and 1360 is resolved. */
- if (spec_p && !expr_noexcept_p (DECL_INITIAL (field), complain))
- *spec_p = noexcept_false_spec;
-#endif
-
+ default constructor is noexcept(false). */
+ if (spec_p)
+ {
+ tree nsdmi = get_nsdmi (field, /*ctor*/false);
+ if (!expr_noexcept_p (nsdmi, complain))
+ *spec_p = noexcept_false_spec;
+ }
/* Don't do the normal processing. */
continue;
}
@@ -1438,6 +1436,26 @@ synthesized_method_walk (tree ctype, special_function_kind sfk, bool const_p,
--c_inhibit_evaluation_warnings;
}
+/* DECL is a defaulted function whose exception specification is now
+ needed. Return what it should be. */
+
+tree
+get_defaulted_eh_spec (tree decl)
+{
+ if (DECL_CLONED_FUNCTION_P (decl))
+ decl = DECL_CLONED_FUNCTION (decl);
+ special_function_kind sfk = special_function_p (decl);
+ tree ctype = DECL_CONTEXT (decl);
+ tree parms = FUNCTION_FIRST_USER_PARMTYPE (decl);
+ tree parm_type = TREE_VALUE (parms);
+ bool const_p = CP_TYPE_CONST_P (non_reference (parm_type));
+ tree spec = empty_except_spec;
+ synthesized_method_walk (ctype, sfk, const_p, &spec, NULL, NULL,
+ NULL, false, DECL_INHERITED_CTOR_BASE (decl),
+ parms);
+ return spec;
+}
+
/* DECL is a deleted function. If it's implicitly deleted, explain why and
return true; else return false. */
@@ -1675,6 +1693,13 @@ implicitly_declare_fn (special_function_kind kind, tree type,
deleted_p = DECL_DELETED_FN (inherited_ctor);
constexpr_p = DECL_DECLARED_CONSTEXPR_P (inherited_ctor);
}
+ else if (cxx_dialect >= cxx11)
+ {
+ raises = unevaluated_noexcept_spec ();
+ synthesized_method_walk (type, kind, const_p, NULL, &trivial_p,
+ &deleted_p, &constexpr_p, false,
+ inherited_base, inherited_parms);
+ }
else
synthesized_method_walk (type, kind, const_p, &raises, &trivial_p,
&deleted_p, &constexpr_p, false,
@@ -1826,25 +1851,33 @@ defaulted_late_check (tree fn)
is explicitly defaulted on its first declaration, (...) it is
implicitly considered to have the same exception-specification as if
it had been implicitly declared. */
- if (TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn)))
+ tree fn_spec = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn));
+ if (!fn_spec)
{
- maybe_instantiate_noexcept (fn);
- if (!comp_except_specs (TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn)),
- eh_spec, ce_normal))
+ if (DECL_DEFAULTED_IN_CLASS_P (fn))
+ TREE_TYPE (fn) = build_exception_variant (TREE_TYPE (fn), eh_spec);
+ }
+ else if (UNEVALUATED_NOEXCEPT_SPEC_P (fn_spec))
+ /* Equivalent to the implicit spec. */;
+ else if (DECL_DEFAULTED_IN_CLASS_P (fn)
+ && !CLASSTYPE_TEMPLATE_INSTANTIATION (ctx))
+ /* We can't compare an explicit exception-specification on a
+ constructor defaulted in the class body to the implicit
+ exception-specification until after we've parsed any NSDMI; see
+ after_nsdmi_defaulted_late_checks. */;
+ else
+ {
+ tree eh_spec = get_defaulted_eh_spec (fn);
+ if (!comp_except_specs (fn_spec, eh_spec, ce_normal))
{
if (DECL_DEFAULTED_IN_CLASS_P (fn))
- {
- DECL_DELETED_FN (fn) = true;
- eh_spec = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn));
- }
+ DECL_DELETED_FN (fn) = true;
else
error ("function %q+D defaulted on its redeclaration "
"with an exception-specification that differs from "
- "the implicit declaration %q#D", fn, implicit_fn);
+ "the implicit exception-specification %qX", fn, eh_spec);
}
}
- if (DECL_DEFAULTED_IN_CLASS_P (fn))
- TREE_TYPE (fn) = build_exception_variant (TREE_TYPE (fn), eh_spec);
if (DECL_DEFAULTED_IN_CLASS_P (fn)
&& DECL_DECLARED_CONSTEXPR_P (implicit_fn))
@@ -1874,6 +1907,30 @@ defaulted_late_check (tree fn)
DECL_DELETED_FN (fn) = 1;
}
+/* Now that we have parsed the NSDMIs for class T, we can check any explicit
+   exception-specifications on functions defaulted in the class body.  */
+
+void
+after_nsdmi_defaulted_late_checks (tree t)
+{
+ if (uses_template_parms (t))
+ return;
+ if (t == error_mark_node)
+ return;
+ for (tree fn = TYPE_METHODS (t); fn; fn = DECL_CHAIN (fn))
+ if (!DECL_ARTIFICIAL (fn) && DECL_DEFAULTED_IN_CLASS_P (fn))
+ {
+ tree fn_spec = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn));
+ if (UNEVALUATED_NOEXCEPT_SPEC_P (fn_spec))
+ continue;
+
+ tree eh_spec = get_defaulted_eh_spec (fn);
+ if (!comp_except_specs (TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn)),
+ eh_spec, ce_normal))
+ DECL_DELETED_FN (fn) = true;
+ }
+}
+
/* Returns true iff FN can be explicitly defaulted, and gives any
errors if defaulting FN is ill-formed. */
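A stand-alone C++11 sketch (not the patch's testcase) of the case defaulted_late_check now defers to after_nsdmi_defaulted_late_checks: an explicit exception-specification on a function defaulted in the class body cannot be compared with the implicit one until the NSDMI further down has been parsed.

    int may_throw ();                 // not noexcept

    struct C
    {
      C () noexcept = default;        // explicit spec on an in-class default
      int i = may_throw ();           // parsed later; the implicit spec is
    };                                // noexcept(false)

    // The two specifications differ, so C::C() is marked deleted rather than
    // rejected outright; any use of C () is then an error.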
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 962cacedf80..5542dcd9ba3 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -1848,11 +1848,13 @@ cp_parser_context_new (cp_parser_context* next)
parser->unparsed_queues->last ().funs_with_definitions
#define unparsed_nsdmis \
parser->unparsed_queues->last ().nsdmis
+#define unparsed_classes \
+ parser->unparsed_queues->last ().classes
static void
push_unparsed_function_queues (cp_parser *parser)
{
- cp_unparsed_functions_entry e = {NULL, make_tree_vector (), NULL};
+ cp_unparsed_functions_entry e = {NULL, make_tree_vector (), NULL, NULL};
vec_safe_push (parser->unparsed_queues, e);
}
@@ -17834,7 +17836,7 @@ cp_parser_virt_specifier_seq_opt (cp_parser* parser)
/* Used by handling of trailing-return-types and NSDMI, in which 'this'
is in scope even though it isn't real. */
-static void
+void
inject_this_parameter (tree ctype, cp_cv_quals quals)
{
tree this_parm;
@@ -19505,6 +19507,13 @@ cp_parser_class_specifier_1 (cp_parser* parser)
current_class_ref = save_ccr;
if (pushed_scope)
pop_scope (pushed_scope);
+
+ /* Now do some post-NSDMI bookkeeping. */
+ FOR_EACH_VEC_SAFE_ELT (unparsed_classes, ix, class_type)
+ after_nsdmi_defaulted_late_checks (class_type);
+ vec_safe_truncate (unparsed_classes, 0);
+ after_nsdmi_defaulted_late_checks (type);
+
/* Now parse the body of the functions. */
if (flag_openmp)
{
@@ -19521,6 +19530,8 @@ cp_parser_class_specifier_1 (cp_parser* parser)
cp_parser_late_parsing_for_member (parser, decl);
vec_safe_truncate (unparsed_funs_with_definitions, 0);
}
+ else
+ vec_safe_push (unparsed_classes, type);
/* Put back any saved access checks. */
pop_deferring_access_checks ();
diff --git a/gcc/cp/parser.h b/gcc/cp/parser.h
index 758c6df3c27..96a84534045 100644
--- a/gcc/cp/parser.h
+++ b/gcc/cp/parser.h
@@ -149,7 +149,7 @@ typedef struct GTY(()) cp_default_arg_entry_d {
} cp_default_arg_entry;
-/* An entry in a stack for member functions of local classes. */
+/* An entry in a stack for member functions defined within their classes. */
typedef struct GTY(()) cp_unparsed_functions_entry_d {
/* Functions with default arguments that require post-processing.
@@ -163,6 +163,10 @@ typedef struct GTY(()) cp_unparsed_functions_entry_d {
/* Non-static data members with initializers that require post-processing.
FIELD_DECLs appear in this list in declaration order. */
vec<tree, va_gc> *nsdmis;
+
+ /* Nested classes go in this vector, so that we can do some final
+ processing after parsing any NSDMIs. */
+ vec<tree, va_gc> *classes;
} cp_unparsed_functions_entry;
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index c74e7ae7586..48cc2a9e9cf 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -19470,14 +19470,16 @@ maybe_instantiate_noexcept (tree fn)
fntype = TREE_TYPE (fn);
spec = TYPE_RAISES_EXCEPTIONS (fntype);
- if (!DEFERRED_NOEXCEPT_SPEC_P (spec))
+ if (!spec || !TREE_PURPOSE (spec))
return;
noex = TREE_PURPOSE (spec);
if (TREE_CODE (noex) == DEFERRED_NOEXCEPT)
{
- if (push_tinst_level (fn))
+ if (DEFERRED_NOEXCEPT_PATTERN (noex) == NULL_TREE)
+ spec = get_defaulted_eh_spec (fn);
+ else if (push_tinst_level (fn))
{
push_access_scope (fn);
push_deferring_access_checks (dk_no_deferred);
@@ -19496,24 +19498,9 @@ maybe_instantiate_noexcept (tree fn)
}
else
spec = noexcept_false_spec;
- }
- else
- {
- /* This is an implicitly declared function, so NOEX is a list of
- other functions to evaluate and merge. */
- tree elt;
- spec = noexcept_true_spec;
- for (elt = noex; elt; elt = OVL_NEXT (elt))
- {
- tree fn = OVL_CURRENT (elt);
- tree subspec;
- maybe_instantiate_noexcept (fn);
- subspec = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn));
- spec = merge_exception_specifiers (spec, subspec, NULL_TREE);
- }
- }
- TREE_TYPE (fn) = build_exception_variant (fntype, spec);
+ TREE_TYPE (fn) = build_exception_variant (fntype, spec);
+ }
FOR_EACH_CLONE (clone, fn)
{
@@ -19664,9 +19651,6 @@ instantiate_decl (tree d, int defer_ok,
SET_DECL_IMPLICIT_INSTANTIATION (d);
}
- if (TREE_CODE (d) == FUNCTION_DECL)
- maybe_instantiate_noexcept (d);
-
/* Defer all other templates, unless we have been explicitly
forbidden from doing so. */
if (/* If there is no definition, we cannot instantiate the
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index 9a80727dd8e..729e22eadc5 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -840,8 +840,7 @@ merge_types (tree t1, tree t2)
type_memfn_quals (t1),
type_memfn_rqual (t1));
raises = merge_exception_specifiers (TYPE_RAISES_EXCEPTIONS (t1),
- TYPE_RAISES_EXCEPTIONS (t2),
- NULL_TREE);
+ TYPE_RAISES_EXCEPTIONS (t2));
t1 = build_exception_variant (rval, raises);
break;
}
@@ -852,8 +851,7 @@ merge_types (tree t1, tree t2)
is just the main variant of this. */
tree basetype = class_of_this_parm (t2);
tree raises = merge_exception_specifiers (TYPE_RAISES_EXCEPTIONS (t1),
- TYPE_RAISES_EXCEPTIONS (t2),
- NULL_TREE);
+ TYPE_RAISES_EXCEPTIONS (t2));
cp_ref_qualifier rqual = type_memfn_rqual (t1);
tree t3;
@@ -4112,10 +4110,7 @@ cp_build_binary_op (location_t location,
enum tree_code tcode0 = code0, tcode1 = code1;
tree cop1 = fold_non_dependent_expr_sfinae (op1, tf_none);
cop1 = maybe_constant_value (cop1);
-
- if (tcode0 == INTEGER_TYPE)
- doing_div_or_mod = true;
-
+ doing_div_or_mod = true;
warn_for_div_by_zero (location, cop1);
if (tcode0 == COMPLEX_TYPE || tcode0 == VECTOR_TYPE)
@@ -4155,9 +4150,7 @@ cp_build_binary_op (location_t location,
{
tree cop1 = fold_non_dependent_expr_sfinae (op1, tf_none);
cop1 = maybe_constant_value (cop1);
-
- if (code0 == INTEGER_TYPE)
- doing_div_or_mod = true;
+ doing_div_or_mod = true;
warn_for_div_by_zero (location, cop1);
}
@@ -4904,7 +4897,8 @@ cp_build_binary_op (location_t location,
if (build_type == NULL_TREE)
build_type = result_type;
- if ((flag_sanitize & (SANITIZE_SHIFT | SANITIZE_DIVIDE))
+ if ((flag_sanitize & (SANITIZE_SHIFT | SANITIZE_DIVIDE
+ | SANITIZE_FLOAT_DIVIDE))
&& !processing_template_decl
&& current_function_decl != 0
&& !lookup_attribute ("no_sanitize_undefined",
@@ -4918,7 +4912,8 @@ cp_build_binary_op (location_t location,
tf_none));
op1 = maybe_constant_value (fold_non_dependent_expr_sfinae (op1,
tf_none));
- if (doing_div_or_mod && (flag_sanitize & SANITIZE_DIVIDE))
+ if (doing_div_or_mod && (flag_sanitize & (SANITIZE_DIVIDE
+ | SANITIZE_FLOAT_DIVIDE)))
{
/* For diagnostics we want to use the promoted types without
shorten_binary_op. So convert the arguments to the
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index 70c06fb81bb..1d8b78278bf 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -1139,7 +1139,7 @@ massage_init_elt (tree type, tree init, tsubst_flags_t complain)
/* When we defer constant folding within a statement, we may want to
defer this folding as well. */
tree t = fold_non_dependent_expr_sfinae (init, complain);
- t = maybe_constant_value (t);
+ t = maybe_constant_init (t);
if (TREE_CONSTANT (t))
init = t;
return init;
@@ -1988,10 +1988,10 @@ nothrow_spec_p_uninst (const_tree spec)
}
/* Combine the two exceptions specifier lists LIST and ADD, and return
- their union. If FN is non-null, it's the source of ADD. */
+ their union. */
tree
-merge_exception_specifiers (tree list, tree add, tree fn)
+merge_exception_specifiers (tree list, tree add)
{
tree noex, orig_list;
@@ -2007,22 +2007,18 @@ merge_exception_specifiers (tree list, tree add, tree fn)
if (nothrow_spec_p_uninst (add))
return list;
- noex = TREE_PURPOSE (list);
- if (DEFERRED_NOEXCEPT_SPEC_P (add))
- {
- /* If ADD is a deferred noexcept, we must have been called from
- process_subob_fn. For implicitly declared functions, we build up
- a list of functions to consider at instantiation time. */
- if (noex && operand_equal_p (noex, boolean_true_node, 0))
- noex = NULL_TREE;
- gcc_assert (fn && (!noex || is_overloaded_fn (noex)));
- noex = build_overload (fn, noex);
- }
- else if (nothrow_spec_p_uninst (list))
+ /* Two implicit noexcept specs (e.g. on a destructor) are equivalent. */
+ if (UNEVALUATED_NOEXCEPT_SPEC_P (add)
+ && UNEVALUATED_NOEXCEPT_SPEC_P (list))
+ return list;
+ /* We should have instantiated other deferred noexcept specs by now. */
+ gcc_assert (!DEFERRED_NOEXCEPT_SPEC_P (add));
+
+ if (nothrow_spec_p_uninst (list))
return add;
- else
- gcc_checking_assert (!TREE_PURPOSE (add)
- || cp_tree_equal (noex, TREE_PURPOSE (add)));
+ noex = TREE_PURPOSE (list);
+ gcc_checking_assert (!TREE_PURPOSE (add)
+ || cp_tree_equal (noex, TREE_PURPOSE (add)));
/* Combine the dynamic-exception-specifiers, if any. */
orig_list = list;
diff --git a/gcc/doc/gimple.texi b/gcc/doc/gimple.texi
index 9bb16e812dc..1e5473d109e 100644
--- a/gcc/doc/gimple.texi
+++ b/gcc/doc/gimple.texi
@@ -70,6 +70,7 @@ with the flag @option{-fdump-tree-gimple}.
@menu
* Tuple representation::
+* Class hierarchy of GIMPLE statements::
* GIMPLE instruction set::
* GIMPLE Exception Handling::
* Temporaries::
@@ -287,35 +288,135 @@ reduce memory utilization further by removing these sets).
@end itemize
All the other tuples are defined in terms of these three basic
-ones. Each tuple will add some fields. The main gimple type
-is defined to be the union of all these structures (@code{GTY} markers
-elided for clarity):
+ones. Each tuple will add some fields.
+
+
+@node Class hierarchy of GIMPLE statements
+@section Class hierarchy of GIMPLE statements
+@cindex GIMPLE class hierarchy
+
+The following diagram shows the C++ inheritance hierarchy of statement
+kinds, along with their relationships to @code{GSS_} values (layouts) and
+@code{GIMPLE_} values (codes):
@smallexample
-union gimple_statement_d
-@{
- struct gimple_statement_base gsbase;
- struct gimple_statement_with_ops gsops;
- struct gimple_statement_with_memory_ops gsmem;
- struct gimple_statement_omp omp;
- struct gimple_statement_bind gimple_bind;
- struct gimple_statement_catch gimple_catch;
- struct gimple_statement_eh_filter gimple_eh_filter;
- struct gimple_statement_phi gimple_phi;
- struct gimple_statement_resx gimple_resx;
- struct gimple_statement_try gimple_try;
- struct gimple_statement_wce gimple_wce;
- struct gimple_statement_asm gimple_asm;
- struct gimple_statement_omp_critical gimple_omp_critical;
- struct gimple_statement_omp_for gimple_omp_for;
- struct gimple_statement_omp_parallel gimple_omp_parallel;
- struct gimple_statement_omp_task gimple_omp_task;
- struct gimple_statement_omp_sections gimple_omp_sections;
- struct gimple_statement_omp_single gimple_omp_single;
- struct gimple_statement_omp_continue gimple_omp_continue;
- struct gimple_statement_omp_atomic_load gimple_omp_atomic_load;
- struct gimple_statement_omp_atomic_store gimple_omp_atomic_store;
-@};
+ gimple_statement_base
+ | layout: GSS_BASE
+ | used for 4 codes: GIMPLE_ERROR_MARK
+ | GIMPLE_NOP
+ | GIMPLE_OMP_SECTIONS_SWITCH
+ | GIMPLE_PREDICT
+ |
+ + gimple_statement_with_ops_base
+ | | (no GSS layout)
+ | |
+ | + gimple_statement_with_ops
+ | | layout: GSS_WITH_OPS
+ | | Used for 5 codes: GIMPLE_COND
+ | | GIMPLE_DEBUG
+ | | GIMPLE_GOTO
+ | | GIMPLE_LABEL
+ | | GIMPLE_SWITCH
+ | |
+ | + gimple_statement_with_memory_ops_base
+ | | layout: GSS_WITH_MEM_OPS_BASE
+ | |
+ | + gimple_statement_with_memory_ops
+ | | layout: GSS_WITH_MEM_OPS
+ | | used for 2 codes: GIMPLE_ASSIGN and GIMPLE_RETURN
+ | |
+ | + gimple_statement_call
+ | | layout: GSS_CALL, code: GIMPLE_CALL
+ | |
+ | + gimple_statement_asm
+ | | layout: GSS_ASM, code: GIMPLE_ASM
+ | |
+ | + gimple_statement_transaction
+ | layout: GSS_TRANSACTION, code: GIMPLE_TRANSACTION
+ |
+ + gimple_statement_omp
+ | | layout: GSS_OMP. Used for code GIMPLE_OMP_SECTION
+ | |
+ | + gimple_statement_omp_critical
+ | | layout: GSS_OMP_CRITICAL, code: GIMPLE_OMP_CRITICAL
+ | |
+ | + gimple_statement_omp_for
+ | | layout: GSS_OMP_FOR, code: GIMPLE_OMP_FOR
+ | |
+ | + gimple_statement_omp_parallel_layout
+ | | | layout: GSS_OMP_PARALLEL_LAYOUT
+ | | |
+ | | + gimple_statement_omp_taskreg
+ | | | |
+ | | | + gimple_statement_omp_parallel
+ | | | | code: GIMPLE_OMP_PARALLEL
+ | | | |
+ | | | + gimple_statement_omp_task
+ | | | code: GIMPLE_OMP_TASK
+ | | |
+ | | + gimple_statement_omp_target
+ | | code: GIMPLE_OMP_TARGET
+ | |
+ | + gimple_statement_omp_sections
+ | | layout: GSS_OMP_SECTIONS, code: GIMPLE_OMP_SECTIONS
+ | |
+ | + gimple_statement_omp_single_layout
+ | | layout: GSS_OMP_SINGLE_LAYOUT
+ | |
+ | + gimple_statement_omp_single
+ | | code: GIMPLE_OMP_SINGLE
+ | |
+ | + gimple_statement_omp_teams
+ | code: GIMPLE_OMP_TEAMS
+ |
+ + gimple_statement_bind
+ | layout: GSS_BIND, code: GIMPLE_BIND
+ |
+ + gimple_statement_catch
+ | layout: GSS_CATCH, code: GIMPLE_CATCH
+ |
+ + gimple_statement_eh_filter
+ | layout: GSS_EH_FILTER, code: GIMPLE_EH_FILTER
+ |
+ + gimple_statement_eh_else
+ | layout: GSS_EH_ELSE, code: GIMPLE_EH_ELSE
+ |
+ + gimple_statement_eh_mnt
+ | layout: GSS_EH_MNT, code: GIMPLE_EH_MUST_NOT_THROW
+ |
+ + gimple_statement_phi
+ | layout: GSS_PHI, code: GIMPLE_PHI
+ |
+ + gimple_statement_eh_ctrl
+ | | layout: GSS_EH_CTRL
+ | |
+ | + gimple_statement_resx
+ | | code: GIMPLE_RESX
+ | |
+ | + gimple_statement_eh_dispatch
+ | code: GIMPLE_EH_DISPATCH
+ |
+ + gimple_statement_try
+ | layout: GSS_TRY, code: GIMPLE_TRY
+ |
+ + gimple_statement_wce
+ | layout: GSS_WCE, code: GIMPLE_WITH_CLEANUP_EXPR
+ |
+ + gimple_statement_omp_continue
+ | layout: GSS_OMP_CONTINUE, code: GIMPLE_OMP_CONTINUE
+ |
+ + gimple_statement_omp_atomic_load
+ | layout: GSS_OMP_ATOMIC_LOAD, code: GIMPLE_OMP_ATOMIC_LOAD
+ |
+ + gimple_statement_omp_atomic_store_layout
+ | layout: GSS_OMP_ATOMIC_STORE_LAYOUT,
+ | code: GIMPLE_OMP_ATOMIC_STORE
+ |
+ + gimple_statement_omp_atomic_store
+ | code: GIMPLE_OMP_ATOMIC_STORE
+ |
+ + gimple_statement_omp_return
+ code: GIMPLE_OMP_RETURN
@end smallexample
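A deliberately simplified, stand-alone model (not GCC code) of the pairing the diagram describes: every statement code maps to exactly one GSS_ layout, while a single layout can serve several codes.

    #include <cassert>

    enum model_code   { NOP, PREDICT, COND, GOTO, ASSIGN, CALL };
    enum model_layout { GSS_BASE, GSS_WITH_OPS, GSS_WITH_MEM_OPS, GSS_CALL };

    struct stmt_base         { model_code code; };                  // ~ gimple_statement_base
    struct stmt_with_ops     : stmt_base { unsigned num_ops; };     // ~ ..._with_ops
    struct stmt_with_mem_ops : stmt_with_ops { bool has_vdef; };    // ~ ..._with_memory_ops
    struct stmt_call         : stmt_with_mem_ops { void *callee; }; // ~ gimple_statement_call

    static model_layout
    layout_for (model_code c)
    {
      switch (c)
        {
        case NOP: case PREDICT:  return GSS_BASE;        // several codes, one layout
        case COND: case GOTO:    return GSS_WITH_OPS;
        case ASSIGN:             return GSS_WITH_MEM_OPS;
        case CALL:               return GSS_CALL;        // one code, its own layout
        }
      return GSS_BASE;
    }

    int main ()
    {
      assert (layout_for (NOP) == layout_for (PREDICT));
      assert (layout_for (CALL) == GSS_CALL);
      return 0;
    }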
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index da7a00ed00c..cc6d405f168 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -4732,7 +4732,7 @@ Warn if a global function is defined without a previous declaration.
Do so even if the definition itself provides a prototype.
Use this option to detect global functions that are not declared in
header files. In C, no warnings are issued for functions with previous
-non-prototype declarations; use @option{-Wmissing-prototype} to detect
+non-prototype declarations; use @option{-Wmissing-prototypes} to detect
missing prototypes. In C++, no warnings are issued for function templates,
or for inline functions, or for functions in anonymous namespaces.
diff --git a/gcc/dominance.c b/gcc/dominance.c
index ff0dfe6714f..7adec4f7376 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -681,24 +681,30 @@ calculate_dominance_info (enum cdi_direction dir)
/* Free dominance information for direction DIR. */
void
-free_dominance_info (enum cdi_direction dir)
+free_dominance_info (function *fn, enum cdi_direction dir)
{
basic_block bb;
unsigned int dir_index = dom_convert_dir_to_idx (dir);
- if (!dom_info_available_p (dir))
+ if (!dom_info_available_p (fn, dir))
return;
- FOR_ALL_BB_FN (bb, cfun)
+ FOR_ALL_BB_FN (bb, fn)
{
et_free_tree_force (bb->dom[dir_index]);
bb->dom[dir_index] = NULL;
}
et_free_pools ();
- n_bbs_in_dom_tree[dir_index] = 0;
+ fn->cfg->x_n_bbs_in_dom_tree[dir_index] = 0;
+
+ fn->cfg->x_dom_computed[dir_index] = DOM_NONE;
+}
- dom_computed[dir_index] = DOM_NONE;
+void
+free_dominance_info (enum cdi_direction dir)
+{
+ free_dominance_info (cfun, dir);
}
/* Return the immediate dominator of basic block BB. */
@@ -1461,11 +1467,19 @@ next_dom_son (enum cdi_direction dir, basic_block bb)
/* Return dominance availability for dominance info DIR. */
enum dom_state
-dom_info_state (enum cdi_direction dir)
+dom_info_state (function *fn, enum cdi_direction dir)
{
+ if (!fn->cfg)
+ return DOM_NONE;
+
unsigned int dir_index = dom_convert_dir_to_idx (dir);
+ return fn->cfg->x_dom_computed[dir_index];
+}
- return dom_computed[dir_index];
+enum dom_state
+dom_info_state (enum cdi_direction dir)
+{
+ return dom_info_state (cfun, dir);
}
/* Set the dominance availability for dominance info DIR to NEW_STATE. */
@@ -1481,11 +1495,15 @@ set_dom_info_availability (enum cdi_direction dir, enum dom_state new_state)
/* Returns true if dominance information for direction DIR is available. */
bool
-dom_info_available_p (enum cdi_direction dir)
+dom_info_available_p (function *fn, enum cdi_direction dir)
{
- unsigned int dir_index = dom_convert_dir_to_idx (dir);
+ return dom_info_state (fn, dir) != DOM_NONE;
+}
- return dom_computed[dir_index] != DOM_NONE;
+bool
+dom_info_available_p (enum cdi_direction dir)
+{
+ return dom_info_available_p (cfun, dir);
}
DEBUG_FUNCTION void
diff --git a/gcc/expr.c b/gcc/expr.c
index 989a8780dc9..2868d9d3443 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -68,22 +68,6 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa-address.h"
#include "cfgexpand.h"
-/* Decide whether a function's arguments should be processed
- from first to last or from last to first.
-
- They should if the stack and args grow in opposite directions, but
- only if we have push insns. */
-
-#ifdef PUSH_ROUNDING
-
-#ifndef PUSH_ARGS_REVERSED
-#if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD)
-#define PUSH_ARGS_REVERSED /* If it's last to first. */
-#endif
-#endif
-
-#endif
-
#ifndef STACK_PUSH_CODE
#ifdef STACK_GROWS_DOWNWARD
#define STACK_PUSH_CODE PRE_DEC
@@ -4334,11 +4318,7 @@ emit_push_insn (rtx x, enum machine_mode mode, tree type, rtx size,
/* Loop over all the words allocated on the stack for this arg. */
/* We can do it by words, because any scalar bigger than a word
has a size a multiple of a word. */
-#ifndef PUSH_ARGS_REVERSED
- for (i = not_stack; i < size; i++)
-#else
for (i = size - 1; i >= not_stack; i--)
-#endif
if (i >= not_stack + offset)
emit_push_insn (operand_subword_force (x, i, mode),
word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
diff --git a/gcc/flag-types.h b/gcc/flag-types.h
index fc3261bbbb7..caf4039f9df 100644
--- a/gcc/flag-types.h
+++ b/gcc/flag-types.h
@@ -228,6 +228,7 @@ enum sanitize_code {
SANITIZE_SI_OVERFLOW = 1 << 9,
SANITIZE_BOOL = 1 << 10,
SANITIZE_ENUM = 1 << 11,
+ SANITIZE_FLOAT_DIVIDE = 1 << 12,
SANITIZE_UNDEFINED = SANITIZE_SHIFT | SANITIZE_DIVIDE | SANITIZE_UNREACHABLE
| SANITIZE_VLA | SANITIZE_NULL | SANITIZE_RETURN
| SANITIZE_SI_OVERFLOW | SANITIZE_BOOL | SANITIZE_ENUM
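Together with the cp/typeck.c and gcc.c hunks elsewhere in this patch, the new SANITIZE_FLOAT_DIVIDE bit adds opt-in instrumentation of floating-point division by zero that is not folded into the -fsanitize=undefined group, while the gcc.c change below still links the ubsan runtime for it. A stand-alone sketch of the intended user-visible behaviour; the command-line spelling -fsanitize=float-divide-by-zero is assumed, since the option-table hunk is not part of this excerpt:

    // g++ -fsanitize=float-divide-by-zero fdiv.cc && ./a.out
    #include <cstdio>

    double f (double x, double y) { return x / y; }  // division gets instrumented

    int main ()
    {
      std::printf ("%f\n", f (1.0, 0.0));  // expected to raise a ubsan
      return 0;                            // runtime diagnostic
    }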
diff --git a/gcc/gcc.c b/gcc/gcc.c
index e5130d13535..7bea6d79ac2 100644
--- a/gcc/gcc.c
+++ b/gcc/gcc.c
@@ -8170,7 +8170,7 @@ sanitize_spec_function (int argc, const char **argv)
if (strcmp (argv[0], "thread") == 0)
return (flag_sanitize & SANITIZE_THREAD) ? "" : NULL;
if (strcmp (argv[0], "undefined") == 0)
- return ((flag_sanitize & SANITIZE_UNDEFINED)
+ return ((flag_sanitize & (SANITIZE_UNDEFINED | SANITIZE_FLOAT_DIVIDE))
&& !flag_sanitize_undefined_trap_on_error) ? "" : NULL;
if (strcmp (argv[0], "leak") == 0)
return ((flag_sanitize
diff --git a/gcc/gimple.c b/gcc/gimple.c
index e51329f2702..d6bc15bf424 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -2565,8 +2565,8 @@ infer_nonnull_range (gimple stmt, tree op, bool dereference, bool attribute)
{
for (unsigned int i = 0; i < gimple_call_num_args (stmt); i++)
{
- if (operand_equal_p (op, gimple_call_arg (stmt, i), 0)
- && POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (stmt, i))))
+ if (POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (stmt, i)))
+ && operand_equal_p (op, gimple_call_arg (stmt, i), 0))
return true;
}
return false;
diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog
index 1a863b04dbc..8d86e746d6e 100644
--- a/gcc/go/ChangeLog
+++ b/gcc/go/ChangeLog
@@ -1,3 +1,12 @@
+2014-04-30 Chris Manghane <cmang@google.com>
+
+ * go-gcc.cc: #include "langhooks.h".
+ (Gcc_backend::Gcc_backend): Add constructor.
+ (Gcc_backend::lookup_function): New function.
+ (Gcc_backend::define_builtin): New private function.
+ (Gcc_backend::gcc_backend): Remove.
+ (go_get_backend): Use new to create new Gcc_backend.
+
2014-04-25 Chris Manghane <cmang@google.com>
* go-gcc.cc: Include "cgraph.h" and "gimplify.h".
diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc
index a0283fe12e0..45827a08ea2 100644
--- a/gcc/go/go-gcc.cc
+++ b/gcc/go/go-gcc.cc
@@ -34,6 +34,7 @@
#include "basic-block.h"
#include "gimple-expr.h"
#include "gimplify.h"
+#include "langhooks.h"
#include "toplev.h"
#include "output.h"
#include "real.h"
@@ -131,6 +132,8 @@ class Blabel : public Gcc_tree
class Gcc_backend : public Backend
{
public:
+ Gcc_backend();
+
// Types.
Btype*
@@ -425,6 +428,9 @@ class Gcc_backend : public Backend
bool
function_set_body(Bfunction* function, Bstatement* code_stmt);
+ Bfunction*
+ lookup_builtin(const std::string&);
+
void
write_global_definitions(const std::vector<Btype*>&,
const std::vector<Bexpression*>&,
@@ -459,6 +465,14 @@ class Gcc_backend : public Backend
tree
non_zero_size_type(tree);
+
+private:
+ void
+ define_builtin(built_in_function bcode, const char* name, const char* libname,
+ tree fntype, bool const_p);
+
+ // A mapping of the GCC built-ins exposed to GCCGo.
+ std::map<std::string, Bfunction*> builtin_functions_;
};
// A helper function.
@@ -469,6 +483,172 @@ get_identifier_from_string(const std::string& str)
return get_identifier_with_length(str.data(), str.length());
}
+// Define the built-in functions that are exposed to GCCGo.
+
+Gcc_backend::Gcc_backend()
+{
+ /* We need to define the fetch_and_add functions, since we use them
+ for ++ and --. */
+ tree t = this->integer_type(BITS_PER_UNIT, 1)->get_tree();
+ tree p = build_pointer_type(build_qualified_type(t, TYPE_QUAL_VOLATILE));
+ this->define_builtin(BUILT_IN_SYNC_ADD_AND_FETCH_1, "__sync_fetch_and_add_1",
+ NULL, build_function_type_list(t, p, t, NULL_TREE),
+ false);
+
+ t = this->integer_type(BITS_PER_UNIT * 2, 1)->get_tree();
+ p = build_pointer_type(build_qualified_type(t, TYPE_QUAL_VOLATILE));
+ this->define_builtin(BUILT_IN_SYNC_ADD_AND_FETCH_2, "__sync_fetch_and_add_2",
+ NULL, build_function_type_list(t, p, t, NULL_TREE),
+ false);
+
+ t = this->integer_type(BITS_PER_UNIT * 4, 1)->get_tree();
+ p = build_pointer_type(build_qualified_type(t, TYPE_QUAL_VOLATILE));
+ this->define_builtin(BUILT_IN_SYNC_ADD_AND_FETCH_4, "__sync_fetch_and_add_4",
+ NULL, build_function_type_list(t, p, t, NULL_TREE),
+ false);
+
+ t = this->integer_type(BITS_PER_UNIT * 8, 1)->get_tree();
+ p = build_pointer_type(build_qualified_type(t, TYPE_QUAL_VOLATILE));
+ this->define_builtin(BUILT_IN_SYNC_ADD_AND_FETCH_8, "__sync_fetch_and_add_8",
+ NULL, build_function_type_list(t, p, t, NULL_TREE),
+ false);
+
+ // We use __builtin_expect for magic import functions.
+ this->define_builtin(BUILT_IN_EXPECT, "__builtin_expect", NULL,
+ build_function_type_list(long_integer_type_node,
+ long_integer_type_node,
+ long_integer_type_node,
+ NULL_TREE),
+ true);
+
+ // We use __builtin_memcmp for struct comparisons.
+ this->define_builtin(BUILT_IN_MEMCMP, "__builtin_memcmp", "memcmp",
+ build_function_type_list(integer_type_node,
+ const_ptr_type_node,
+ const_ptr_type_node,
+ size_type_node,
+ NULL_TREE),
+ false);
+
+ // We provide some functions for the math library.
+ tree math_function_type = build_function_type_list(double_type_node,
+ double_type_node,
+ NULL_TREE);
+ tree math_function_type_long =
+ build_function_type_list(long_double_type_node, long_double_type_node,
+ long_double_type_node, NULL_TREE);
+ tree math_function_type_two = build_function_type_list(double_type_node,
+ double_type_node,
+ double_type_node,
+ NULL_TREE);
+ tree math_function_type_long_two =
+ build_function_type_list(long_double_type_node, long_double_type_node,
+ long_double_type_node, NULL_TREE);
+ this->define_builtin(BUILT_IN_ACOS, "__builtin_acos", "acos",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_ACOSL, "__builtin_acosl", "acosl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_ASIN, "__builtin_asin", "asin",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_ASINL, "__builtin_asinl", "asinl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_ATAN, "__builtin_atan", "atan",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_ATANL, "__builtin_atanl", "atanl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_ATAN2, "__builtin_atan2", "atan2",
+ math_function_type_two, true);
+ this->define_builtin(BUILT_IN_ATAN2L, "__builtin_atan2l", "atan2l",
+ math_function_type_long_two, true);
+ this->define_builtin(BUILT_IN_CEIL, "__builtin_ceil", "ceil",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_CEILL, "__builtin_ceill", "ceill",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_COS, "__builtin_cos", "cos",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_COSL, "__builtin_cosl", "cosl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_EXP, "__builtin_exp", "exp",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_EXPL, "__builtin_expl", "expl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_EXPM1, "__builtin_expm1", "expm1",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_EXPM1L, "__builtin_expm1l", "expm1l",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_FABS, "__builtin_fabs", "fabs",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_FABSL, "__builtin_fabsl", "fabsl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_FLOOR, "__builtin_floor", "floor",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_FLOORL, "__builtin_floorl", "floorl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_FMOD, "__builtin_fmod", "fmod",
+ math_function_type_two, true);
+ this->define_builtin(BUILT_IN_FMODL, "__builtin_fmodl", "fmodl",
+ math_function_type_long_two, true);
+ this->define_builtin(BUILT_IN_LDEXP, "__builtin_ldexp", "ldexp",
+ build_function_type_list(double_type_node,
+ double_type_node,
+ integer_type_node,
+ NULL_TREE),
+ true);
+ this->define_builtin(BUILT_IN_LDEXPL, "__builtin_ldexpl", "ldexpl",
+ build_function_type_list(long_double_type_node,
+ long_double_type_node,
+ integer_type_node,
+ NULL_TREE),
+ true);
+ this->define_builtin(BUILT_IN_LOG, "__builtin_log", "log",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_LOGL, "__builtin_logl", "logl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_LOG1P, "__builtin_log1p", "log1p",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_LOG1PL, "__builtin_log1pl", "log1pl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_LOG10, "__builtin_log10", "log10",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_LOG10L, "__builtin_log10l", "log10l",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_LOG2, "__builtin_log2", "log2",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_LOG2L, "__builtin_log2l", "log2l",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_SIN, "__builtin_sin", "sin",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_SINL, "__builtin_sinl", "sinl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_SQRT, "__builtin_sqrt", "sqrt",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_SQRTL, "__builtin_sqrtl", "sqrtl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_TAN, "__builtin_tan", "tan",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_TANL, "__builtin_tanl", "tanl",
+ math_function_type_long, true);
+ this->define_builtin(BUILT_IN_TRUNC, "__builtin_trunc", "trunc",
+ math_function_type, true);
+ this->define_builtin(BUILT_IN_TRUNCL, "__builtin_truncl", "truncl",
+ math_function_type_long, true);
+
+ // We use __builtin_return_address in the thunk we build for
+ // functions which call recover.
+ this->define_builtin(BUILT_IN_RETURN_ADDRESS, "__builtin_return_address",
+ NULL,
+ build_function_type_list(ptr_type_node,
+ unsigned_type_node,
+ NULL_TREE),
+ false);
+
+ // The compiler uses __builtin_trap for some exception handling
+ // cases.
+ this->define_builtin(BUILT_IN_TRAP, "__builtin_trap", NULL,
+ build_function_type(void_type_node, void_list_node),
+ false);
+}
+
// Get an unnamed integer type.
Btype*
@@ -2597,6 +2777,17 @@ Gcc_backend::function_set_body(Bfunction* function, Bstatement* code_stmt)
return true;
}
+// Look up a named built-in function in the current backend implementation.
+// Returns NULL if no built-in function by that name exists.
+
+Bfunction*
+Gcc_backend::lookup_builtin(const std::string& name)
+{
+ if (this->builtin_functions_.count(name) != 0)
+ return this->builtin_functions_[name];
+ return NULL;
+}
+
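Hypothetical usage sketch; the real caller lives in gofrontend/gogo.cc, which is not part of this excerpt, and `backend' stands for whichever Backend* the front end holds:

    Bfunction* fn = backend->lookup_builtin ("__builtin_memcmp");
    // NULL comes back only for names the constructor above never registered.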
// Write the definitions for all TYPE_DECLS, CONSTANT_DECLS,
// FUNCTION_DECLS, and VARIABLE_DECLS declared globally.
@@ -2679,16 +2870,38 @@ Gcc_backend::write_global_definitions(
delete[] defs;
}
-// The single backend.
+// Define a builtin function. BCODE is the builtin function code
+// defined by builtins.def. NAME is the name of the builtin function.
+// LIBNAME is the name of the corresponding library function, and is
+// NULL if there isn't one. FNTYPE is the type of the function.
+// CONST_P is true if the function has the const attribute.
-static Gcc_backend gcc_backend;
+void
+Gcc_backend::define_builtin(built_in_function bcode, const char* name,
+ const char* libname, tree fntype, bool const_p)
+{
+ tree decl = add_builtin_function(name, fntype, bcode, BUILT_IN_NORMAL,
+ libname, NULL_TREE);
+ if (const_p)
+ TREE_READONLY(decl) = 1;
+ set_builtin_decl(bcode, decl, true);
+ this->builtin_functions_[name] = this->make_function(decl);
+ if (libname != NULL)
+ {
+ decl = add_builtin_function(libname, fntype, bcode, BUILT_IN_NORMAL,
+ NULL, NULL_TREE);
+ if (const_p)
+ TREE_READONLY(decl) = 1;
+ this->builtin_functions_[libname] = this->make_function(decl);
+ }
+}
// Return the backend generator.
Backend*
go_get_backend()
{
- return &gcc_backend;
+ return new Gcc_backend();
}
// FIXME: Temporary functions while converting to the new backend
diff --git a/gcc/go/gofrontend/backend.h b/gcc/go/gofrontend/backend.h
index aca3dc6f90e..786223fe736 100644
--- a/gcc/go/gofrontend/backend.h
+++ b/gcc/go/gofrontend/backend.h
@@ -660,6 +660,11 @@ class Backend
virtual bool
function_set_body(Bfunction* function, Bstatement* code_stmt) = 0;
+ // Look up a named built-in function in the current backend implementation.
+ // Returns NULL if no built-in function by that name exists.
+ virtual Bfunction*
+ lookup_builtin(const std::string&) = 0;
+
// Utility.
// Write the definitions for all TYPE_DECLS, CONSTANT_DECLS,
diff --git a/gcc/go/gofrontend/go.cc b/gcc/go/gofrontend/go.cc
index ac772a095f7..d2331f3e0ae 100644
--- a/gcc/go/gofrontend/go.cc
+++ b/gcc/go/gofrontend/go.cc
@@ -34,9 +34,6 @@ go_create_gogo(int int_type_size, int pointer_size, const char *pkgpath,
if (relative_import_path != NULL)
::gogo->set_relative_import_path(relative_import_path);
-
- // FIXME: This should be in the gcc dependent code.
- ::gogo->define_builtin_function_trees();
}
// Parse the input files.
diff --git a/gcc/go/gofrontend/gogo-tree.cc b/gcc/go/gofrontend/gogo-tree.cc
index 6b19a1d82e1..5b9a8180287 100644
--- a/gcc/go/gofrontend/gogo-tree.cc
+++ b/gcc/go/gofrontend/gogo-tree.cc
@@ -36,266 +36,6 @@ saw_errors()
return errorcount != 0 || sorrycount != 0;
}
-// A helper function.
-
-static inline tree
-get_identifier_from_string(const std::string& str)
-{
- return get_identifier_with_length(str.data(), str.length());
-}
-
-// Builtin functions.
-
-static std::map<std::string, tree> builtin_functions;
-
-// Define a builtin function. BCODE is the builtin function code
-// defined by builtins.def. NAME is the name of the builtin function.
-// LIBNAME is the name of the corresponding library function, and is
-// NULL if there isn't one. FNTYPE is the type of the function.
-// CONST_P is true if the function has the const attribute.
-
-static void
-define_builtin(built_in_function bcode, const char* name, const char* libname,
- tree fntype, bool const_p)
-{
- tree decl = add_builtin_function(name, fntype, bcode, BUILT_IN_NORMAL,
- libname, NULL_TREE);
- if (const_p)
- TREE_READONLY(decl) = 1;
- set_builtin_decl(bcode, decl, true);
- builtin_functions[name] = decl;
- if (libname != NULL)
- {
- decl = add_builtin_function(libname, fntype, bcode, BUILT_IN_NORMAL,
- NULL, NULL_TREE);
- if (const_p)
- TREE_READONLY(decl) = 1;
- builtin_functions[libname] = decl;
- }
-}
-
-// Create trees for implicit builtin functions.
-
-void
-Gogo::define_builtin_function_trees()
-{
- /* We need to define the fetch_and_add functions, since we use them
- for ++ and --. */
- tree t = go_type_for_size(BITS_PER_UNIT, 1);
- tree p = build_pointer_type(build_qualified_type(t, TYPE_QUAL_VOLATILE));
- define_builtin(BUILT_IN_SYNC_ADD_AND_FETCH_1, "__sync_fetch_and_add_1", NULL,
- build_function_type_list(t, p, t, NULL_TREE), false);
-
- t = go_type_for_size(BITS_PER_UNIT * 2, 1);
- p = build_pointer_type(build_qualified_type(t, TYPE_QUAL_VOLATILE));
- define_builtin (BUILT_IN_SYNC_ADD_AND_FETCH_2, "__sync_fetch_and_add_2", NULL,
- build_function_type_list(t, p, t, NULL_TREE), false);
-
- t = go_type_for_size(BITS_PER_UNIT * 4, 1);
- p = build_pointer_type(build_qualified_type(t, TYPE_QUAL_VOLATILE));
- define_builtin(BUILT_IN_SYNC_ADD_AND_FETCH_4, "__sync_fetch_and_add_4", NULL,
- build_function_type_list(t, p, t, NULL_TREE), false);
-
- t = go_type_for_size(BITS_PER_UNIT * 8, 1);
- p = build_pointer_type(build_qualified_type(t, TYPE_QUAL_VOLATILE));
- define_builtin(BUILT_IN_SYNC_ADD_AND_FETCH_8, "__sync_fetch_and_add_8", NULL,
- build_function_type_list(t, p, t, NULL_TREE), false);
-
- // We use __builtin_expect for magic import functions.
- define_builtin(BUILT_IN_EXPECT, "__builtin_expect", NULL,
- build_function_type_list(long_integer_type_node,
- long_integer_type_node,
- long_integer_type_node,
- NULL_TREE),
- true);
-
- // We use __builtin_memcmp for struct comparisons.
- define_builtin(BUILT_IN_MEMCMP, "__builtin_memcmp", "memcmp",
- build_function_type_list(integer_type_node,
- const_ptr_type_node,
- const_ptr_type_node,
- size_type_node,
- NULL_TREE),
- false);
-
- // We provide some functions for the math library.
- tree math_function_type = build_function_type_list(double_type_node,
- double_type_node,
- NULL_TREE);
- tree math_function_type_long =
- build_function_type_list(long_double_type_node, long_double_type_node,
- long_double_type_node, NULL_TREE);
- tree math_function_type_two = build_function_type_list(double_type_node,
- double_type_node,
- double_type_node,
- NULL_TREE);
- tree math_function_type_long_two =
- build_function_type_list(long_double_type_node, long_double_type_node,
- long_double_type_node, NULL_TREE);
- define_builtin(BUILT_IN_ACOS, "__builtin_acos", "acos",
- math_function_type, true);
- define_builtin(BUILT_IN_ACOSL, "__builtin_acosl", "acosl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_ASIN, "__builtin_asin", "asin",
- math_function_type, true);
- define_builtin(BUILT_IN_ASINL, "__builtin_asinl", "asinl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_ATAN, "__builtin_atan", "atan",
- math_function_type, true);
- define_builtin(BUILT_IN_ATANL, "__builtin_atanl", "atanl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_ATAN2, "__builtin_atan2", "atan2",
- math_function_type_two, true);
- define_builtin(BUILT_IN_ATAN2L, "__builtin_atan2l", "atan2l",
- math_function_type_long_two, true);
- define_builtin(BUILT_IN_CEIL, "__builtin_ceil", "ceil",
- math_function_type, true);
- define_builtin(BUILT_IN_CEILL, "__builtin_ceill", "ceill",
- math_function_type_long, true);
- define_builtin(BUILT_IN_COS, "__builtin_cos", "cos",
- math_function_type, true);
- define_builtin(BUILT_IN_COSL, "__builtin_cosl", "cosl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_EXP, "__builtin_exp", "exp",
- math_function_type, true);
- define_builtin(BUILT_IN_EXPL, "__builtin_expl", "expl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_EXPM1, "__builtin_expm1", "expm1",
- math_function_type, true);
- define_builtin(BUILT_IN_EXPM1L, "__builtin_expm1l", "expm1l",
- math_function_type_long, true);
- define_builtin(BUILT_IN_FABS, "__builtin_fabs", "fabs",
- math_function_type, true);
- define_builtin(BUILT_IN_FABSL, "__builtin_fabsl", "fabsl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_FLOOR, "__builtin_floor", "floor",
- math_function_type, true);
- define_builtin(BUILT_IN_FLOORL, "__builtin_floorl", "floorl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_FMOD, "__builtin_fmod", "fmod",
- math_function_type_two, true);
- define_builtin(BUILT_IN_FMODL, "__builtin_fmodl", "fmodl",
- math_function_type_long_two, true);
- define_builtin(BUILT_IN_LDEXP, "__builtin_ldexp", "ldexp",
- build_function_type_list(double_type_node,
- double_type_node,
- integer_type_node,
- NULL_TREE),
- true);
- define_builtin(BUILT_IN_LDEXPL, "__builtin_ldexpl", "ldexpl",
- build_function_type_list(long_double_type_node,
- long_double_type_node,
- integer_type_node,
- NULL_TREE),
- true);
- define_builtin(BUILT_IN_LOG, "__builtin_log", "log",
- math_function_type, true);
- define_builtin(BUILT_IN_LOGL, "__builtin_logl", "logl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_LOG1P, "__builtin_log1p", "log1p",
- math_function_type, true);
- define_builtin(BUILT_IN_LOG1PL, "__builtin_log1pl", "log1pl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_LOG10, "__builtin_log10", "log10",
- math_function_type, true);
- define_builtin(BUILT_IN_LOG10L, "__builtin_log10l", "log10l",
- math_function_type_long, true);
- define_builtin(BUILT_IN_LOG2, "__builtin_log2", "log2",
- math_function_type, true);
- define_builtin(BUILT_IN_LOG2L, "__builtin_log2l", "log2l",
- math_function_type_long, true);
- define_builtin(BUILT_IN_SIN, "__builtin_sin", "sin",
- math_function_type, true);
- define_builtin(BUILT_IN_SINL, "__builtin_sinl", "sinl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_SQRT, "__builtin_sqrt", "sqrt",
- math_function_type, true);
- define_builtin(BUILT_IN_SQRTL, "__builtin_sqrtl", "sqrtl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_TAN, "__builtin_tan", "tan",
- math_function_type, true);
- define_builtin(BUILT_IN_TANL, "__builtin_tanl", "tanl",
- math_function_type_long, true);
- define_builtin(BUILT_IN_TRUNC, "__builtin_trunc", "trunc",
- math_function_type, true);
- define_builtin(BUILT_IN_TRUNCL, "__builtin_truncl", "truncl",
- math_function_type_long, true);
-
- // We use __builtin_return_address in the thunk we build for
- // functions which call recover.
- define_builtin(BUILT_IN_RETURN_ADDRESS, "__builtin_return_address", NULL,
- build_function_type_list(ptr_type_node,
- unsigned_type_node,
- NULL_TREE),
- false);
-
- // The compiler uses __builtin_trap for some exception handling
- // cases.
- define_builtin(BUILT_IN_TRAP, "__builtin_trap", NULL,
- build_function_type(void_type_node, void_list_node),
- false);
-}
-
-// Get the backend representation.
-
-Bfunction*
-Function_declaration::get_or_make_decl(Gogo* gogo, Named_object* no)
-{
- if (this->fndecl_ == NULL)
- {
- // Let Go code use an asm declaration to pick up a builtin
- // function.
- if (!this->asm_name_.empty())
- {
- std::map<std::string, tree>::const_iterator p =
- builtin_functions.find(this->asm_name_);
- if (p != builtin_functions.end())
- {
- this->fndecl_ = tree_to_function(p->second);
- return this->fndecl_;
- }
- }
-
- std::string asm_name;
- if (this->asm_name_.empty())
- {
- asm_name = (no->package() == NULL
- ? gogo->pkgpath_symbol()
- : no->package()->pkgpath_symbol());
- asm_name.append(1, '.');
- asm_name.append(Gogo::unpack_hidden_name(no->name()));
- if (this->fntype_->is_method())
- {
- asm_name.append(1, '.');
- Type* rtype = this->fntype_->receiver()->type();
- asm_name.append(rtype->mangled_name(gogo));
- }
- }
-
- Btype* functype = this->fntype_->get_backend_fntype(gogo);
- this->fndecl_ =
- gogo->backend()->function(functype, no->get_id(gogo), asm_name,
- true, true, true, false, false,
- this->location());
- }
-
- return this->fndecl_;
-}
-
-// Build the descriptor for a function declaration. This won't
-// necessarily happen if the package has just a declaration for the
-// function and no other reference to it, but we may still need the
-// descriptor for references from other packages.
-void
-Function_declaration::build_backend_descriptor(Gogo* gogo)
-{
- if (this->descriptor_ != NULL)
- {
- Translate_context context(gogo, NULL, NULL, NULL);
- this->descriptor_->get_tree(&context);
- }
-}
-
// Return the integer type to use for a size.
GO_EXTERN_C
@@ -380,121 +120,3 @@ go_type_for_mode(enum machine_mode mode, int unsignedp)
else
return NULL_TREE;
}
-
-// Build a constructor for a slice. SLICE_TYPE_TREE is the type of
-// the slice. VALUES is the value pointer and COUNT is the number of
-// entries. If CAPACITY is not NULL, it is the capacity; otherwise
-// the capacity and the count are the same.
-
-tree
-Gogo::slice_constructor(tree slice_type_tree, tree values, tree count,
- tree capacity)
-{
- go_assert(TREE_CODE(slice_type_tree) == RECORD_TYPE);
-
- vec<constructor_elt, va_gc> *init;
- vec_alloc(init, 3);
-
- tree field = TYPE_FIELDS(slice_type_tree);
- go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__values") == 0);
- constructor_elt empty = {NULL, NULL};
- constructor_elt* elt = init->quick_push(empty);
- elt->index = field;
- go_assert(TYPE_MAIN_VARIANT(TREE_TYPE(field))
- == TYPE_MAIN_VARIANT(TREE_TYPE(values)));
- elt->value = values;
-
- count = fold_convert(sizetype, count);
- if (capacity == NULL_TREE)
- {
- count = save_expr(count);
- capacity = count;
- }
-
- field = DECL_CHAIN(field);
- go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__count") == 0);
- elt = init->quick_push(empty);
- elt->index = field;
- elt->value = fold_convert(TREE_TYPE(field), count);
-
- field = DECL_CHAIN(field);
- go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__capacity") == 0);
- elt = init->quick_push(empty);
- elt->index = field;
- elt->value = fold_convert(TREE_TYPE(field), capacity);
-
- return build_constructor(slice_type_tree, init);
-}
-
-// Mark a function as a builtin library function.
-
-void
-Gogo::mark_fndecl_as_builtin_library(tree fndecl)
-{
- DECL_EXTERNAL(fndecl) = 1;
- TREE_PUBLIC(fndecl) = 1;
- DECL_ARTIFICIAL(fndecl) = 1;
- TREE_NOTHROW(fndecl) = 1;
- DECL_VISIBILITY(fndecl) = VISIBILITY_DEFAULT;
- DECL_VISIBILITY_SPECIFIED(fndecl) = 1;
-}
-
-// Build a call to a builtin function.
-
-tree
-Gogo::call_builtin(tree* pdecl, Location location, const char* name,
- int nargs, tree rettype, ...)
-{
- if (rettype == error_mark_node)
- return error_mark_node;
-
- tree* types = new tree[nargs];
- tree* args = new tree[nargs];
-
- va_list ap;
- va_start(ap, rettype);
- for (int i = 0; i < nargs; ++i)
- {
- types[i] = va_arg(ap, tree);
- args[i] = va_arg(ap, tree);
- if (types[i] == error_mark_node || args[i] == error_mark_node)
- {
- delete[] types;
- delete[] args;
- return error_mark_node;
- }
- }
- va_end(ap);
-
- if (*pdecl == NULL_TREE)
- {
- tree fnid = get_identifier(name);
-
- tree argtypes = NULL_TREE;
- tree* pp = &argtypes;
- for (int i = 0; i < nargs; ++i)
- {
- *pp = tree_cons(NULL_TREE, types[i], NULL_TREE);
- pp = &TREE_CHAIN(*pp);
- }
- *pp = void_list_node;
-
- tree fntype = build_function_type(rettype, argtypes);
-
- *pdecl = build_decl(BUILTINS_LOCATION, FUNCTION_DECL, fnid, fntype);
- Gogo::mark_fndecl_as_builtin_library(*pdecl);
- go_preserve_from_gc(*pdecl);
- }
-
- tree fnptr = build_fold_addr_expr(*pdecl);
- if (CAN_HAVE_LOCATION_P(fnptr))
- SET_EXPR_LOCATION(fnptr, location.gcc_location());
-
- tree ret = build_call_array(rettype, fnptr, nargs, args);
- SET_EXPR_LOCATION(ret, location.gcc_location());
-
- delete[] types;
- delete[] args;
-
- return ret;
-}
diff --git a/gcc/go/gofrontend/gogo.cc b/gcc/go/gofrontend/gogo.cc
index c6ff9886090..995a4f2d2da 100644
--- a/gcc/go/gofrontend/gogo.cc
+++ b/gcc/go/gofrontend/gogo.cc
@@ -4855,6 +4855,66 @@ Function::get_or_make_decl(Gogo* gogo, Named_object* no)
return this->fndecl_;
}
+// Get the backend representation.
+
+Bfunction*
+Function_declaration::get_or_make_decl(Gogo* gogo, Named_object* no)
+{
+ if (this->fndecl_ == NULL)
+ {
+ // Let Go code use an asm declaration to pick up a builtin
+ // function.
+ if (!this->asm_name_.empty())
+ {
+ Bfunction* builtin_decl =
+ gogo->backend()->lookup_builtin(this->asm_name_);
+ if (builtin_decl != NULL)
+ {
+ this->fndecl_ = builtin_decl;
+ return this->fndecl_;
+ }
+ }
+
+ std::string asm_name;
+ if (this->asm_name_.empty())
+ {
+ asm_name = (no->package() == NULL
+ ? gogo->pkgpath_symbol()
+ : no->package()->pkgpath_symbol());
+ asm_name.append(1, '.');
+ asm_name.append(Gogo::unpack_hidden_name(no->name()));
+ if (this->fntype_->is_method())
+ {
+ asm_name.append(1, '.');
+ Type* rtype = this->fntype_->receiver()->type();
+ asm_name.append(rtype->mangled_name(gogo));
+ }
+ }
+
+ Btype* functype = this->fntype_->get_backend_fntype(gogo);
+ this->fndecl_ =
+ gogo->backend()->function(functype, no->get_id(gogo), asm_name,
+ true, true, true, false, false,
+ this->location());
+ }
+
+ return this->fndecl_;
+}
+
+// Build the descriptor for a function declaration. This won't
+// necessarily happen if the package has just a declaration for the
+// function and no other reference to it, but we may still need the
+// descriptor for references from other packages.
+void
+Function_declaration::build_backend_descriptor(Gogo* gogo)
+{
+ if (this->descriptor_ != NULL)
+ {
+ Translate_context context(gogo, NULL, NULL, NULL);
+ this->descriptor_->get_tree(&context);
+ }
+}
+
// Return the function's decl after it has been built.
Bfunction*
diff --git a/gcc/go/gofrontend/gogo.h b/gcc/go/gofrontend/gogo.h
index 0be81b2aafe..37cbbdf4411 100644
--- a/gcc/go/gofrontend/gogo.h
+++ b/gcc/go/gofrontend/gogo.h
@@ -575,35 +575,10 @@ class Gogo
void
write_globals();
- // Create trees for implicit builtin functions.
- void
- define_builtin_function_trees();
-
- // Build a call to a builtin function. PDECL should point to a NULL
- // initialized static pointer which will hold the fndecl. NAME is
- // the name of the function. NARGS is the number of arguments.
- // RETTYPE is the return type. It is followed by NARGS pairs of
- // type and argument (both trees).
- static tree
- call_builtin(tree* pdecl, Location, const char* name, int nargs,
- tree rettype, ...);
-
// Build a call to the runtime error function.
Expression*
runtime_error(int code, Location);
- // Mark a function declaration as a builtin library function.
- static void
- mark_fndecl_as_builtin_library(tree fndecl);
-
- // Build a constructor for a slice. SLICE_TYPE_TREE is the type of
- // the slice. VALUES points to the values. COUNT is the size,
- // CAPACITY is the capacity. If CAPACITY is NULL, it is set to
- // COUNT.
- static tree
- slice_constructor(tree slice_type_tree, tree values, tree count,
- tree capacity);
-
// Build required interface method tables.
void
build_interface_method_tables();
diff --git a/gcc/go/gofrontend/import-archive.cc b/gcc/go/gofrontend/import-archive.cc
index 9a1d5b3d7ef..34fb528ab5c 100644
--- a/gcc/go/gofrontend/import-archive.cc
+++ b/gcc/go/gofrontend/import-archive.cc
@@ -261,7 +261,7 @@ Archive_file::interpret_header(const Archive_header* hdr, off_t off,
char size_string[size_string_size + 1];
memcpy(size_string, hdr->ar_size, size_string_size);
char* ps = size_string + size_string_size;
- while (ps[-1] == ' ')
+ while (ps > size_string && ps[-1] == ' ')
--ps;
*ps = '\0';
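
(Illustrative aside, not part of the patch: the import-archive.cc hunk above adds a lower bound so the right-trim loop cannot step before the start of size_string when the fixed-width ar_size field is entirely blank.  A minimal standalone C sketch of the same guarded trim pattern, using purely hypothetical names:)

    /* Right-trim a fixed-width header field without underflowing the
       buffer, mirroring the "ps > size_string" guard in the hunk.  */
    #include <stdio.h>
    #include <string.h>

    static void
    trim_field (char *out, const char *field, size_t field_size)
    {
      memcpy (out, field, field_size);
      char *end = out + field_size;
      while (end > out && end[-1] == ' ')   /* the lower bound is the fix */
        --end;
      *end = '\0';
    }

    int
    main (void)
    {
      char buf[16];
      trim_field (buf, "1234      ", 10);   /* -> "1234" */
      printf ("'%s'\n", buf);
      trim_field (buf, "          ", 10);   /* all blanks -> "" without underflow */
      printf ("'%s'\n", buf);
      return 0;
    }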
diff --git a/gcc/graphite-scop-detection.c b/gcc/graphite-scop-detection.c
index 635e21a8519..5d1c96e353d 100644
--- a/gcc/graphite-scop-detection.c
+++ b/gcc/graphite-scop-detection.c
@@ -1056,7 +1056,7 @@ create_sese_edges (vec<sd_region> regions)
#ifdef ENABLE_CHECKING
verify_loop_structure ();
- verify_ssa (false);
+ verify_ssa (false, true);
#endif
}
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 3ad80410f9f..0bc443302c6 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -2469,7 +2469,7 @@ rewrite_cross_bb_scalar_deps (scop_p scop, gimple_stmt_iterator *gsi)
gsi_next (gsi);
}
- rewrite_cross_bb_scalar_dependence (scop, zero_dim_array,
+ rewrite_cross_bb_scalar_dependence (scop, unshare_expr (zero_dim_array),
def, use_stmt);
}
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index f1ecec9e027..edb5bb9cfca 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,8 @@
+2014-04-28 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/60092
+ * lto-lang.c: Define flag_isoc11.
+
2014-04-23 David Malcolm <dmalcolm@redhat.com>
* lto-partition.c (add_references_to_partition): Update for
diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c
index 1c00ec65be7..e213e923318 100644
--- a/gcc/lto/lto-lang.c
+++ b/gcc/lto/lto-lang.c
@@ -192,6 +192,7 @@ static GTY(()) tree signed_size_type_node;
/* Flags needed to process builtins.def. */
int flag_isoc94;
int flag_isoc99;
+int flag_isoc11;
/* Attribute handlers. */
diff --git a/gcc/opts.c b/gcc/opts.c
index 1873b96a028..3c214f09701 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -1461,6 +1461,8 @@ common_handle_option (struct gcc_options *opts,
sizeof "signed-integer-overflow" -1 },
{ "bool", SANITIZE_BOOL, sizeof "bool" - 1 },
{ "enum", SANITIZE_ENUM, sizeof "enum" - 1 },
+ { "float-divide-by-zero", SANITIZE_FLOAT_DIVIDE,
+ sizeof "float-divide-by-zero" - 1 },
{ NULL, 0, 0 }
};
const char *comma;
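
(Illustrative aside, not part of the patch: the opts.c hunk above extends the name/flag/length table that the comma-separated -fsanitize= argument is matched against, adding "float-divide-by-zero".  A self-contained C sketch of that table-matching pattern, with hypothetical flag values and helper names:)

    #include <stdio.h>
    #include <string.h>

    #define SAN_FLOAT_DIVIDE 0x1   /* hypothetical bit values */
    #define SAN_BOOL         0x2
    #define SAN_ENUM         0x4

    static const struct { const char *name; unsigned flag; size_t len; } spec[] = {
      { "float-divide-by-zero", SAN_FLOAT_DIVIDE, sizeof "float-divide-by-zero" - 1 },
      { "bool", SAN_BOOL, sizeof "bool" - 1 },
      { "enum", SAN_ENUM, sizeof "enum" - 1 },
      { NULL, 0, 0 }
    };

    /* Parse a comma-separated sanitizer list into a flag mask.  */
    static unsigned
    parse_sanitize_arg (const char *p)
    {
      unsigned flags = 0;
      while (*p)
        {
          const char *comma = strchr (p, ',');
          size_t len = comma ? (size_t) (comma - p) : strlen (p);
          for (int i = 0; spec[i].name; i++)
            if (len == spec[i].len && memcmp (p, spec[i].name, len) == 0)
              flags |= spec[i].flag;
          p += len + (comma ? 1 : 0);
        }
      return flags;
    }

    int
    main (void)
    {
      printf ("0x%x\n", parse_sanitize_arg ("bool,float-divide-by-zero"));  /* 0x3 */
      return 0;
    }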
diff --git a/gcc/passes.c b/gcc/passes.c
index c0a76d62d21..dbff5875a5d 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -1716,6 +1716,7 @@ pass_manager::dump_profile_report () const
static void
execute_function_todo (function *fn, void *data)
{
+ bool from_ipa_pass = (cfun == NULL);
unsigned int flags = (size_t)data;
flags &= ~fn->last_verified;
if (!flags)
@@ -1761,32 +1762,56 @@ execute_function_todo (function *fn, void *data)
rebuild_cgraph_edges ();
/* If we've seen errors do not bother running any verifiers. */
- if (seen_error ())
+ if (!seen_error ())
{
- pop_cfun ();
- return;
- }
-
#if defined ENABLE_CHECKING
- if (flags & TODO_verify_ssa
- || (current_loops && loops_state_satisfies_p (LOOP_CLOSED_SSA)))
- {
- verify_gimple_in_cfg (cfun);
- verify_ssa (true);
- }
- else if (flags & TODO_verify_stmts)
- verify_gimple_in_cfg (cfun);
- if (flags & TODO_verify_flow)
- verify_flow_info ();
- if (current_loops && loops_state_satisfies_p (LOOP_CLOSED_SSA))
- verify_loop_closed_ssa (false);
- if (flags & TODO_verify_rtl_sharing)
- verify_rtl_sharing ();
+ dom_state pre_verify_state = dom_info_state (fn, CDI_DOMINATORS);
+ dom_state pre_verify_pstate = dom_info_state (fn, CDI_POST_DOMINATORS);
+
+ if (flags & TODO_verify_il)
+ {
+ if (cfun->curr_properties & PROP_trees)
+ {
+ if (cfun->curr_properties & PROP_cfg)
+ /* IPA passes leave stmts to be fixed up, so make sure to
+ not verify stmts really throw. */
+ verify_gimple_in_cfg (cfun, !from_ipa_pass);
+ else
+ verify_gimple_in_seq (gimple_body (cfun->decl));
+ }
+ if (cfun->curr_properties & PROP_ssa)
+ /* IPA passes leave stmts to be fixed up, so make sure to
+ not verify SSA operands whose verifier will choke on that. */
+ verify_ssa (true, !from_ipa_pass);
+ }
+ if (flags & TODO_verify_flow)
+ verify_flow_info ();
+ if (flags & TODO_verify_il)
+ {
+ if (current_loops
+ && loops_state_satisfies_p (LOOP_CLOSED_SSA))
+ verify_loop_closed_ssa (false);
+ }
+ if (flags & TODO_verify_rtl_sharing)
+ verify_rtl_sharing ();
+
+ /* Make sure verifiers don't change dominator state. */
+ gcc_assert (dom_info_state (fn, CDI_DOMINATORS) == pre_verify_state);
+ gcc_assert (dom_info_state (fn, CDI_POST_DOMINATORS) == pre_verify_pstate);
#endif
+ }
fn->last_verified = flags & TODO_verify_all;
pop_cfun ();
+
+ /* For IPA passes make sure to release dominator info, it can be
+ computed by non-verifying TODOs. */
+ if (from_ipa_pass)
+ {
+ free_dominance_info (fn, CDI_DOMINATORS);
+ free_dominance_info (fn, CDI_POST_DOMINATORS);
+ }
}
/* Perform all TODO actions. */
@@ -2167,7 +2192,7 @@ execute_one_pass (opt_pass *pass)
check_profile_consistency (pass->static_pass_number, 0, true);
/* Run post-pass cleanup and verification. */
- execute_todo (todo_after | pass->todo_flags_finish);
+ execute_todo (todo_after | pass->todo_flags_finish | TODO_verify_il);
if (profile_report && cfun && (cfun->curr_properties & PROP_cfg))
check_profile_consistency (pass->static_pass_number, 1, true);
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 261bb98a3a4..00ae0d12f54 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,166 @@
+2014-04-30 Alan Lawrence <alan.lawrence@arm.com>
+
+ * gcc.target/aarch64/simd/vuzps32_1.c: Expect zip1/2 insn rather than uzp1/2.
+ * gcc.target/aarch64/simd/vuzpu32_1.c: Likewise.
+ * gcc.target/aarch64/simd/vuzpf32_1.c: Likewise.
+
+2014-04-30 Alan Lawrence <alan.lawrence@arm.com>
+
+ * gcc.target/aarch64/simd/vuzpf32_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpf32.x: New file.
+ * gcc.target/aarch64/simd/vuzpp16_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpp16.x: New file.
+ * gcc.target/aarch64/simd/vuzpp8_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpp8.x: New file.
+ * gcc.target/aarch64/simd/vuzpqf32_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqf32.x: New file.
+ * gcc.target/aarch64/simd/vuzpqp16_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqp16.x: New file.
+ * gcc.target/aarch64/simd/vuzpqp8_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqp8.x: New file.
+ * gcc.target/aarch64/simd/vuzpqs16_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqs16.x: New file.
+ * gcc.target/aarch64/simd/vuzpqs32_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqs32.x: New file.
+ * gcc.target/aarch64/simd/vuzpqs8_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqs8.x: New file.
+ * gcc.target/aarch64/simd/vuzpqu16_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqu16.x: New file.
+ * gcc.target/aarch64/simd/vuzpqu32_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqu32.x: New file.
+ * gcc.target/aarch64/simd/vuzpqu8_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpqu8.x: New file.
+ * gcc.target/aarch64/simd/vuzps16_1.c: New file.
+ * gcc.target/aarch64/simd/vuzps16.x: New file.
+ * gcc.target/aarch64/simd/vuzps32_1.c: New file.
+ * gcc.target/aarch64/simd/vuzps32.x: New file.
+ * gcc.target/aarch64/simd/vuzps8_1.c: New file.
+ * gcc.target/aarch64/simd/vuzps8.x: New file.
+ * gcc.target/aarch64/simd/vuzpu16_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpu16.x: New file.
+ * gcc.target/aarch64/simd/vuzpu32_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpu32.x: New file.
+ * gcc.target/aarch64/simd/vuzpu8_1.c: New file.
+ * gcc.target/aarch64/simd/vuzpu8.x: New file.
+
+2014-04-30 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/48329
+ * gfortran.dg/vect/pr48329.f90: New testcase.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/div-by-zero-5.c: Fix formatting.
+ * c-c++-common/ubsan/float-div-by-zero-1.c: New test.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ PR c/60139
+ * gcc.dg/pr60139.c: New test.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ PR c/60351
+ * gcc.dg/pr60351.c: New test.
+
+2014-04-29 Alan Lawrence <alan.lawrence@arm.com>
+
+ * gcc.target/arm/simd/simd.exp: New file.
+ * gcc.target/arm/simd/vzipqf32_1.c: New file.
+ * gcc.target/arm/simd/vzipqp16_1.c: New file.
+ * gcc.target/arm/simd/vzipqp8_1.c: New file.
+ * gcc.target/arm/simd/vzipqs16_1.c: New file.
+ * gcc.target/arm/simd/vzipqs32_1.c: New file.
+ * gcc.target/arm/simd/vzipqs8_1.c: New file.
+ * gcc.target/arm/simd/vzipqu16_1.c: New file.
+ * gcc.target/arm/simd/vzipqu32_1.c: New file.
+ * gcc.target/arm/simd/vzipqu8_1.c: New file.
+ * gcc.target/arm/simd/vzipf32_1.c: New file.
+ * gcc.target/arm/simd/vzipp16_1.c: New file.
+ * gcc.target/arm/simd/vzipp8_1.c: New file.
+ * gcc.target/arm/simd/vzips16_1.c: New file.
+ * gcc.target/arm/simd/vzips32_1.c: New file.
+ * gcc.target/arm/simd/vzips8_1.c: New file.
+ * gcc.target/arm/simd/vzipu16_1.c: New file.
+ * gcc.target/arm/simd/vzipu32_1.c: New file.
+ * gcc.target/arm/simd/vzipu8_1.c: New file.
+
+2014-04-29 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/51707
+ * g++.dg/cpp0x/constexpr-51707.C: New.
+
+2014-04-29 David Malcolm <dmalcolm@redhat.com>
+
+ * gcc.dg/tree-ssa/pr23401.c: Update the expected number of
+ occurrences of "int" in the gimple dump to reflect that the return
+ types of functions now show up in such dumps.
+ * gcc.dg/tree-ssa/pr27810.c: Likewise.
+
+2014-04-29 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/60971
+ * c-c++-common/torture/pr60971.c: New test.
+
+2014-04-29 Alan Lawrence <alan.lawrence@arm.com>
+
+ * gcc.target/aarch64/simd/simd.exp: New file.
+ * gcc.target/aarch64/simd/vzipf32_1.c: New file.
+ * gcc.target/aarch64/simd/vzipf32.x: New file.
+ * gcc.target/aarch64/simd/vzipp16_1.c: New file.
+ * gcc.target/aarch64/simd/vzipp16.x: New file.
+ * gcc.target/aarch64/simd/vzipp8_1.c: New file.
+ * gcc.target/aarch64/simd/vzipp8.x: New file.
+ * gcc.target/aarch64/simd/vzipqf32_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqf32.x: New file.
+ * gcc.target/aarch64/simd/vzipqp16_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqp16.x: New file.
+ * gcc.target/aarch64/simd/vzipqp8_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqp8.x: New file.
+ * gcc.target/aarch64/simd/vzipqs16_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqs16.x: New file.
+ * gcc.target/aarch64/simd/vzipqs32_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqs32.x: New file.
+ * gcc.target/aarch64/simd/vzipqs8_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqs8.x: New file.
+ * gcc.target/aarch64/simd/vzipqu16_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqu16.x: New file.
+ * gcc.target/aarch64/simd/vzipqu32_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqu32.x: New file.
+ * gcc.target/aarch64/simd/vzipqu8_1.c: New file.
+ * gcc.target/aarch64/simd/vzipqu8.x: New file.
+ * gcc.target/aarch64/simd/vzips16_1.c: New file.
+ * gcc.target/aarch64/simd/vzips16.x: New file.
+ * gcc.target/aarch64/simd/vzips32_1.c: New file.
+ * gcc.target/aarch64/simd/vzips32.x: New file.
+ * gcc.target/aarch64/simd/vzips8_1.c: New file.
+ * gcc.target/aarch64/simd/vzips8.x: New file.
+ * gcc.target/aarch64/simd/vzipu16_1.c: New file.
+ * gcc.target/aarch64/simd/vzipu16.x: New file.
+ * gcc.target/aarch64/simd/vzipu32_1.c: New file.
+ * gcc.target/aarch64/simd/vzipu32.x: New file.
+ * gcc.target/aarch64/simd/vzipu8_1.c: New file.
+ * gcc.target/aarch64/simd/vzipu8.x: New file.
+
+2014-04-29 Zhenqiang Chen <zhenqiang.chen@linaro.org>
+
+ * gcc.target/aarch64/fcsel_1.c: New test case.
+
+2014-04-28 Jerry DeLisle <jvdelisle@gcc.gnu.org>
+
+ PR libfortran/60810
+ * gfortran.dg/arrayio_13.f90: New test.
+
+2014-04-28 Martin Jambor <mjambor@suse.cz>
+
+ * gcc.dg/tree-ssa/sra-14.c: New test.
+
+2014-04-28 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/60092
+ * gcc.dg/tree-ssa/alias-32.c: New testcase.
+ * gcc.dg/vect/pr60092.c: Likewise.
+
2014-04-28 Richard Biener <rguenther@suse.de>
* gcc.dg/tree-ssa/vrp91.c: New testcase.
diff --git a/gcc/testsuite/c-c++-common/torture/pr60971.c b/gcc/testsuite/c-c++-common/torture/pr60971.c
new file mode 100644
index 00000000000..b7a967dabb4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/pr60971.c
@@ -0,0 +1,34 @@
+/* PR tree-optimization/60971 */
+/* { dg-do run } */
+
+#ifndef __cplusplus
+#define bool _Bool
+#endif
+
+volatile unsigned char c;
+
+__attribute__((noinline)) unsigned char
+foo (void)
+{
+ return c;
+}
+
+__attribute__((noinline)) bool
+bar (void)
+{
+ return foo () & 1;
+}
+
+int
+main ()
+{
+ c = 0x41;
+ c = bar ();
+ if (c != 1)
+ __builtin_abort ();
+ c = 0x20;
+ c = bar ();
+ if (c != 0)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c
index 7a28bacd14b..bb391c5b36d 100644
--- a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c
+++ b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c
@@ -1,4 +1,4 @@
-/* { dg-do compile} */
+/* { dg-do compile } */
/* { dg-options "-fsanitize=integer-divide-by-zero" } */
void
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-div-by-zero-1.c b/gcc/testsuite/c-c++-common/ubsan/float-div-by-zero-1.c
new file mode 100644
index 00000000000..2271ea9b776
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-div-by-zero-1.c
@@ -0,0 +1,26 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=float-divide-by-zero" } */
+
+int
+main (void)
+{
+ volatile float a = 1.3f;
+ volatile double b = 0.0;
+ volatile int c = 4;
+ volatile float res;
+
+ res = a / b;
+ res = a / 0.0;
+ res = 2.7f / b;
+ res = 3.6 / (b = 0.0, b);
+ res = c / b;
+ res = b / c;
+
+ return 0;
+}
+
+/* { dg-output "division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*" } */
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-51707.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-51707.C
new file mode 100644
index 00000000000..ae02a31c540
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-51707.C
@@ -0,0 +1,14 @@
+// PR c++/51707
+// { dg-do compile { target c++11 } }
+
+struct S {
+ constexpr S() {}
+};
+
+struct T {
+ constexpr T(S const& s) : s{s} {}
+ S const& s;
+};
+
+constexpr S s {};
+constexpr T t { s };
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-aggr1.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-aggr1.C
new file mode 100644
index 00000000000..7e4da11a2df
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-aggr1.C
@@ -0,0 +1,17 @@
+// PR c++/60951
+// { dg-do compile { target c++11 } }
+
+struct Foo {
+ constexpr Foo(int x = 0) : memb(x) {}
+ int memb;
+};
+
+struct FooContainer {
+ Foo foo[2];
+};
+
+void fubar() {
+ int nonConst = 0;
+ FooContainer fooContainer;
+ fooContainer = { { 0, nonConst } };
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/defaulted49.C b/gcc/testsuite/g++.dg/cpp0x/defaulted49.C
new file mode 100644
index 00000000000..357be419db7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/defaulted49.C
@@ -0,0 +1,15 @@
+// PR c++/60980
+// { dg-do compile { target c++11 } }
+
+struct x0
+{
+ x0 () = default;
+};
+struct x1
+{
+ x0 x2[2];
+ void x3 ()
+ {
+ x1 ();
+ }
+};
diff --git a/gcc/testsuite/g++.dg/cpp0x/nsdmi-defer6.C b/gcc/testsuite/g++.dg/cpp0x/nsdmi-defer6.C
index 033c14264dd..0f06343958b 100644
--- a/gcc/testsuite/g++.dg/cpp0x/nsdmi-defer6.C
+++ b/gcc/testsuite/g++.dg/cpp0x/nsdmi-defer6.C
@@ -1,8 +1,8 @@
// { dg-do compile { target c++11 } }
-struct A // { dg-error "non-static data member" }
+struct A
{
- int i = (A(), 42); // { dg-message "required here" }
+ int i = (A(), 42); // { dg-error "constructor required" }
};
A a;
diff --git a/gcc/testsuite/g++.dg/cpp0x/nsdmi-dr1397.C b/gcc/testsuite/g++.dg/cpp0x/nsdmi-dr1397.C
new file mode 100644
index 00000000000..061af8b8c29
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/nsdmi-dr1397.C
@@ -0,0 +1,7 @@
+// DR 1397
+// { dg-require-effective-target c++11 }
+
+struct A
+{
+ int i = sizeof(A{}); // { dg-error "" }
+};
diff --git a/gcc/testsuite/g++.dg/cpp0x/nsdmi-eh1.C b/gcc/testsuite/g++.dg/cpp0x/nsdmi-eh1.C
index edcf5887db1..9bc632c4bc7 100644
--- a/gcc/testsuite/g++.dg/cpp0x/nsdmi-eh1.C
+++ b/gcc/testsuite/g++.dg/cpp0x/nsdmi-eh1.C
@@ -1,5 +1,5 @@
// Core issue 1351
-// { dg-do run { xfail *-*-* } }
+// { dg-do run }
// { dg-require-effective-target c++11 }
bool fail;
diff --git a/gcc/testsuite/gcc.dg/pr60139.c b/gcc/testsuite/gcc.dg/pr60139.c
new file mode 100644
index 00000000000..a63d8b5b9bd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr60139.c
@@ -0,0 +1,14 @@
+/* PR c/60139 */
+/* { dg-do compile } */
+/* { dg-options "-Wpedantic" } */
+/* { dg-prune-output ".*near initialization for.*" } */
+
+double sin (double);
+void
+fn (int *p)
+{
+ int **a[] = { &p, /* { dg-warning "17:initializer element is not computable at load time" } */
+ (void *) 0, &p }; /* { dg-warning "28:initializer element is not computable at load time" } */
+ double d[] = { sin (1.0), /* { dg-warning "18:initializer element is not a constant expression" } */
+ 8.8, sin (1.0), 2.6 }; /* { dg-warning "23:initializer element is not a constant expression" } */
+}
diff --git a/gcc/testsuite/gcc.dg/pr60351.c b/gcc/testsuite/gcc.dg/pr60351.c
new file mode 100644
index 00000000000..29184d94872
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr60351.c
@@ -0,0 +1,11 @@
+/* PR c/60351 */
+/* { dg-do compile } */
+
+void
+f (int i)
+{
+ i >> -1; /* { dg-warning "5:right shift count is negative" } */
+ i >> 250; /* { dg-warning "5:right shift count >= width of type" } */
+ i << -1; /* { dg-warning "5:left shift count is negative" } */
+ i << 250; /* { dg-warning "5:left shift count >= width of type" } */
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/alias-32.c b/gcc/testsuite/gcc.dg/tree-ssa/alias-32.c
new file mode 100644
index 00000000000..5d0dcc2937e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/alias-32.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-cddce1" } */
+
+int bar (short *p)
+{
+ int res = *p;
+ struct { int *q1; int *q2; } q;
+ q.q1 = __builtin_aligned_alloc (128, 128 * sizeof (int));
+ q.q2 = __builtin_aligned_alloc (128, 128 * sizeof (int));
+ *q.q1 = 1;
+ *q.q2 = 2;
+ return res + *p + *q.q1 + *q.q2;
+}
+
+/* There should be only one load from *p left. All stores and all
+ other loads should be removed. Likewise the calls to aligned_alloc. */
+
+/* { dg-final { scan-tree-dump-times "\\\*\[^ \]" 1 "cddce1" } } */
+/* { dg-final { scan-tree-dump-not "aligned_alloc" "cddce1" } } */
+/* { dg-final { cleanup-tree-dump "cddce1" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr23401.c b/gcc/testsuite/gcc.dg/tree-ssa/pr23401.c
index 1d30ac7519f..3940692cd6c 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/pr23401.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr23401.c
@@ -19,6 +19,6 @@ int ffff(int i)
/* We should not use extra temporaries apart from for i1 + i2. */
-/* { dg-final { scan-tree-dump-times "int" 5 "gimple" } } */
+/* { dg-final { scan-tree-dump-times "int" 6 "gimple" } } */
/* { dg-final { scan-tree-dump-times "int D\\\." 1 "gimple" } } */
/* { dg-final { cleanup-tree-dump "gimple" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr27810.c b/gcc/testsuite/gcc.dg/tree-ssa/pr27810.c
index c7da3bd5d06..6d0904bb06b 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/pr27810.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr27810.c
@@ -13,6 +13,6 @@ int qqq (int a)
/* We should not use an extra temporary for the result of the
function call. */
-/* { dg-final { scan-tree-dump-times "int" 3 "gimple" } } */
+/* { dg-final { scan-tree-dump-times "int" 4 "gimple" } } */
/* { dg-final { scan-tree-dump-times "int D\\\." 1 "gimple" } } */
/* { dg-final { cleanup-tree-dump "gimple" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/sra-14.c b/gcc/testsuite/gcc.dg/tree-ssa/sra-14.c
new file mode 100644
index 00000000000..6cbc0b43d58
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/sra-14.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+/* { dg-options "-O1" } */
+
+struct S
+{
+ int i, j;
+};
+
+struct Z
+{
+ struct S d, s;
+};
+
+struct S __attribute__ ((noinline, noclone))
+get_s (void)
+{
+ struct S s;
+ s.i = 5;
+ s.j = 6;
+
+ return s;
+}
+
+struct S __attribute__ ((noinline, noclone))
+get_d (void)
+{
+ struct S d;
+ d.i = 0;
+ d.j = 0;
+
+ return d;
+}
+
+int __attribute__ ((noinline, noclone))
+get_c (void)
+{
+ return 1;
+}
+
+int __attribute__ ((noinline, noclone))
+my_nop (int i)
+{
+ return i;
+}
+
+int __attribute__ ((noinline, noclone))
+foo (void)
+{
+ struct Z z;
+ int i, c = get_c ();
+
+ z.d = get_d ();
+ z.s = get_s ();
+
+ for (i = 0; i < c; i++)
+ {
+ z.s.i = my_nop (z.s.i);
+ z.s.j = my_nop (z.s.j);
+ }
+
+ return z.s.i + z.s.j;
+}
+
+int main (int argc, char *argv[])
+{
+ if (foo () != 11)
+ __builtin_abort ();
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.dg/vect/pr60092.c b/gcc/testsuite/gcc.dg/vect/pr60092.c
new file mode 100644
index 00000000000..e03c625b489
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr60092.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_int } */
+
+int *foo (int n)
+{
+ int *p = __builtin_aligned_alloc (256, n * sizeof (int));
+ int *q = __builtin_aligned_alloc (256, n * sizeof (int));
+ bar (q);
+ int i;
+ for (i = 0; i < n; ++i)
+ p[i] = q[i] + q[i];
+ return p;
+}
+
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump-not "Peeling for alignment will be applied" "vect" } } */
+/* { dg-final { scan-tree-dump-not "Vectorizing an unaligned access" "vect" } } */
+/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fcsel_1.c b/gcc/testsuite/gcc.target/aarch64/fcsel_1.c
new file mode 100644
index 00000000000..2704ee0ede7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fcsel_1.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options " -O2 " } */
+
+float
+f_1 (float a, float b, float c, float d)
+{
+ if (a > 0.0)
+ return c;
+ else
+ return 2.0;
+}
+
+double
+f_2 (double a, double b, double c, double d)
+{
+ if (a > b)
+ return c;
+ else
+ return d;
+}
+
+/* { dg-final { scan-assembler-times "\tfcsel" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/simd.exp b/gcc/testsuite/gcc.target/aarch64/simd/simd.exp
new file mode 100644
index 00000000000..097d29a9e13
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/simd.exp
@@ -0,0 +1,45 @@
+# Specific regression driver for AArch64 SIMD instructions.
+# Copyright (C) 2014 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>. */
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an AArch64 target.
+if {![istarget aarch64*-*-*] } then {
+ return
+}
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# If a testcase doesn't have special options, use these.
+global DEFAULT_CFLAGS
+if ![info exists DEFAULT_CFLAGS] then {
+ set DEFAULT_CFLAGS " -ansi -pedantic-errors"
+}
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+ "" $DEFAULT_CFLAGS
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpf32.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpf32.x
new file mode 100644
index 00000000000..86c3700e522
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpf32.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+float32x2x2_t
+test_vuzpf32 (float32x2_t _a, float32x2_t _b)
+{
+ return vuzp_f32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ float32_t first[] = {1, 2};
+ float32_t second[] = {3, 4};
+ float32x2x2_t result = test_vuzpf32 (vld1_f32 (first), vld1_f32 (second));
+ float32_t exp1[] = {1, 3};
+ float32_t exp2[] = {2, 4};
+ float32x2_t expect1 = vld1_f32 (exp1);
+ float32x2_t expect2 = vld1_f32 (exp2);
+
+ for (i = 0; i < 2; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpf32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpf32_1.c
new file mode 100644
index 00000000000..0daba1c7f93
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpf32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_f32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpf32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpp16.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpp16.x
new file mode 100644
index 00000000000..bc45efcd965
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpp16.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+poly16x4x2_t
+test_vuzpp16 (poly16x4_t _a, poly16x4_t _b)
+{
+ return vuzp_p16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly16_t first[] = {1, 2, 3, 4};
+ poly16_t second[] = {5, 6, 7, 8};
+ poly16x4x2_t result = test_vuzpp16 (vld1_p16 (first), vld1_p16 (second));
+ poly16_t exp1[] = {1, 3, 5, 7};
+ poly16_t exp2[] = {2, 4, 6, 8};
+ poly16x4_t expect1 = vld1_p16 (exp1);
+ poly16x4_t expect2 = vld1_p16 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpp16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpp16_1.c
new file mode 100644
index 00000000000..03b07220640
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpp16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_p16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpp16.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpp8.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpp8.x
new file mode 100644
index 00000000000..b4ef51cae74
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpp8.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+poly8x8x2_t
+test_vuzpp8 (poly8x8_t _a, poly8x8_t _b)
+{
+ return vuzp_p8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly8_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ poly8x8x2_t result = test_vuzpp8 (vld1_p8 (first), vld1_p8 (second));
+ poly8_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15};
+ poly8_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16};
+ poly8x8_t expect1 = vld1_p8 (exp1);
+ poly8x8_t expect2 = vld1_p8 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpp8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpp8_1.c
new file mode 100644
index 00000000000..5186b1f9166
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpp8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpp8.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqf32.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqf32.x
new file mode 100644
index 00000000000..f1b48da315f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqf32.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+float32x4x2_t
+test_vuzpqf32 (float32x4_t _a, float32x4_t _b)
+{
+ return vuzpq_f32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ float32_t first[] = {1, 2, 3, 4};
+ float32_t second[] = {5, 6, 7, 8};
+ float32x4x2_t result = test_vuzpqf32 (vld1q_f32 (first), vld1q_f32 (second));
+ float32_t exp1[] = {1, 3, 5, 7};
+ float32_t exp2[] = {2, 4, 6, 8};
+ float32x4_t expect1 = vld1q_f32 (exp1);
+ float32x4_t expect2 = vld1q_f32 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqf32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqf32_1.c
new file mode 100644
index 00000000000..1167f7bbe00
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqf32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_f32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqf32.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp16.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp16.x
new file mode 100644
index 00000000000..d4e08f74631
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp16.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+poly16x8x2_t
+test_vuzpqp16 (poly16x8_t _a, poly16x8_t _b)
+{
+ return vuzpq_p16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly16_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly16_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ poly16x8x2_t result = test_vuzpqp16 (vld1q_p16 (first), vld1q_p16 (second));
+ poly16_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15};
+ poly16_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16};
+ poly16x8_t expect1 = vld1q_p16 (exp1);
+ poly16x8_t expect2 = vld1q_p16 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp16_1.c
new file mode 100644
index 00000000000..c6648045cf3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_p16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqp16.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp8.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp8.x
new file mode 100644
index 00000000000..31541de7e14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp8.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+poly8x16x2_t
+test_vuzpqp8 (poly8x16_t _a, poly8x16_t _b)
+{
+ return vuzpq_p8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ poly8_t second[] =
+ {17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
+ poly8x16x2_t result = test_vuzpqp8 (vld1q_p8 (first), vld1q_p8 (second));
+ poly8_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31};
+ poly8_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32};
+ poly8x16_t expect1 = vld1q_p8 (exp1);
+ poly8x16_t expect2 = vld1q_p8 (exp2);
+
+ for (i = 0; i < 16; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp8_1.c
new file mode 100644
index 00000000000..a9e6ce222e7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqp8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqp8.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs16.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs16.x
new file mode 100644
index 00000000000..439107b2ec5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs16.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+int16x8x2_t
+test_vuzpqs16 (int16x8_t _a, int16x8_t _b)
+{
+ return vuzpq_s16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int16_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ int16_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ int16x8x2_t result = test_vuzpqs16 (vld1q_s16 (first), vld1q_s16 (second));
+ int16_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15};
+ int16_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16};
+ int16x8_t expect1 = vld1q_s16 (exp1);
+ int16x8_t expect2 = vld1q_s16 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs16_1.c
new file mode 100644
index 00000000000..af1e28b7669
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_s16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqs16.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs32.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs32.x
new file mode 100644
index 00000000000..84463f038e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs32.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+int32x4x2_t
+test_vuzpqs32 (int32x4_t _a, int32x4_t _b)
+{
+ return vuzpq_s32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int32_t first[] = {1, 2, 3, 4};
+ int32_t second[] = {5, 6, 7, 8};
+ int32x4x2_t result = test_vuzpqs32 (vld1q_s32 (first), vld1q_s32 (second));
+ int32_t exp1[] = {1, 3, 5, 7};
+ int32_t exp2[] = {2, 4, 6, 8};
+ int32x4_t expect1 = vld1q_s32 (exp1);
+ int32x4_t expect2 = vld1q_s32 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs32_1.c
new file mode 100644
index 00000000000..a4bf7ac7e59
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_s32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqs32.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs8.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs8.x
new file mode 100644
index 00000000000..c8b916780d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs8.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+int8x16x2_t
+test_vuzpqs8 (int8x16_t _a, int8x16_t _b)
+{
+ return vuzpq_s8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ int8_t second[] =
+ {17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
+ int8x16x2_t result = test_vuzpqs8 (vld1q_s8 (first), vld1q_s8 (second));
+ int8_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31};
+ int8_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32};
+ int8x16_t expect1 = vld1q_s8 (exp1);
+ int8x16_t expect2 = vld1q_s8 (exp2);
+
+ for (i = 0; i < 16; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs8_1.c
new file mode 100644
index 00000000000..234a3292823
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqs8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqs8.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu16.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu16.x
new file mode 100644
index 00000000000..1757467b467
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu16.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+uint16x8x2_t
+test_vuzpqu16 (uint16x8_t _a, uint16x8_t _b)
+{
+ return vuzpq_u16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint16_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint16_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ uint16x8x2_t result = test_vuzpqu16 (vld1q_u16 (first), vld1q_u16 (second));
+ uint16_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15};
+ uint16_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16};
+ uint16x8_t expect1 = vld1q_u16 (exp1);
+ uint16x8_t expect2 = vld1q_u16 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu16_1.c
new file mode 100644
index 00000000000..3f029ed5439
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_u16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqu16.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu32.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu32.x
new file mode 100644
index 00000000000..9ff23694c29
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu32.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+uint32x4x2_t
+test_vuzpqu32 (uint32x4_t _a, uint32x4_t _b)
+{
+ return vuzpq_u32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint32_t first[] = {1, 2, 3, 4};
+ uint32_t second[] = {5, 6, 7, 8};
+ uint32x4x2_t result = test_vuzpqu32 (vld1q_u32 (first), vld1q_u32 (second));
+ uint32_t exp1[] = {1, 3, 5, 7};
+ uint32_t exp2[] = {2, 4, 6, 8};
+ uint32x4_t expect1 = vld1q_u32 (exp1);
+ uint32x4_t expect2 = vld1q_u32 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu32_1.c
new file mode 100644
index 00000000000..16090eed712
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_u32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqu32.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu8.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu8.x
new file mode 100644
index 00000000000..1f5288d0ce3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu8.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+uint8x16x2_t
+test_vuzpqu8 (uint8x16_t _a, uint8x16_t _b)
+{
+ return vuzpq_u8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8_t second[] =
+ {17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
+ uint8x16x2_t result = test_vuzpqu8 (vld1q_u8 (first), vld1q_u8 (second));
+ uint8_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31};
+ uint8_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32};
+ uint8x16_t expect1 = vld1q_u8 (exp1);
+ uint8x16_t expect2 = vld1q_u8 (exp2);
+
+ for (i = 0; i < 16; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu8_1.c
new file mode 100644
index 00000000000..6313e4c9b5d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpqu8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzpq_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpqu8.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzps16.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzps16.x
new file mode 100644
index 00000000000..4775135d842
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzps16.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+int16x4x2_t
+test_vuzps16 (int16x4_t _a, int16x4_t _b)
+{
+ return vuzp_s16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int16_t first[] = {1, 2, 3, 4};
+ int16_t second[] = {5, 6, 7, 8};
+ int16x4x2_t result = test_vuzps16 (vld1_s16 (first), vld1_s16 (second));
+ int16_t exp1[] = {1, 3, 5, 7};
+ int16_t exp2[] = {2, 4, 6, 8};
+ int16x4_t expect1 = vld1_s16 (exp1);
+ int16x4_t expect2 = vld1_s16 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzps16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzps16_1.c
new file mode 100644
index 00000000000..f31bd31d0bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzps16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_s16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzps16.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzps32.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzps32.x
new file mode 100644
index 00000000000..6f885ce083b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzps32.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+int32x2x2_t
+test_vuzps32 (int32x2_t _a, int32x2_t _b)
+{
+ return vuzp_s32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int32_t first[] = {1, 2};
+ int32_t second[] = {3, 4};
+ int32x2x2_t result = test_vuzps32 (vld1_s32 (first), vld1_s32 (second));
+ int32_t exp1[] = {1, 3};
+ int32_t exp2[] = {2, 4};
+ int32x2_t expect1 = vld1_s32 (exp1);
+ int32x2_t expect2 = vld1_s32 (exp2);
+
+ for (i = 0; i < 2; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzps32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzps32_1.c
new file mode 100644
index 00000000000..af48d63a67e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzps32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_s32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzps32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
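
The two-element 64-bit vuzp variants (vuzp_s32 here and vuzp_u32 in vuzpu32_1.c below) scan for zip1/zip2 rather than uzp1/uzp2: with only two lanes, de-interleaving and interleaving produce the same permutation ({a0, b0} and {a1, b1}), so the compiler may lower either intrinsic to the zip instructions. The same equivalence accounts for the vuzp.32 patterns expected by the two-element ARM vzip tests later in this patch. A minimal stand-alone illustration of the equivalence, written in the same style as the tests above (not part of the testsuite; assumes an AArch64 target and <arm_neon.h>):

#include <arm_neon.h>

extern void abort (void);

int
main (void)
{
  int32_t a_init[] = {1, 2}, b_init[] = {3, 4};
  int32x2_t a = vld1_s32 (a_init), b = vld1_s32 (b_init);
  /* Both results are {1, 3} and {2, 4} for two-lane vectors.  */
  int32x2x2_t uzp = vuzp_s32 (a, b);
  int32x2x2_t zip = vzip_s32 (a, b);
  int i, j;

  for (j = 0; j < 2; j++)
    for (i = 0; i < 2; i++)
      if (uzp.val[j][i] != zip.val[j][i])
        abort ();

  return 0;
}
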
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzps8.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzps8.x
new file mode 100644
index 00000000000..62ccad45779
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzps8.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+int8x8x2_t
+test_vuzps8 (int8x8_t _a, int8x8_t _b)
+{
+ return vuzp_s8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ int8_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ int8x8x2_t result = test_vuzps8 (vld1_s8 (first), vld1_s8 (second));
+ int8_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15};
+ int8_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16};
+ int8x8_t expect1 = vld1_s8 (exp1);
+ int8x8_t expect2 = vld1_s8 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzps8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzps8_1.c
new file mode 100644
index 00000000000..5962604ae42
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzps8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzps8.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpu16.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu16.x
new file mode 100644
index 00000000000..a5983f6f0b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu16.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+uint16x4x2_t
+test_vuzpu16 (uint16x4_t _a, uint16x4_t _b)
+{
+ return vuzp_u16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint16_t first[] = {1, 2, 3, 4};
+ uint16_t second[] = {5, 6, 7, 8};
+ uint16x4x2_t result = test_vuzpu16 (vld1_u16 (first), vld1_u16 (second));
+ uint16_t exp1[] = {1, 3, 5, 7};
+ uint16_t exp2[] = {2, 4, 6, 8};
+ uint16x4_t expect1 = vld1_u16 (exp1);
+ uint16x4_t expect2 = vld1_u16 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpu16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu16_1.c
new file mode 100644
index 00000000000..5025c5ff43e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_u16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpu16.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpu32.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu32.x
new file mode 100644
index 00000000000..6bf673130d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu32.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+uint32x2x2_t
+test_vuzpu32 (uint32x2_t _a, uint32x2_t _b)
+{
+ return vuzp_u32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint32_t first[] = {1, 2};
+ uint32_t second[] = {3, 4};
+ uint32x2x2_t result = test_vuzpu32 (vld1_u32 (first), vld1_u32 (second));
+ uint32_t exp1[] = {1, 3};
+ uint32_t exp2[] = {2, 4};
+ uint32x2_t expect1 = vld1_u32 (exp1);
+ uint32x2_t expect2 = vld1_u32 (exp2);
+
+ for (i = 0; i < 2; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpu32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu32_1.c
new file mode 100644
index 00000000000..05e1c95d42d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_u32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpu32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpu8.x b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu8.x
new file mode 100644
index 00000000000..c3e67e8418f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu8.x
@@ -0,0 +1,26 @@
+extern void abort (void);
+
+uint8x8x2_t
+test_vuzpu8 (uint8x8_t _a, uint8x8_t _b)
+{
+ return vuzp_u8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint8_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ uint8x8x2_t result = test_vuzpu8 (vld1_u8 (first), vld1_u8 (second));
+ uint8_t exp1[] = {1, 3, 5, 7, 9, 11, 13, 15};
+ uint8_t exp2[] = {2, 4, 6, 8, 10, 12, 14, 16};
+ uint8x8_t expect1 = vld1_u8 (exp1);
+ uint8x8_t expect2 = vld1_u8 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((result.val[0][i] != expect1[i]) || (result.val[1][i] != expect2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vuzpu8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu8_1.c
new file mode 100644
index 00000000000..57aa49c9330
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vuzpu8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vuzp_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vuzpu8.x"
+
+/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipf32.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipf32.x
new file mode 100644
index 00000000000..cc69b892a02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipf32.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+float32x2x2_t
+test_vzipf32 (float32x2_t _a, float32x2_t _b)
+{
+ return vzip_f32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ float32_t first[] = {1, 2};
+ float32_t second[] = {3, 4};
+ float32x2x2_t result = test_vzipf32 (vld1_f32 (first), vld1_f32 (second));
+ float32x2_t res1 = result.val[0], res2 = result.val[1];
+ float32_t exp1[] = {1, 3};
+ float32_t exp2[] = {2, 4};
+ float32x2_t expected1 = vld1_f32 (exp1);
+ float32x2_t expected2 = vld1_f32 (exp2);
+
+ for (i = 0; i < 2; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipf32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipf32_1.c
new file mode 100644
index 00000000000..df3395a034e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipf32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_f32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipf32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipp16.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipp16.x
new file mode 100644
index 00000000000..6bdb3e66ac6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipp16.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+poly16x4x2_t
+test_vzipp16 (poly16x4_t _a, poly16x4_t _b)
+{
+ return vzip_p16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly16_t first[] = {1, 2, 3, 4};
+ poly16_t second[] = {5, 6, 7, 8};
+ poly16x4x2_t result = test_vzipp16 (vld1_p16 (first), vld1_p16 (second));
+ poly16x4_t res1 = result.val[0], res2 = result.val[1];
+ poly16_t exp1[] = {1, 5, 2, 6};
+ poly16_t exp2[] = {3, 7, 4, 8};
+ poly16x4_t expected1 = vld1_p16 (exp1);
+ poly16x4_t expected2 = vld1_p16 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipp16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipp16_1.c
new file mode 100644
index 00000000000..e626a7877b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipp16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_p16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipp16.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipp8.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipp8.x
new file mode 100644
index 00000000000..5e8297eee4b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipp8.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+poly8x8x2_t
+test_vzipp8 (poly8x8_t _a, poly8x8_t _b)
+{
+ return vzip_p8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly8_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ poly8x8x2_t result = test_vzipp8 (vld1_p8 (first), vld1_p8 (second));
+ poly8x8_t res1 = result.val[0], res2 = result.val[1];
+ poly8_t exp1[] = {1, 9, 2, 10, 3, 11, 4, 12};
+ poly8_t exp2[] = {5, 13, 6, 14, 7, 15, 8, 16};
+ poly8x8_t expected1 = vld1_p8 (exp1);
+ poly8x8_t expected2 = vld1_p8 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipp8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipp8_1.c
new file mode 100644
index 00000000000..f99cb70211b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipp8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipp8.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqf32.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqf32.x
new file mode 100644
index 00000000000..e220aeaeb14
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqf32.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+float32x4x2_t
+test_vzipqf32 (float32x4_t _a, float32x4_t _b)
+{
+ return vzipq_f32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ float32_t first[] = {1, 2, 3, 4};
+ float32_t second[] = {5, 6, 7, 8};
+ float32x4x2_t result = test_vzipqf32 (vld1q_f32 (first), vld1q_f32 (second));
+ float32x4_t res1 = result.val[0], res2 = result.val[1];
+ float32_t exp1[] = {1, 5, 2, 6};
+ float32_t exp2[] = {3, 7, 4, 8};
+ float32x4_t expected1 = vld1q_f32 (exp1);
+ float32x4_t expected2 = vld1q_f32 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqf32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqf32_1.c
new file mode 100644
index 00000000000..74dae27dda0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqf32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_f32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqf32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqp16.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqp16.x
new file mode 100644
index 00000000000..640d7a2513f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqp16.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+poly16x8x2_t
+test_vzipqp16 (poly16x8_t _a, poly16x8_t _b)
+{
+ return vzipq_p16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly16_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ poly16_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ poly16x8x2_t result = test_vzipqp16 (vld1q_p16 (first), vld1q_p16 (second));
+ poly16x8_t res1 = result.val[0], res2 = result.val[1];
+ poly16_t exp1[] = {1, 9, 2, 10, 3, 11, 4, 12};
+ poly16_t exp2[] = {5, 13, 6, 14, 7, 15, 8, 16};
+ poly16x8_t expected1 = vld1q_p16 (exp1);
+ poly16x8_t expected2 = vld1q_p16 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqp16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqp16_1.c
new file mode 100644
index 00000000000..0bfd4f1a53b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqp16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_p16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqp16.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqp8.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqp8.x
new file mode 100644
index 00000000000..b211b4e532f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqp8.x
@@ -0,0 +1,29 @@
+extern void abort (void);
+
+poly8x16x2_t
+test_vzipqp8 (poly8x16_t _a, poly8x16_t _b)
+{
+ return vzipq_p8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ poly8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ poly8_t second[] =
+ {17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
+ poly8x16x2_t result = test_vzipqp8 (vld1q_p8 (first), vld1q_p8 (second));
+ poly8x16_t res1 = result.val[0], res2 = result.val[1];
+ poly8_t exp1[] = {1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23, 8, 24};
+ poly8_t exp2[] =
+ {9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31, 16, 32};
+ poly8x16_t expected1 = vld1q_p8 (exp1);
+ poly8x16_t expected2 = vld1q_p8 (exp2);
+
+ for (i = 0; i < 16; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqp8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqp8_1.c
new file mode 100644
index 00000000000..fb245063491
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqp8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_p8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqp8.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqs16.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs16.x
new file mode 100644
index 00000000000..97ee6b5bde6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs16.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+int16x8x2_t
+test_vzipqs16 (int16x8_t _a, int16x8_t _b)
+{
+ return vzipq_s16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int16_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ int16_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ int16x8x2_t result = test_vzipqs16 (vld1q_s16 (first), vld1q_s16 (second));
+ int16x8_t res1 = result.val[0], res2 = result.val[1];
+ int16_t exp1[] = {1, 9, 2, 10, 3, 11, 4, 12};
+ int16_t exp2[] = {5, 13, 6, 14, 7, 15, 8, 16};
+ int16x8_t expected1 = vld1q_s16 (exp1);
+ int16x8_t expected2 = vld1q_s16 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqs16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs16_1.c
new file mode 100644
index 00000000000..3ff551cceb2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_s16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqs16.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqs32.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs32.x
new file mode 100644
index 00000000000..45f490d7350
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs32.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+int32x4x2_t
+test_vzipqs32 (int32x4_t _a, int32x4_t _b)
+{
+ return vzipq_s32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int32_t first[] = {1, 2, 3, 4};
+ int32_t second[] = {5, 6, 7, 8};
+ int32x4x2_t result = test_vzipqs32 (vld1q_s32 (first), vld1q_s32 (second));
+ int32x4_t res1 = result.val[0], res2 = result.val[1];
+ int32_t exp1[] = {1, 5, 2, 6};
+ int32_t exp2[] = {3, 7, 4, 8};
+ int32x4_t expected1 = vld1q_s32 (exp1);
+ int32x4_t expected2 = vld1q_s32 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqs32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs32_1.c
new file mode 100644
index 00000000000..51681581bc9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_s32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqs32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqs8.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs8.x
new file mode 100644
index 00000000000..68cc84b7e86
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs8.x
@@ -0,0 +1,29 @@
+extern void abort (void);
+
+int8x16x2_t
+test_vzipqs8 (int8x16_t _a, int8x16_t _b)
+{
+ return vzipq_s8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ int8_t second[] =
+ {17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
+ int8x16x2_t result = test_vzipqs8 (vld1q_s8 (first), vld1q_s8 (second));
+ int8x16_t res1 = result.val[0], res2 = result.val[1];
+ int8_t exp1[] = {1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23, 8, 24};
+ int8_t exp2[] =
+ {9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31, 16, 32};
+ int8x16_t expected1 = vld1q_s8 (exp1);
+ int8x16_t expected2 = vld1q_s8 (exp2);
+
+ for (i = 0; i < 16; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqs8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs8_1.c
new file mode 100644
index 00000000000..ec035f3247d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqs8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqs8.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqu16.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu16.x
new file mode 100644
index 00000000000..dc4e1462d79
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu16.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+uint16x8x2_t
+test_vzipqu16 (uint16x8_t _a, uint16x8_t _b)
+{
+ return vzipq_u16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint16_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint16_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ uint16x8x2_t result = test_vzipqu16 (vld1q_u16 (first), vld1q_u16 (second));
+ uint16x8_t res1 = result.val[0], res2 = result.val[1];
+ uint16_t exp1[] = {1, 9, 2, 10, 3, 11, 4, 12};
+ uint16_t exp2[] = {5, 13, 6, 14, 7, 15, 8, 16};
+ uint16x8_t expected1 = vld1q_u16 (exp1);
+ uint16x8_t expected2 = vld1q_u16 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqu16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu16_1.c
new file mode 100644
index 00000000000..b540c8236a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_u16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqu16.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.8h, ?v\[0-9\]+\.8h, ?v\[0-9\]+\.8h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqu32.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu32.x
new file mode 100644
index 00000000000..8dde7e9bd62
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu32.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+uint32x4x2_t
+test_vzipqu32 (uint32x4_t _a, uint32x4_t _b)
+{
+ return vzipq_u32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint32_t first[] = {1, 2, 3, 4};
+ uint32_t second[] = {5, 6, 7, 8};
+ uint32x4x2_t result = test_vzipqu32 (vld1q_u32 (first), vld1q_u32 (second));
+ uint32x4_t res1 = result.val[0], res2 = result.val[1];
+ uint32_t exp1[] = {1, 5, 2, 6};
+ uint32_t exp2[] = {3, 7, 4, 8};
+ uint32x4_t expected1 = vld1q_u32 (exp1);
+ uint32x4_t expected2 = vld1q_u32 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqu32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu32_1.c
new file mode 100644
index 00000000000..ca907b34523
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_u32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqu32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.4s, ?v\[0-9\]+\.4s, ?v\[0-9\]+\.4s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqu8.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu8.x
new file mode 100644
index 00000000000..8f2603bfebe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu8.x
@@ -0,0 +1,29 @@
+extern void abort (void);
+
+uint8x16x2_t
+test_vzipqu8 (uint8x16_t _a, uint8x16_t _b)
+{
+ return vzipq_u8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8_t second[] =
+ {17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
+ uint8x16x2_t result = test_vzipqu8 (vld1q_u8 (first), vld1q_u8 (second));
+ uint8x16_t res1 = result.val[0], res2 = result.val[1];
+ uint8_t exp1[] = {1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23, 8, 24};
+ uint8_t exp2[] =
+ {9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31, 16, 32};
+ uint8x16_t expected1 = vld1q_u8 (exp1);
+ uint8x16_t expected2 = vld1q_u8 (exp2);
+
+ for (i = 0; i < 16; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipqu8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu8_1.c
new file mode 100644
index 00000000000..16ada581746
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipqu8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzipq_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipqu8.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.16b, ?v\[0-9\]+\.16b, ?v\[0-9\]+\.16b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzips16.x b/gcc/testsuite/gcc.target/aarch64/simd/vzips16.x
new file mode 100644
index 00000000000..71ee4687f3d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzips16.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+int16x4x2_t
+test_vzips16 (int16x4_t _a, int16x4_t _b)
+{
+ return vzip_s16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int16_t first[] = {1, 2, 3, 4};
+ int16_t second[] = {5, 6, 7, 8};
+ int16x4x2_t result = test_vzips16 (vld1_s16 (first), vld1_s16 (second));
+ int16x4_t res1 = result.val[0], res2 = result.val[1];
+ int16_t exp1[] = {1, 5, 2, 6};
+ int16_t exp2[] = {3, 7, 4, 8};
+ int16x4_t expected1 = vld1_s16 (exp1);
+ int16x4_t expected2 = vld1_s16 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzips16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzips16_1.c
new file mode 100644
index 00000000000..04a97546922
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzips16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_s16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzips16.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzips32.x b/gcc/testsuite/gcc.target/aarch64/simd/vzips32.x
new file mode 100644
index 00000000000..25bee1c2846
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzips32.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+int32x2x2_t
+test_vzips32 (int32x2_t _a, int32x2_t _b)
+{
+ return vzip_s32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int32_t first[] = {1, 2};
+ int32_t second[] = {3, 4};
+ int32x2x2_t result = test_vzips32 (vld1_s32 (first), vld1_s32 (second));
+ int32x2_t res1 = result.val[0], res2 = result.val[1];
+ int32_t exp1[] = {1, 3};
+ int32_t exp2[] = {2, 4};
+ int32x2_t expected1 = vld1_s32 (exp1);
+ int32x2_t expected2 = vld1_s32 (exp2);
+
+ for (i = 0; i < 2; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzips32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzips32_1.c
new file mode 100644
index 00000000000..1c44f64453e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzips32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_s32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzips32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzips8.x b/gcc/testsuite/gcc.target/aarch64/simd/vzips8.x
new file mode 100644
index 00000000000..4f04d731abe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzips8.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+int8x8x2_t
+test_vzips8 (int8x8_t _a, int8x8_t _b)
+{
+ return vzip_s8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ int8_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ int8x8x2_t result = test_vzips8 (vld1_s8 (first), vld1_s8 (second));
+ int8x8_t res1 = result.val[0], res2 = result.val[1];
+ int8_t exp1[] = {1, 9, 2, 10, 3, 11, 4, 12};
+ int8_t exp2[] = {5, 13, 6, 14, 7, 15, 8, 16};
+ int8x8_t expected1 = vld1_s8 (exp1);
+ int8x8_t expected2 = vld1_s8 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzips8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzips8_1.c
new file mode 100644
index 00000000000..5ab7230aa04
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzips8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_s8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzips8.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipu16.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipu16.x
new file mode 100644
index 00000000000..f8dd2ceea98
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipu16.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+uint16x4x2_t
+test_vzipu16 (uint16x4_t _a, uint16x4_t _b)
+{
+ return vzip_u16 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint16_t first[] = {1, 2, 3, 4};
+ uint16_t second[] = {5, 6, 7, 8};
+ uint16x4x2_t result = test_vzipu16 (vld1_u16 (first), vld1_u16 (second));
+ uint16x4_t res1 = result.val[0], res2 = result.val[1];
+ uint16_t exp1[] = {1, 5, 2, 6};
+ uint16_t exp2[] = {3, 7, 4, 8};
+ uint16x4_t expected1 = vld1_u16 (exp1);
+ uint16x4_t expected2 = vld1_u16 (exp2);
+
+ for (i = 0; i < 4; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipu16_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipu16_1.c
new file mode 100644
index 00000000000..abf7365a733
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipu16_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_u16' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipu16.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.4h, ?v\[0-9\]+\.4h, ?v\[0-9\]+\.4h!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipu32.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipu32.x
new file mode 100644
index 00000000000..0579fc4a8be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipu32.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+uint32x2x2_t
+test_vzipu32 (uint32x2_t _a, uint32x2_t _b)
+{
+ return vzip_u32 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint32_t first[] = {1, 2};
+ uint32_t second[] = {3, 4};
+ uint32x2x2_t result = test_vzipu32 (vld1_u32 (first), vld1_u32 (second));
+ uint32x2_t res1 = result.val[0], res2 = result.val[1];
+ uint32_t exp1[] = {1, 3};
+ uint32_t exp2[] = {2, 4};
+ uint32x2_t expected1 = vld1_u32 (exp1);
+ uint32x2_t expected2 = vld1_u32 (exp2);
+
+ for (i = 0; i < 2; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipu32_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipu32_1.c
new file mode 100644
index 00000000000..d994cb29c4f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipu32_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_u32' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipu32.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipu8.x b/gcc/testsuite/gcc.target/aarch64/simd/vzipu8.x
new file mode 100644
index 00000000000..28d9205c4f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipu8.x
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+uint8x8x2_t
+test_vzipu8 (uint8x8_t _a, uint8x8_t _b)
+{
+ return vzip_u8 (_a, _b);
+}
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ uint8_t first[] = {1, 2, 3, 4, 5, 6, 7, 8};
+ uint8_t second[] = {9, 10, 11, 12, 13, 14, 15, 16};
+ uint8x8x2_t result = test_vzipu8 (vld1_u8 (first), vld1_u8 (second));
+ uint8x8_t res1 = result.val[0], res2 = result.val[1];
+ uint8_t exp1[] = {1, 9, 2, 10, 3, 11, 4, 12};
+ uint8_t exp2[] = {5, 13, 6, 14, 7, 15, 8, 16};
+ uint8x8_t expected1 = vld1_u8 (exp1);
+ uint8x8_t expected2 = vld1_u8 (exp2);
+
+ for (i = 0; i < 8; i++)
+ if ((res1[i] != expected1[i]) || (res2[i] != expected2[i]))
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vzipu8_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vzipu8_1.c
new file mode 100644
index 00000000000..990186a33f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vzipu8_1.c
@@ -0,0 +1,11 @@
+/* Test the `vzip_u8' AArch64 SIMD intrinsic. */
+
+/* { dg-do run } */
+/* { dg-options "-save-temps -fno-inline" } */
+
+#include <arm_neon.h>
+#include "vzipu8.x"
+
+/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.8b, ?v\[0-9\]+\.8b, ?v\[0-9\]+\.8b!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/simd.exp b/gcc/testsuite/gcc.target/arm/simd/simd.exp
new file mode 100644
index 00000000000..746429dadf6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/simd.exp
@@ -0,0 +1,35 @@
+# Copyright (C) 1997-2014 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an ARM target.
+if ![istarget arm*-*-*] then {
+ return
+}
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+ "" ""
+
+# All done.
+dg-finish
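
The driver above loads gcc-dg.exp and runs every *.c, *.C and *.S file in gcc.target/arm/simd through dg-runtest with no extra flags; the shared .x bodies are only pulled in via #include from the wrapper tests and are never compiled on their own. In a configured build tree the directory can typically be exercised in isolation with an invocation along the lines of make check-gcc RUNTESTFLAGS="simd.exp" (a conventional usage sketch, not something mandated by the patch).
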
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipf32_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipf32_1.c
new file mode 100644
index 00000000000..efaa96ea955
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipf32_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipf32' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipf32.x"
+
+/* { dg-final { scan-assembler-times "vuzp\.32\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
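
The ARM wrappers in this directory reuse the AArch64 .x bodies via relative #include, so only the dg directives and scan-assembler patterns are target-specific. As with the AArch64 2s cases noted earlier, the two-element .32 variants (vzipf32, vzips32, vzipu32) expect vuzp.32 because zip and uzp coincide on two-lane vectors.
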
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipp16_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipp16_1.c
new file mode 100644
index 00000000000..4154333a7f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipp16_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipp16' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipp16.x"
+
+/* { dg-final { scan-assembler-times "vzip\.16\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipp8_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipp8_1.c
new file mode 100644
index 00000000000..9fe2384c9f9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipp8_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipp8' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipp8.x"
+
+/* { dg-final { scan-assembler-times "vzip\.8\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqf32_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqf32_1.c
new file mode 100644
index 00000000000..8c547a79f5b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqf32_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQf32' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqf32.x"
+
+/* { dg-final { scan-assembler-times "vzip\.32\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqp16_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqp16_1.c
new file mode 100644
index 00000000000..e2af10b2af1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqp16_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQp16' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqp16.x"
+
+/* { dg-final { scan-assembler-times "vzip\.16\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqp8_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqp8_1.c
new file mode 100644
index 00000000000..11a13298563
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqp8_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQp8' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqp8.x"
+
+/* { dg-final { scan-assembler-times "vzip\.8\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqs16_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqs16_1.c
new file mode 100644
index 00000000000..0576c0033e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqs16_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQs16' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqs16.x"
+
+/* { dg-final { scan-assembler-times "vzip\.16\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqs32_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqs32_1.c
new file mode 100644
index 00000000000..6cf24396d20
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqs32_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQs32' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqs32.x"
+
+/* { dg-final { scan-assembler-times "vzip\.32\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqs8_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqs8_1.c
new file mode 100644
index 00000000000..0244374e001
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqs8_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQs8' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqs8.x"
+
+/* { dg-final { scan-assembler-times "vzip\.8\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqu16_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqu16_1.c
new file mode 100644
index 00000000000..3c406f514d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqu16_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQu16' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqu16.x"
+
+/* { dg-final { scan-assembler-times "vzip\.16\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqu32_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqu32_1.c
new file mode 100644
index 00000000000..ba1393c6c92
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqu32_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQu32' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqu32.x"
+
+/* { dg-final { scan-assembler-times "vzip\.32\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipqu8_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipqu8_1.c
new file mode 100644
index 00000000000..023ecac3a52
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipqu8_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipQu8' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipqu8.x"
+
+/* { dg-final { scan-assembler-times "vzip\.8\[ \t\]+\[qQ\]\[0-9\]+, ?\[qQ\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzips16_1.c b/gcc/testsuite/gcc.target/arm/simd/vzips16_1.c
new file mode 100644
index 00000000000..b6c3c2fe897
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzips16_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzips16' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzips16.x"
+
+/* { dg-final { scan-assembler-times "vzip\.16\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzips32_1.c b/gcc/testsuite/gcc.target/arm/simd/vzips32_1.c
new file mode 100644
index 00000000000..1a6f1709342
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzips32_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzips32' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzips32.x"
+
+/* { dg-final { scan-assembler-times "vuzp\.32\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzips8_1.c b/gcc/testsuite/gcc.target/arm/simd/vzips8_1.c
new file mode 100644
index 00000000000..8569357817b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzips8_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzips8' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzips8.x"
+
+/* { dg-final { scan-assembler-times "vzip\.8\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipu16_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipu16_1.c
new file mode 100644
index 00000000000..23bfcc4d962
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipu16_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipu16' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipu16.x"
+
+/* { dg-final { scan-assembler-times "vzip\.16\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipu32_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipu32_1.c
new file mode 100644
index 00000000000..6a753f25a9c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipu32_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipu32' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipu32.x"
+
+/* { dg-final { scan-assembler-times "vuzp\.32\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gcc.target/arm/simd/vzipu8_1.c b/gcc/testsuite/gcc.target/arm/simd/vzipu8_1.c
new file mode 100644
index 00000000000..972af74237f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/vzipu8_1.c
@@ -0,0 +1,12 @@
+/* Test the `vzipu8' ARM Neon intrinsic. */
+
+/* { dg-do run } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O1 -fno-inline" } */
+/* { dg-add-options arm_neon } */
+
+#include "arm_neon.h"
+#include "../../aarch64/simd/vzipu8.x"
+
+/* { dg-final { scan-assembler-times "vzip\.8\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
+/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/gfortran.dg/arrayio_13.f90 b/gcc/testsuite/gfortran.dg/arrayio_13.f90
new file mode 100644
index 00000000000..92a856bc869
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/arrayio_13.f90
@@ -0,0 +1,14 @@
+! { dg-do run }
+! PR60810 Bogus end-of-file
+program readstrlist
+ character(len=80), dimension(2) :: ver
+ integer :: a, b, c
+ a = 1
+ b = 2
+ c = 3
+ ver(1) = '285 383'
+ ver(2) = '985'
+ read( ver, *) a, b, c
+ if (a /= 285 .or. b /= 383 .or. c /= 985) call abort
+ !write ( *, *) a, b, c
+end
diff --git a/gcc/testsuite/gfortran.dg/vect/pr48329.f90 b/gcc/testsuite/gfortran.dg/vect/pr48329.f90
new file mode 100644
index 00000000000..6ad03d4bd33
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/vect/pr48329.f90
@@ -0,0 +1,29 @@
+! { dg-do compile }
+! { dg-require-effective-target vect_float }
+! { dg-require-effective-target vect_intfloat_cvt }
+! { dg-additional-options "-ffast-math" }
+
+program calcpi
+
+ implicit none
+ real(kind=4):: h,x,sum,pi
+ integer:: n,i
+ real(kind=4):: f
+
+ f(x) = 4.0/(1.0+x**2)
+
+ n = 2100000000
+
+ h= 1.0 / n
+ sum = 0.0
+ DO i=1, n
+ x = h * (i-0.5)
+ sum = sum + f(x)
+ END DO
+ pi = h * sum
+ write(*,*) 'Pi=',pi
+
+end program calcpi
+
+! { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } }
+! { dg-final { cleanup-tree-dump "vect" } }
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index f0845b66975..5793fcc11fc 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -4787,7 +4787,7 @@ collect_subblocks (pointer_set_t *blocks, tree block)
/* Verify the GIMPLE statements in the CFG of FN. */
DEBUG_FUNCTION void
-verify_gimple_in_cfg (struct function *fn)
+verify_gimple_in_cfg (struct function *fn, bool verify_nothrow)
{
basic_block bb;
bool err = false;
@@ -4923,16 +4923,17 @@ verify_gimple_in_cfg (struct function *fn)
that they cannot throw, that we update other data structures
to match. */
lp_nr = lookup_stmt_eh_lp (stmt);
- if (lp_nr != 0)
+ if (lp_nr > 0)
{
if (!stmt_could_throw_p (stmt))
{
- error ("statement marked for throw, but doesn%'t");
- err2 |= true;
+ if (verify_nothrow)
+ {
+ error ("statement marked for throw, but doesn%'t");
+ err2 |= true;
+ }
}
- else if (lp_nr > 0
- && !gsi_one_before_end_p (gsi)
- && stmt_can_throw_internal (stmt))
+ else if (!gsi_one_before_end_p (gsi))
{
error ("statement marked for throw in middle of block");
err2 |= true;
@@ -7088,6 +7089,12 @@ dump_function_to_file (tree fndecl, FILE *file, int flags)
struct function *fun = DECL_STRUCT_FUNCTION (fndecl);
current_function_decl = fndecl;
+
+ /* Print the return type of the function: */
+ print_generic_expr (file, TREE_TYPE (TREE_TYPE (fun->decl)),
+ dump_flags | TDF_SLIM);
+ fprintf (file, "\n");
+
fprintf (file, "%s %s(", function_name (fun), tmclone ? "[tm-clone] " : "");
arg = DECL_ARGUMENTS (fndecl);
diff --git a/gcc/tree-cfg.h b/gcc/tree-cfg.h
index a115df58b9d..751d0a29e5a 100644
--- a/gcc/tree-cfg.h
+++ b/gcc/tree-cfg.h
@@ -58,7 +58,7 @@ extern gimple first_stmt (basic_block);
extern gimple last_stmt (basic_block);
extern gimple last_and_only_stmt (basic_block);
extern void verify_gimple_in_seq (gimple_seq);
-extern void verify_gimple_in_cfg (struct function *);
+extern void verify_gimple_in_cfg (struct function *, bool);
extern tree gimple_block_label (basic_block);
extern void add_phi_args_after_copy_bb (basic_block);
extern void add_phi_args_after_copy (basic_block *, unsigned, edge);
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index b83274b1fb8..e86c96b65e3 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -1550,6 +1550,8 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
/* Make sure that the last case is the default label, as one is required.
Then sort the labels, which is also required in GIMPLE. */
CASE_LOW (last_case) = NULL;
+ tree tem = case_label_vec.pop ();
+ gcc_assert (tem == last_case);
sort_case_labels (case_label_vec);
/* Build the switch statement, setting last_case to be the default
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 0d941019f0e..616e85bd5a0 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -234,6 +234,7 @@ protected:
#define TODO_verify_flow (1 << 3)
#define TODO_verify_stmts (1 << 4)
#define TODO_cleanup_cfg (1 << 5)
+#define TODO_verify_il (1 << 6)
#define TODO_dump_symtab (1 << 7)
#define TODO_remove_functions (1 << 8)
#define TODO_rebuild_frequencies (1 << 9)
@@ -309,7 +310,8 @@ protected:
| TODO_update_ssa_only_virtuals)
#define TODO_verify_all \
- (TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts)
+ (TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts | TODO_verify_il \
+ | TODO_verify_rtl_sharing)
/* Register pass info. */
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 49bbee3deb7..4a24e6a1b70 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -2769,7 +2769,7 @@ sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write)
{
location_t loc;
struct access *access;
- tree type, bfr;
+ tree type, bfr, orig_expr;
if (TREE_CODE (*expr) == BIT_FIELD_REF)
{
@@ -2785,6 +2785,7 @@ sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write)
if (!access)
return false;
type = TREE_TYPE (*expr);
+ orig_expr = *expr;
loc = gimple_location (gsi_stmt (*gsi));
gimple_stmt_iterator alt_gsi = gsi_none ();
@@ -2811,8 +2812,7 @@ sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write)
{
tree ref;
- ref = build_ref_for_model (loc, access->base, access->offset, access,
- NULL, false);
+ ref = build_ref_for_model (loc, orig_expr, 0, access, NULL, false);
if (write)
{
@@ -2863,7 +2863,7 @@ sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write)
else
start_offset = chunk_size = 0;
- generate_subtree_copies (access->first_child, access->base, 0,
+ generate_subtree_copies (access->first_child, orig_expr, access->offset,
start_offset, chunk_size, gsi, write, write,
loc);
}
@@ -2877,53 +2877,70 @@ enum unscalarized_data_handling { SRA_UDH_NONE, /* Nothing done so far. */
SRA_UDH_RIGHT, /* Data flushed to the RHS. */
SRA_UDH_LEFT }; /* Data flushed to the LHS. */
+struct subreplacement_assignment_data
+{
+ /* Offset of the access representing the lhs of the assignment. */
+ HOST_WIDE_INT left_offset;
+
+ /* LHS and RHS of the original assignment. */
+ tree assignment_lhs, assignment_rhs;
+
+ /* Access representing the rhs of the whole assignment. */
+ struct access *top_racc;
+
+ /* Stmt iterator used for statement insertions after the original assignment.
+ It points to the main GSI used to traverse a BB during function body
+ modification. */
+ gimple_stmt_iterator *new_gsi;
+
+ /* Stmt iterator used for statement insertions before the original
+ assignment. Keeps on pointing to the original statement. */
+ gimple_stmt_iterator old_gsi;
+
+ /* Location of the assignment. */
+ location_t loc;
+
+ /* Records whether we have needed to refresh replacements of the LHS and
+ from which side of the assignment this takes place. */
+ enum unscalarized_data_handling refreshed;
+};
+
/* Store all replacements in the access tree rooted in TOP_RACC either to their
base aggregate if there are unscalarized data or directly to LHS of the
statement that is pointed to by GSI otherwise. */
-static enum unscalarized_data_handling
-handle_unscalarized_data_in_subtree (struct access *top_racc,
- gimple_stmt_iterator *gsi)
+static void
+handle_unscalarized_data_in_subtree (struct subreplacement_assignment_data *sad)
{
- if (top_racc->grp_unscalarized_data)
+ tree src;
+ if (sad->top_racc->grp_unscalarized_data)
{
- generate_subtree_copies (top_racc->first_child, top_racc->base, 0, 0, 0,
- gsi, false, false,
- gimple_location (gsi_stmt (*gsi)));
- return SRA_UDH_RIGHT;
+ src = sad->assignment_rhs;
+ sad->refreshed = SRA_UDH_RIGHT;
}
else
{
- tree lhs = gimple_assign_lhs (gsi_stmt (*gsi));
- generate_subtree_copies (top_racc->first_child, lhs, top_racc->offset,
- 0, 0, gsi, false, false,
- gimple_location (gsi_stmt (*gsi)));
- return SRA_UDH_LEFT;
+ src = sad->assignment_lhs;
+ sad->refreshed = SRA_UDH_LEFT;
}
+ generate_subtree_copies (sad->top_racc->first_child, src,
+ sad->top_racc->offset, 0, 0,
+ &sad->old_gsi, false, false, sad->loc);
}
-
/* Try to generate statements to load all sub-replacements in an access subtree
- formed by children of LACC from scalar replacements in the TOP_RACC subtree.
- If that is not possible, refresh the TOP_RACC base aggregate and load the
- accesses from it. LEFT_OFFSET is the offset of the left whole subtree being
- copied. NEW_GSI is stmt iterator used for statement insertions after the
- original assignment, OLD_GSI is used to insert statements before the
- assignment. *REFRESHED keeps the information whether we have needed to
- refresh replacements of the LHS and from which side of the assignments this
- takes place. */
+ formed by children of LACC from scalar replacements in the SAD->top_racc
+ subtree. If that is not possible, refresh the SAD->top_racc base aggregate
+ and load the accesses from it. */
static void
-load_assign_lhs_subreplacements (struct access *lacc, struct access *top_racc,
- HOST_WIDE_INT left_offset,
- gimple_stmt_iterator *old_gsi,
- gimple_stmt_iterator *new_gsi,
- enum unscalarized_data_handling *refreshed)
+load_assign_lhs_subreplacements (struct access *lacc,
+ struct subreplacement_assignment_data *sad)
{
- location_t loc = gimple_location (gsi_stmt (*old_gsi));
for (lacc = lacc->first_child; lacc; lacc = lacc->next_sibling)
{
- HOST_WIDE_INT offset = lacc->offset - left_offset + top_racc->offset;
+ HOST_WIDE_INT offset;
+ offset = lacc->offset - sad->left_offset + sad->top_racc->offset;
if (lacc->grp_to_be_replaced)
{
@@ -2931,53 +2948,57 @@ load_assign_lhs_subreplacements (struct access *lacc, struct access *top_racc,
gimple stmt;
tree rhs;
- racc = find_access_in_subtree (top_racc, offset, lacc->size);
+ racc = find_access_in_subtree (sad->top_racc, offset, lacc->size);
if (racc && racc->grp_to_be_replaced)
{
rhs = get_access_replacement (racc);
if (!useless_type_conversion_p (lacc->type, racc->type))
- rhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR, lacc->type, rhs);
+ rhs = fold_build1_loc (sad->loc, VIEW_CONVERT_EXPR,
+ lacc->type, rhs);
if (racc->grp_partial_lhs && lacc->grp_partial_lhs)
- rhs = force_gimple_operand_gsi (old_gsi, rhs, true, NULL_TREE,
- true, GSI_SAME_STMT);
+ rhs = force_gimple_operand_gsi (&sad->old_gsi, rhs, true,
+ NULL_TREE, true, GSI_SAME_STMT);
}
else
{
/* No suitable access on the right hand side, need to load from
the aggregate. See if we have to update it first... */
- if (*refreshed == SRA_UDH_NONE)
- *refreshed = handle_unscalarized_data_in_subtree (top_racc,
- old_gsi);
+ if (sad->refreshed == SRA_UDH_NONE)
+ handle_unscalarized_data_in_subtree (sad);
- if (*refreshed == SRA_UDH_LEFT)
- rhs = build_ref_for_model (loc, lacc->base, lacc->offset, lacc,
- new_gsi, true);
+ if (sad->refreshed == SRA_UDH_LEFT)
+ rhs = build_ref_for_model (sad->loc, sad->assignment_lhs,
+ lacc->offset - sad->left_offset,
+ lacc, sad->new_gsi, true);
else
- rhs = build_ref_for_model (loc, top_racc->base, offset, lacc,
- new_gsi, true);
+ rhs = build_ref_for_model (sad->loc, sad->assignment_rhs,
+ lacc->offset - sad->left_offset,
+ lacc, sad->new_gsi, true);
if (lacc->grp_partial_lhs)
- rhs = force_gimple_operand_gsi (new_gsi, rhs, true, NULL_TREE,
+ rhs = force_gimple_operand_gsi (sad->new_gsi,
+ rhs, true, NULL_TREE,
false, GSI_NEW_STMT);
}
stmt = gimple_build_assign (get_access_replacement (lacc), rhs);
- gsi_insert_after (new_gsi, stmt, GSI_NEW_STMT);
- gimple_set_location (stmt, loc);
+ gsi_insert_after (sad->new_gsi, stmt, GSI_NEW_STMT);
+ gimple_set_location (stmt, sad->loc);
update_stmt (stmt);
sra_stats.subreplacements++;
}
else
{
- if (*refreshed == SRA_UDH_NONE
+ if (sad->refreshed == SRA_UDH_NONE
&& lacc->grp_read && !lacc->grp_covered)
- *refreshed = handle_unscalarized_data_in_subtree (top_racc,
- old_gsi);
+ handle_unscalarized_data_in_subtree (sad);
+
if (lacc && lacc->grp_to_be_debug_replaced)
{
gimple ds;
tree drhs;
- struct access *racc = find_access_in_subtree (top_racc, offset,
+ struct access *racc = find_access_in_subtree (sad->top_racc,
+ offset,
lacc->size);
if (racc && racc->grp_to_be_replaced)
@@ -2987,27 +3008,26 @@ load_assign_lhs_subreplacements (struct access *lacc, struct access *top_racc,
else
drhs = NULL;
}
- else if (*refreshed == SRA_UDH_LEFT)
- drhs = build_debug_ref_for_model (loc, lacc->base, lacc->offset,
- lacc);
- else if (*refreshed == SRA_UDH_RIGHT)
- drhs = build_debug_ref_for_model (loc, top_racc->base, offset,
- lacc);
+ else if (sad->refreshed == SRA_UDH_LEFT)
+ drhs = build_debug_ref_for_model (sad->loc, lacc->base,
+ lacc->offset, lacc);
+ else if (sad->refreshed == SRA_UDH_RIGHT)
+ drhs = build_debug_ref_for_model (sad->loc, sad->top_racc->base,
+ offset, lacc);
else
drhs = NULL_TREE;
if (drhs
&& !useless_type_conversion_p (lacc->type, TREE_TYPE (drhs)))
- drhs = fold_build1_loc (loc, VIEW_CONVERT_EXPR,
+ drhs = fold_build1_loc (sad->loc, VIEW_CONVERT_EXPR,
lacc->type, drhs);
ds = gimple_build_debug_bind (get_access_replacement (lacc),
- drhs, gsi_stmt (*old_gsi));
- gsi_insert_after (new_gsi, ds, GSI_NEW_STMT);
+ drhs, gsi_stmt (sad->old_gsi));
+ gsi_insert_after (sad->new_gsi, ds, GSI_NEW_STMT);
}
}
if (lacc->first_child)
- load_assign_lhs_subreplacements (lacc, top_racc, left_offset,
- old_gsi, new_gsi, refreshed);
+ load_assign_lhs_subreplacements (lacc, sad);
}
}
@@ -3053,7 +3073,7 @@ sra_modify_constructor_assign (gimple *stmt, gimple_stmt_iterator *gsi)
/* I have never seen this code path trigger but if it can happen the
following should handle it gracefully. */
if (access_has_children_p (acc))
- generate_subtree_copies (acc->first_child, acc->base, 0, 0, 0, gsi,
+ generate_subtree_copies (acc->first_child, lhs, acc->offset, 0, 0, gsi,
true, true, loc);
return SRA_AM_MODIFIED;
}
@@ -3261,7 +3281,7 @@ sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi)
|| stmt_ends_bb_p (*stmt))
{
if (access_has_children_p (racc))
- generate_subtree_copies (racc->first_child, racc->base, 0, 0, 0,
+ generate_subtree_copies (racc->first_child, rhs, racc->offset, 0, 0,
gsi, false, false, loc);
if (access_has_children_p (lacc))
{
@@ -3271,7 +3291,7 @@ sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi)
alt_gsi = gsi_start_edge (single_non_eh_succ (gsi_bb (*gsi)));
gsi = &alt_gsi;
}
- generate_subtree_copies (lacc->first_child, lacc->base, 0, 0, 0,
+ generate_subtree_copies (lacc->first_child, lhs, lacc->offset, 0, 0,
gsi, true, true, loc);
}
sra_stats.separate_lhs_rhs_handling++;
@@ -3301,21 +3321,26 @@ sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi)
&& !lacc->grp_unscalarizable_region
&& !racc->grp_unscalarizable_region)
{
- gimple_stmt_iterator orig_gsi = *gsi;
- enum unscalarized_data_handling refreshed;
+ struct subreplacement_assignment_data sad;
+
+ sad.left_offset = lacc->offset;
+ sad.assignment_lhs = lhs;
+ sad.assignment_rhs = rhs;
+ sad.top_racc = racc;
+ sad.old_gsi = *gsi;
+ sad.new_gsi = gsi;
+ sad.loc = gimple_location (*stmt);
+ sad.refreshed = SRA_UDH_NONE;
if (lacc->grp_read && !lacc->grp_covered)
- refreshed = handle_unscalarized_data_in_subtree (racc, gsi);
- else
- refreshed = SRA_UDH_NONE;
+ handle_unscalarized_data_in_subtree (&sad);
- load_assign_lhs_subreplacements (lacc, racc, lacc->offset,
- &orig_gsi, gsi, &refreshed);
- if (refreshed != SRA_UDH_RIGHT)
+ load_assign_lhs_subreplacements (lacc, &sad);
+ if (sad.refreshed != SRA_UDH_RIGHT)
{
gsi_next (gsi);
unlink_stmt_vdef (*stmt);
- gsi_remove (&orig_gsi, true);
+ gsi_remove (&sad.old_gsi, true);
release_defs (*stmt);
sra_stats.deleted++;
return SRA_AM_REMOVED;
@@ -3344,7 +3369,7 @@ sra_modify_assign (gimple *stmt, gimple_stmt_iterator *gsi)
/* Restore the aggregate RHS from its components so the
prevailing aggregate copy does the right thing. */
if (access_has_children_p (racc))
- generate_subtree_copies (racc->first_child, racc->base, 0, 0, 0,
+ generate_subtree_copies (racc->first_child, rhs, racc->offset, 0, 0,
gsi, false, false, loc);
/* Re-load the components of the aggregate copy destination.
But use the RHS aggregate to load from to expose more
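As I read the hunks above (the example and names below are mine, not from the patch): generate_subtree_copies and build_ref_for_model are now handed the expression actually written in the statement (orig_expr, lhs, rhs) together with the access offset, instead of the bare access base and offset 0. A toy case where that expression is itself a component reference rather than a plain declaration:

/* Illustrative only: an aggregate copy between members of a larger struct.
   When SRA splits it into per-field statements, the generated references
   stay anchored at the expressions the statement wrote and read (o->out,
   o->in) plus their offsets, rather than at the underlying base access.  */
struct inner { int a, b; };
struct outer { struct inner in, out; };

void
copy_members (struct outer *o)
{
  o->out = o->in;
}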
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 4f21c4517d1..7781d632266 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -1645,6 +1645,7 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
case BUILT_IN_FREE:
case BUILT_IN_MALLOC:
case BUILT_IN_POSIX_MEMALIGN:
+ case BUILT_IN_ALIGNED_ALLOC:
case BUILT_IN_CALLOC:
case BUILT_IN_ALLOCA:
case BUILT_IN_ALLOCA_WITH_ALIGN:
@@ -1955,6 +1956,7 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
/* Allocating memory does not have any side-effects apart from
being the definition point for the pointer. */
case BUILT_IN_MALLOC:
+ case BUILT_IN_ALIGNED_ALLOC:
case BUILT_IN_CALLOC:
case BUILT_IN_STRDUP:
case BUILT_IN_STRNDUP:
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index b1e21b8b47c..99e1920a070 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -1803,6 +1803,25 @@ evaluate_stmt (gimple stmt)
val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
break;
+ case BUILT_IN_ALIGNED_ALLOC:
+ {
+ tree align = get_constant_value (gimple_call_arg (stmt, 0));
+ if (align
+ && tree_fits_uhwi_p (align))
+ {
+ unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
+ if (aligni > 1
+ /* align must be a power of two. */
+ && (aligni & (aligni - 1)) == 0)
+ {
+ val.lattice_val = CONSTANT;
+ val.value = build_int_cst (ptr_type_node, 0);
+ val.mask = double_int::from_shwi (-aligni);
+ }
+ }
+ break;
+ }
+
default:;
}
}
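A hedged example of what the new BUILT_IN_ALIGNED_ALLOC case enables (my own snippet, not a testcase from this patch): when the alignment argument is a constant power of two, CCP records that the low bits of the returned pointer are zero, so alignment tests against that constant can be folded away.

#include <stdlib.h>
#include <stdint.h>

/* With the change above, CCP knows p is 64-byte aligned, so the masked
   expression folds to 0 and the function returns 0 (assumes -O2).  */
int
is_misaligned (size_t n)
{
  char *p = aligned_alloc (64, n);
  int r = ((uintptr_t) p & 63) != 0;
  free (p);
  return r;
}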
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index 6389f238604..e1edb2ee3ac 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -231,6 +231,7 @@ mark_stmt_if_obviously_necessary (gimple stmt, bool aggressive)
switch (DECL_FUNCTION_CODE (callee))
{
case BUILT_IN_MALLOC:
+ case BUILT_IN_ALIGNED_ALLOC:
case BUILT_IN_CALLOC:
case BUILT_IN_ALLOCA:
case BUILT_IN_ALLOCA_WITH_ALIGN:
@@ -573,6 +574,7 @@ mark_all_reaching_defs_necessary_1 (ao_ref *ref ATTRIBUTE_UNUSED,
switch (DECL_FUNCTION_CODE (callee))
{
case BUILT_IN_MALLOC:
+ case BUILT_IN_ALIGNED_ALLOC:
case BUILT_IN_CALLOC:
case BUILT_IN_ALLOCA:
case BUILT_IN_ALLOCA_WITH_ALIGN:
@@ -776,7 +778,8 @@ propagate_necessity (bool aggressive)
&& is_gimple_call (def_stmt = SSA_NAME_DEF_STMT (ptr))
&& (def_callee = gimple_call_fndecl (def_stmt))
&& DECL_BUILT_IN_CLASS (def_callee) == BUILT_IN_NORMAL
- && (DECL_FUNCTION_CODE (def_callee) == BUILT_IN_MALLOC
+ && (DECL_FUNCTION_CODE (def_callee) == BUILT_IN_ALIGNED_ALLOC
+ || DECL_FUNCTION_CODE (def_callee) == BUILT_IN_MALLOC
|| DECL_FUNCTION_CODE (def_callee) == BUILT_IN_CALLOC))
continue;
}
@@ -822,6 +825,7 @@ propagate_necessity (bool aggressive)
&& (DECL_FUNCTION_CODE (callee) == BUILT_IN_MEMSET
|| DECL_FUNCTION_CODE (callee) == BUILT_IN_MEMSET_CHK
|| DECL_FUNCTION_CODE (callee) == BUILT_IN_MALLOC
+ || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALIGNED_ALLOC
|| DECL_FUNCTION_CODE (callee) == BUILT_IN_CALLOC
|| DECL_FUNCTION_CODE (callee) == BUILT_IN_FREE
|| DECL_FUNCTION_CODE (callee) == BUILT_IN_VA_END
@@ -1229,7 +1233,8 @@ eliminate_unnecessary_stmts (void)
special logic we apply to malloc/free pair removal. */
&& (!(call = gimple_call_fndecl (stmt))
|| DECL_BUILT_IN_CLASS (call) != BUILT_IN_NORMAL
- || (DECL_FUNCTION_CODE (call) != BUILT_IN_MALLOC
+ || (DECL_FUNCTION_CODE (call) != BUILT_IN_ALIGNED_ALLOC
+ && DECL_FUNCTION_CODE (call) != BUILT_IN_MALLOC
&& DECL_FUNCTION_CODE (call) != BUILT_IN_CALLOC
&& DECL_FUNCTION_CODE (call) != BUILT_IN_ALLOCA
&& (DECL_FUNCTION_CODE (call)
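A matching hedged example for the DCE changes (again my own, not from the testsuite): an aligned_alloc whose result is only freed is now treated like the existing malloc/calloc special case, so the whole pair can be removed as dead.

#include <stdlib.h>

/* Illustrative: nothing uses the allocation, so with the changes above DCE
   may delete both calls, just as it already could for malloc/free.  */
void
discard (void)
{
  void *p = aligned_alloc (16, 128);
  free (p);
}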
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index 9dcbc530c36..6de2e4e40ec 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -598,7 +598,7 @@ verify_loop_closed_ssa (bool verify_ssa_p)
return;
if (verify_ssa_p)
- verify_ssa (false);
+ verify_ssa (false, true);
timevar_push (TV_VERIFY_LOOP_CLOSED);
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index e794dc1bfc3..856325e0de4 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -959,7 +959,7 @@ error:
TODO: verify the variable annotations. */
DEBUG_FUNCTION void
-verify_ssa (bool check_modified_stmt)
+verify_ssa (bool check_modified_stmt, bool check_ssa_operands)
{
size_t i;
basic_block bb;
@@ -1042,7 +1042,7 @@ verify_ssa (bool check_modified_stmt)
goto err;
}
- if (verify_ssa_operands (cfun, stmt))
+ if (check_ssa_operands && verify_ssa_operands (cfun, stmt))
{
print_gimple_stmt (stderr, stmt, 0, TDF_VOPS);
goto err;
diff --git a/gcc/tree-ssa.h b/gcc/tree-ssa.h
index 1b1a9869062..c866206d522 100644
--- a/gcc/tree-ssa.h
+++ b/gcc/tree-ssa.h
@@ -45,7 +45,7 @@ extern void insert_debug_temp_for_var_def (gimple_stmt_iterator *, tree);
extern void insert_debug_temps_for_defs (gimple_stmt_iterator *);
extern void reset_debug_uses (gimple);
extern void release_defs_bitset (bitmap toremove);
-extern void verify_ssa (bool);
+extern void verify_ssa (bool, bool);
extern void init_tree_ssa (struct function *);
extern void delete_tree_ssa (void);
extern bool tree_ssa_useless_type_conversion (tree);
diff --git a/gcc/tree-tailcall.c b/gcc/tree-tailcall.c
index 11a29659bc5..9ad25d81c6d 100644
--- a/gcc/tree-tailcall.c
+++ b/gcc/tree-tailcall.c
@@ -285,9 +285,19 @@ process_assignment (gimple stmt, gimple_stmt_iterator call, tree *m,
{
/* Reject a tailcall if the type conversion might need
additional code. */
- if (gimple_assign_cast_p (stmt)
- && TYPE_MODE (TREE_TYPE (dest)) != TYPE_MODE (TREE_TYPE (src_var)))
- return false;
+ if (gimple_assign_cast_p (stmt))
+ {
+ if (TYPE_MODE (TREE_TYPE (dest)) != TYPE_MODE (TREE_TYPE (src_var)))
+ return false;
+
+ /* Even if the type modes are the same, if the precision of the type
+ is smaller than the mode's precision, reduce_to_bit_field_precision
+ would generate additional code. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (dest))
+ && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest)))
+ > TYPE_PRECISION (TREE_TYPE (dest))))
+ return false;
+ }
if (src_var != *ass_var)
return false;
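A hedged illustration of the new precision check (target assumptions and names are mine, not from the patch): on a typical target _Bool and unsigned char share the same machine mode (QImode), but _Bool has precision 1, so the narrowing cast below may need an extra truncation statement after the call and therefore must not be emitted as a sibling (tail) call.

extern unsigned char get_flag (void);

/* The cast narrows an 8-bit-mode value to a 1-bit-precision type, so code
   is still needed after get_flag returns.  */
_Bool
wrapper (void)
{
  return (_Bool) get_flag ();
}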
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 890ee936fea..613c9e67103 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -4356,7 +4356,7 @@ debug_all_value_ranges (void)
/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
create a new SSA name N and return the assertion assignment
- 'V = ASSERT_EXPR <V, V OP W>'. */
+ 'N = ASSERT_EXPR <V, V OP W>'. */
static gimple
build_assert_expr_for (tree cond, tree v)
@@ -6037,7 +6037,7 @@ process_assert_insertions (void)
}
else
{
- y = ASSERT_EXPR <y, x <= y>
+ y = ASSERT_EXPR <y, x >= y>
x = y + 3
}
diff --git a/include/ChangeLog b/include/ChangeLog
index 9d28cea1bbd..41103a4c8f9 100644
--- a/include/ChangeLog
+++ b/include/ChangeLog
@@ -1,3 +1,7 @@
+2014-04-30 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * longlong.h (__i386__): Remove W_TYPE_SIZE==64 handling.
+
2014-04-22 Yufeng Zhang <yufeng.zhang@arm.com>
* longlong.h: Merge from glibc.
diff --git a/include/longlong.h b/include/longlong.h
index d45dbe2b73b..0770290a58b 100644
--- a/include/longlong.h
+++ b/include/longlong.h
@@ -483,7 +483,7 @@ extern UDItype __umulsidi3 (USItype, USItype);
#define UDIV_TIME 40
#endif /* 80x86 */
-#if (defined (__x86_64__) || defined (__i386__)) && W_TYPE_SIZE == 64
+#if defined (__x86_64__) && W_TYPE_SIZE == 64
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add{q} {%5,%1|%1,%5}\n\tadc{q} {%3,%0|%0,%3}" \
: "=r" ((UDItype) (sh)), \
diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog
index e2b28ee2c4d..e562c30235d 100644
--- a/libgcc/ChangeLog
+++ b/libgcc/ChangeLog
@@ -1,3 +1,10 @@
+2014-04-30 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ Workaround for current cygwin32 build problems.
+ * config/i386/cygming-crtbegin.c (__register_frame_info,
+ __deregister_frame_info, _Jv_RegisterClasses): Compile weak default
+ functions only for 64-bit systems.
+
2014-04-25 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
* config/i386/crtfastmath.c [!__x86_64__ && __sun__ && __svr4__]
diff --git a/libgcc/config/i386/cygming-crtbegin.c b/libgcc/config/i386/cygming-crtbegin.c
index eeb51d4c573..195b4637637 100644
--- a/libgcc/config/i386/cygming-crtbegin.c
+++ b/libgcc/config/i386/cygming-crtbegin.c
@@ -54,6 +54,11 @@ extern void __register_frame_info (__attribute__((unused)) const void *,
TARGET_ATTRIBUTE_WEAK;
extern void *__deregister_frame_info (__attribute__((unused)) const void *)
TARGET_ATTRIBUTE_WEAK;
+
+/* Workaround for current cygwin32 build problems (Bug gas/16858).
+ Compile weak default functions only for 64-bit systems,
+ when absolutely necessary. */
+#ifdef __x86_64__
TARGET_ATTRIBUTE_WEAK void
__register_frame_info (__attribute__((unused)) const void *p,
__attribute__((unused)) struct object *o)
@@ -65,16 +70,19 @@ __deregister_frame_info (__attribute__((unused)) const void *p)
{
return (void*) 0;
}
+#endif
#endif /* DWARF2_UNWIND_INFO */
#if TARGET_USE_JCR_SECTION
extern void _Jv_RegisterClasses (__attribute__((unused)) const void *)
TARGET_ATTRIBUTE_WEAK;
+#ifdef __x86_64__
TARGET_ATTRIBUTE_WEAK void
_Jv_RegisterClasses (__attribute__((unused)) const void *p)
{
}
+#endif
#endif /* TARGET_USE_JCR_SECTION */
#if defined(HAVE_LD_RO_RW_SECTION_MIXING)