author    mrs <mrs@138bc75d-0d04-0410-961f-82ee72b054a4>  2014-01-03 23:45:56 +0000
committer mrs <mrs@138bc75d-0d04-0410-961f-82ee72b054a4>  2014-01-03 23:45:56 +0000
commit    fe644b69d013e77c7fc2db2a9863969d4f564561 (patch)
tree      a2cb1398bd46b8decaea84f1b7de8c7cd35e6046 /gcc/config/arm
parent    90079d10dd355193fe289fa0bfcb0d7be880d45a (diff)
parent    04e0495a6da35f3b0bcedbd75908cb6e9ba8ff8f (diff)
Merge in trunk.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/wide-int@206327 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/arm')
-rw-r--r--  gcc/config/arm/README-interworking | 2
-rw-r--r--  gcc/config/arm/aarch-common-protos.h | 2
-rw-r--r--  gcc/config/arm/aarch-common.c | 2
-rw-r--r--  gcc/config/arm/aarch-cost-tables.h | 2
-rw-r--r--  gcc/config/arm/aout.h | 2
-rw-r--r--  gcc/config/arm/arm-arches.def | 3
-rw-r--r--  gcc/config/arm/arm-c.c | 2
-rw-r--r--  gcc/config/arm/arm-cores.def | 198
-rw-r--r--  gcc/config/arm/arm-fixed.md | 2
-rw-r--r--  gcc/config/arm/arm-fpus.def | 2
-rw-r--r--  gcc/config/arm/arm-generic.md | 2
-rw-r--r--  gcc/config/arm/arm-ldmstm.ml | 11
-rw-r--r--  gcc/config/arm/arm-modes.def | 2
-rw-r--r--  gcc/config/arm/arm-opts.h | 7
-rw-r--r--  gcc/config/arm/arm-protos.h | 7
-rw-r--r--  gcc/config/arm/arm-tables.opt | 32
-rw-r--r--  gcc/config/arm/arm-tune.md | 31
-rw-r--r--  gcc/config/arm/arm.c | 440
-rw-r--r--  gcc/config/arm/arm.h | 42
-rw-r--r--  gcc/config/arm/arm.md | 25
-rw-r--r--  gcc/config/arm/arm.opt | 2
-rw-r--r--  gcc/config/arm/arm1020e.md | 2
-rw-r--r--  gcc/config/arm/arm1026ejs.md | 2
-rw-r--r--  gcc/config/arm/arm1136jfs.md | 2
-rw-r--r--  gcc/config/arm/arm926ejs.md | 2
-rw-r--r--  gcc/config/arm/arm_acle.h | 100
-rw-r--r--  gcc/config/arm/arm_neon.h | 2282
-rw-r--r--  gcc/config/arm/arm_neon_builtins.def | 14
-rw-r--r--  gcc/config/arm/bpabi.h | 8
-rw-r--r--  gcc/config/arm/coff.h | 2
-rw-r--r--  gcc/config/arm/constraints.md | 12
-rw-r--r--  gcc/config/arm/cortex-a15-neon.md | 2
-rw-r--r--  gcc/config/arm/cortex-a15.md | 2
-rw-r--r--  gcc/config/arm/cortex-a5.md | 2
-rw-r--r--  gcc/config/arm/cortex-a53.md | 2
-rw-r--r--  gcc/config/arm/cortex-a7.md | 2
-rw-r--r--  gcc/config/arm/cortex-a8-neon.md | 2
-rw-r--r--  gcc/config/arm/cortex-a8.md | 2
-rw-r--r--  gcc/config/arm/cortex-a9-neon.md | 2
-rw-r--r--  gcc/config/arm/cortex-a9.md | 2
-rw-r--r--  gcc/config/arm/cortex-m4-fpu.md | 2
-rw-r--r--  gcc/config/arm/cortex-m4.md | 2
-rw-r--r--  gcc/config/arm/cortex-r4.md | 2
-rw-r--r--  gcc/config/arm/cortex-r4f.md | 2
-rw-r--r--  gcc/config/arm/crypto.def | 34
-rw-r--r--  gcc/config/arm/crypto.md | 86
-rw-r--r--  gcc/config/arm/driver-arm.c | 2
-rw-r--r--  gcc/config/arm/elf.h | 2
-rw-r--r--  gcc/config/arm/fa526.md | 2
-rw-r--r--  gcc/config/arm/fa606te.md | 2
-rw-r--r--  gcc/config/arm/fa626te.md | 2
-rw-r--r--  gcc/config/arm/fa726te.md | 2
-rw-r--r--  gcc/config/arm/fmp626.md | 2
-rwxr-xr-x  gcc/config/arm/genopt.sh | 4
-rwxr-xr-x  gcc/config/arm/gentune.sh | 4
-rw-r--r--  gcc/config/arm/iterators.md | 59
-rw-r--r--  gcc/config/arm/iwmmxt.md | 2
-rw-r--r--  gcc/config/arm/iwmmxt2.md | 2
-rw-r--r--  gcc/config/arm/ldmstm.md | 344
-rw-r--r--  gcc/config/arm/ldrdstrd.md | 2
-rw-r--r--  gcc/config/arm/linux-eabi.h | 2
-rw-r--r--  gcc/config/arm/linux-elf.h | 2
-rw-r--r--  gcc/config/arm/linux-gas.h | 2
-rw-r--r--  gcc/config/arm/marvell-f-iwmmxt.md | 2
-rw-r--r--  gcc/config/arm/marvell-pj4.md | 2
-rw-r--r--  gcc/config/arm/mmintrin.h | 2
-rw-r--r--  gcc/config/arm/neon-docgen.ml | 84
-rw-r--r--  gcc/config/arm/neon-gen.ml | 103
-rw-r--r--  gcc/config/arm/neon-testgen.ml | 5
-rw-r--r--  gcc/config/arm/neon.md | 22
-rw-r--r--  gcc/config/arm/neon.ml | 426
-rw-r--r--  gcc/config/arm/netbsd-elf.h | 2
-rw-r--r--  gcc/config/arm/predicates.md | 23
-rw-r--r--  gcc/config/arm/rtems-eabi.h | 2
-rw-r--r--  gcc/config/arm/semi.h | 2
-rw-r--r--  gcc/config/arm/symbian.h | 2
-rw-r--r--  gcc/config/arm/sync.md | 2
-rw-r--r--  gcc/config/arm/t-aprofile | 7
-rw-r--r--  gcc/config/arm/t-arm | 2
-rw-r--r--  gcc/config/arm/t-arm-elf | 2
-rw-r--r--  gcc/config/arm/t-linux-eabi | 2
-rw-r--r--  gcc/config/arm/t-symbian | 2
-rw-r--r--  gcc/config/arm/t-vxworks | 2
-rw-r--r--  gcc/config/arm/thumb2.md | 2
-rw-r--r--  gcc/config/arm/types.md | 23
-rw-r--r--  gcc/config/arm/uclinux-eabi.h | 2
-rw-r--r--  gcc/config/arm/uclinux-elf.h | 2
-rw-r--r--  gcc/config/arm/unknown-elf.h | 2
-rw-r--r--  gcc/config/arm/unspecs.md | 23
-rw-r--r--  gcc/config/arm/vec-common.md | 2
-rw-r--r--  gcc/config/arm/vfp.md | 16
-rw-r--r--  gcc/config/arm/vfp11.md | 2
-rw-r--r--  gcc/config/arm/vxworks.h | 2
-rw-r--r--  gcc/config/arm/vxworks.opt | 2
94 files changed, 3483 insertions, 1118 deletions
diff --git a/gcc/config/arm/README-interworking b/gcc/config/arm/README-interworking
index fe322d5437c..3e36f12a832 100644
--- a/gcc/config/arm/README-interworking
+++ b/gcc/config/arm/README-interworking
@@ -742,7 +742,7 @@ used.
interworking as the --support-old-code switch has taken care of this.
-Copyright (C) 1998-2013 Free Software Foundation, Inc.
+Copyright (C) 1998-2014 Free Software Foundation, Inc.
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
diff --git a/gcc/config/arm/aarch-common-protos.h b/gcc/config/arm/aarch-common-protos.h
index c3652a72c81..056fe56fc5d 100644
--- a/gcc/config/arm/aarch-common-protos.h
+++ b/gcc/config/arm/aarch-common-protos.h
@@ -1,6 +1,6 @@
/* Functions and structures shared between arm and aarch64.
- Copyright (C) 1991-2013 Free Software Foundation, Inc.
+ Copyright (C) 1991-2014 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GCC.
diff --git a/gcc/config/arm/aarch-common.c b/gcc/config/arm/aarch-common.c
index a46e6751a7b..c11f7e9544c 100644
--- a/gcc/config/arm/aarch-common.c
+++ b/gcc/config/arm/aarch-common.c
@@ -1,7 +1,7 @@
/* Dependency checks for instruction scheduling, shared between ARM and
AARCH64.
- Copyright (C) 1991-2013 Free Software Foundation, Inc.
+ Copyright (C) 1991-2014 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GCC.
diff --git a/gcc/config/arm/aarch-cost-tables.h b/gcc/config/arm/aarch-cost-tables.h
index d3e7dd2d799..a41ee8a3db6 100644
--- a/gcc/config/arm/aarch-cost-tables.h
+++ b/gcc/config/arm/aarch-cost-tables.h
@@ -1,6 +1,6 @@
/* RTX cost tables shared between arm and aarch64.
- Copyright (C) 2013 Free Software Foundation, Inc.
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GCC.
diff --git a/gcc/config/arm/aout.h b/gcc/config/arm/aout.h
index e9854bb32b7..51d32a9d461 100644
--- a/gcc/config/arm/aout.h
+++ b/gcc/config/arm/aout.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for ARM with a.out
- Copyright (C) 1995-2013 Free Software Foundation, Inc.
+ Copyright (C) 1995-2014 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rearnsha@armltd.co.uk).
This file is part of GCC.
diff --git a/gcc/config/arm/arm-arches.def b/gcc/config/arm/arm-arches.def
index fcf34012262..ac543ee62ad 100644
--- a/gcc/config/arm/arm-arches.def
+++ b/gcc/config/arm/arm-arches.def
@@ -1,5 +1,5 @@
/* ARM CPU architectures.
- Copyright (C) 1991-2013 Free Software Foundation, Inc.
+ Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of GCC.
@@ -54,5 +54,6 @@ ARM_ARCH("armv7-r", cortexr4, 7R, FL_CO_PROC | FL_FOR_ARCH7R)
ARM_ARCH("armv7-m", cortexm3, 7M, FL_CO_PROC | FL_FOR_ARCH7M)
ARM_ARCH("armv7e-m", cortexm4, 7EM, FL_CO_PROC | FL_FOR_ARCH7EM)
ARM_ARCH("armv8-a", cortexa53, 8A, FL_CO_PROC | FL_FOR_ARCH8A)
+ARM_ARCH("armv8-a+crc",cortexa53, 8A,FL_CO_PROC | FL_CRC32 | FL_FOR_ARCH8A)
ARM_ARCH("iwmmxt", iwmmxt, 5TE, FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT)
ARM_ARCH("iwmmxt2", iwmmxt2, 5TE, FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT | FL_IWMMXT2)
diff --git a/gcc/config/arm/arm-c.c b/gcc/config/arm/arm-c.c
index 6d17cf44fec..af64f7a1f8a 100644
--- a/gcc/config/arm/arm-c.c
+++ b/gcc/config/arm/arm-c.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2013 Free Software Foundation, Inc.
+/* Copyright (C) 2007-2014 Free Software Foundation, Inc.
This file is part of GCC.
diff --git a/gcc/config/arm/arm-cores.def b/gcc/config/arm/arm-cores.def
index e7cea63beae..d961e250e46 100644
--- a/gcc/config/arm/arm-cores.def
+++ b/gcc/config/arm/arm-cores.def
@@ -1,5 +1,5 @@
/* ARM CPU Cores
- Copyright (C) 2003-2013 Free Software Foundation, Inc.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
Written by CodeSourcery, LLC
This file is part of GCC.
@@ -20,10 +20,13 @@
/* Before using #include to read this file, define a macro:
- ARM_CORE(CORE_NAME, CORE_IDENT, ARCH, FLAGS, COSTS)
+ ARM_CORE(CORE_NAME, INTERNAL_IDENT, TUNE_IDENT, ARCH, FLAGS, COSTS)
The CORE_NAME is the name of the core, represented as a string constant.
- The CORE_IDENT is the name of the core, represented as an identifier.
+ The INTERNAL_IDENT is the name of the core represented as an identifier.
+ This must be unique for each entry in this table.
+ The TUNE_IDENT is the name of the core for which scheduling decisions
+ should be made, represented as an identifier.
ARCH is the architecture revision implemented by the chip.
FLAGS are the bitwise-or of the traits that apply to that core.
This need not include flags implied by the architecture.
@@ -35,109 +38,122 @@
Some tools assume no whitespace up to the first "," in each entry. */
/* V2/V2A Architecture Processors */
-ARM_CORE("arm2", arm2, 2, FL_CO_PROC | FL_MODE26, slowmul)
-ARM_CORE("arm250", arm250, 2, FL_CO_PROC | FL_MODE26, slowmul)
-ARM_CORE("arm3", arm3, 2, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm2", arm2, arm2, 2, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm250", arm250, arm250, 2, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm3", arm3, arm3, 2, FL_CO_PROC | FL_MODE26, slowmul)
/* V3 Architecture Processors */
-ARM_CORE("arm6", arm6, 3, FL_CO_PROC | FL_MODE26, slowmul)
-ARM_CORE("arm60", arm60, 3, FL_CO_PROC | FL_MODE26, slowmul)
-ARM_CORE("arm600", arm600, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm610", arm610, 3, FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm620", arm620, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm7", arm7, 3, FL_CO_PROC | FL_MODE26, slowmul)
-ARM_CORE("arm7d", arm7d, 3, FL_CO_PROC | FL_MODE26, slowmul)
-ARM_CORE("arm7di", arm7di, 3, FL_CO_PROC | FL_MODE26, slowmul)
-ARM_CORE("arm70", arm70, 3, FL_CO_PROC | FL_MODE26, slowmul)
-ARM_CORE("arm700", arm700, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm700i", arm700i, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm710", arm710, 3, FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm720", arm720, 3, FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm710c", arm710c, 3, FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm7100", arm7100, 3, FL_MODE26 | FL_WBUF, slowmul)
-ARM_CORE("arm7500", arm7500, 3, FL_MODE26 | FL_WBUF, slowmul)
-/* Doesn't have an external co-proc, but does have embedded fpa. */
-ARM_CORE("arm7500fe", arm7500fe, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm6", arm6, arm6, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm60", arm60, arm60, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm600", arm600, arm600, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm610", arm610, arm610, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm620", arm620, arm620, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm7", arm7, arm7, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm7d", arm7d, arm7d, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm7di", arm7di, arm7di, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm70", arm70, arm70, 3, FL_CO_PROC | FL_MODE26, slowmul)
+ARM_CORE("arm700", arm700, arm700, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm700i", arm700i, arm700i, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm710", arm710, arm710, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm720", arm720, arm720, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm710c", arm710c, arm710c, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm7100", arm7100, arm7100, 3, FL_MODE26 | FL_WBUF, slowmul)
+ARM_CORE("arm7500", arm7500, arm7500, 3, FL_MODE26 | FL_WBUF, slowmul)
+/* Doesn't have an external co-proc, but does have embedded fpa. */
+ARM_CORE("arm7500fe", arm7500fe, arm7500fe, 3, FL_CO_PROC | FL_MODE26 | FL_WBUF, slowmul)
/* V3M Architecture Processors */
/* arm7m doesn't exist on its own, but only with D, ("and", and I), but
those don't alter the code, so arm7m is sometimes used. */
-ARM_CORE("arm7m", arm7m, 3M, FL_CO_PROC | FL_MODE26, fastmul)
-ARM_CORE("arm7dm", arm7dm, 3M, FL_CO_PROC | FL_MODE26, fastmul)
-ARM_CORE("arm7dmi", arm7dmi, 3M, FL_CO_PROC | FL_MODE26, fastmul)
+ARM_CORE("arm7m", arm7m, arm7m, 3M, FL_CO_PROC | FL_MODE26, fastmul)
+ARM_CORE("arm7dm", arm7dm, arm7dm, 3M, FL_CO_PROC | FL_MODE26, fastmul)
+ARM_CORE("arm7dmi", arm7dmi, arm7dmi, 3M, FL_CO_PROC | FL_MODE26, fastmul)
/* V4 Architecture Processors */
-ARM_CORE("arm8", arm8, 4, FL_MODE26 | FL_LDSCHED, fastmul)
-ARM_CORE("arm810", arm810, 4, FL_MODE26 | FL_LDSCHED, fastmul)
-ARM_CORE("strongarm", strongarm, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, strongarm)
-ARM_CORE("strongarm110", strongarm110, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, strongarm)
-ARM_CORE("strongarm1100", strongarm1100, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, strongarm)
-ARM_CORE("strongarm1110", strongarm1110, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, strongarm)
-ARM_CORE("fa526", fa526, 4, FL_LDSCHED, fastmul)
-ARM_CORE("fa626", fa626, 4, FL_LDSCHED, fastmul)
+ARM_CORE("arm8", arm8, arm8, 4, FL_MODE26 | FL_LDSCHED, fastmul)
+ARM_CORE("arm810", arm810, arm810, 4, FL_MODE26 | FL_LDSCHED, fastmul)
+ARM_CORE("strongarm", strongarm, strongarm, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, strongarm)
+ARM_CORE("strongarm110", strongarm110, strongarm110, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, strongarm)
+ARM_CORE("strongarm1100", strongarm1100, strongarm1100, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, strongarm)
+ARM_CORE("strongarm1110", strongarm1110, strongarm1110, 4, FL_MODE26 | FL_LDSCHED | FL_STRONG, strongarm)
+ARM_CORE("fa526", fa526, fa526, 4, FL_LDSCHED, fastmul)
+ARM_CORE("fa626", fa626, fa626, 4, FL_LDSCHED, fastmul)
/* V4T Architecture Processors */
-ARM_CORE("arm7tdmi", arm7tdmi, 4T, FL_CO_PROC , fastmul)
-ARM_CORE("arm7tdmi-s", arm7tdmis, 4T, FL_CO_PROC , fastmul)
-ARM_CORE("arm710t", arm710t, 4T, FL_WBUF, fastmul)
-ARM_CORE("arm720t", arm720t, 4T, FL_WBUF, fastmul)
-ARM_CORE("arm740t", arm740t, 4T, FL_WBUF, fastmul)
-ARM_CORE("arm9", arm9, 4T, FL_LDSCHED, fastmul)
-ARM_CORE("arm9tdmi", arm9tdmi, 4T, FL_LDSCHED, fastmul)
-ARM_CORE("arm920", arm920, 4T, FL_LDSCHED, fastmul)
-ARM_CORE("arm920t", arm920t, 4T, FL_LDSCHED, fastmul)
-ARM_CORE("arm922t", arm922t, 4T, FL_LDSCHED, fastmul)
-ARM_CORE("arm940t", arm940t, 4T, FL_LDSCHED, fastmul)
-ARM_CORE("ep9312", ep9312, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm7tdmi", arm7tdmi, arm7tdmi, 4T, FL_CO_PROC, fastmul)
+ARM_CORE("arm7tdmi-s", arm7tdmis, arm7tdmis, 4T, FL_CO_PROC, fastmul)
+ARM_CORE("arm710t", arm710t, arm710t, 4T, FL_WBUF, fastmul)
+ARM_CORE("arm720t", arm720t, arm720t, 4T, FL_WBUF, fastmul)
+ARM_CORE("arm740t", arm740t, arm740t, 4T, FL_WBUF, fastmul)
+ARM_CORE("arm9", arm9, arm9, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm9tdmi", arm9tdmi, arm9tdmi, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm920", arm920, arm920, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm920t", arm920t, arm920t, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm922t", arm922t, arm922t, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("arm940t", arm940t, arm940t, 4T, FL_LDSCHED, fastmul)
+ARM_CORE("ep9312", ep9312, ep9312, 4T, FL_LDSCHED, fastmul)
/* V5T Architecture Processors */
-ARM_CORE("arm10tdmi", arm10tdmi, 5T, FL_LDSCHED, fastmul)
-ARM_CORE("arm1020t", arm1020t, 5T, FL_LDSCHED, fastmul)
+ARM_CORE("arm10tdmi", arm10tdmi, arm10tdmi, 5T, FL_LDSCHED, fastmul)
+ARM_CORE("arm1020t", arm1020t, arm1020t, 5T, FL_LDSCHED, fastmul)
/* V5TE Architecture Processors */
-ARM_CORE("arm9e", arm9e, 5TE, FL_LDSCHED, 9e)
-ARM_CORE("arm946e-s", arm946es, 5TE, FL_LDSCHED, 9e)
-ARM_CORE("arm966e-s", arm966es, 5TE, FL_LDSCHED, 9e)
-ARM_CORE("arm968e-s", arm968es, 5TE, FL_LDSCHED, 9e)
-ARM_CORE("arm10e", arm10e, 5TE, FL_LDSCHED, fastmul)
-ARM_CORE("arm1020e", arm1020e, 5TE, FL_LDSCHED, fastmul)
-ARM_CORE("arm1022e", arm1022e, 5TE, FL_LDSCHED, fastmul)
-ARM_CORE("xscale", xscale, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE, xscale)
-ARM_CORE("iwmmxt", iwmmxt, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale)
-ARM_CORE("iwmmxt2", iwmmxt2, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT | FL_IWMMXT2, xscale)
-ARM_CORE("fa606te", fa606te, 5TE, FL_LDSCHED, 9e)
-ARM_CORE("fa626te", fa626te, 5TE, FL_LDSCHED, 9e)
-ARM_CORE("fmp626", fmp626, 5TE, FL_LDSCHED, 9e)
-ARM_CORE("fa726te", fa726te, 5TE, FL_LDSCHED, fa726te)
+ARM_CORE("arm9e", arm9e, arm9e, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("arm946e-s", arm946es, arm946es, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("arm966e-s", arm966es, arm966es, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("arm968e-s", arm968es, arm968es, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("arm10e", arm10e, arm10e, 5TE, FL_LDSCHED, fastmul)
+ARM_CORE("arm1020e", arm1020e, arm1020e, 5TE, FL_LDSCHED, fastmul)
+ARM_CORE("arm1022e", arm1022e, arm1022e, 5TE, FL_LDSCHED, fastmul)
+ARM_CORE("xscale", xscale, xscale, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE, xscale)
+ARM_CORE("iwmmxt", iwmmxt, iwmmxt, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT, xscale)
+ARM_CORE("iwmmxt2", iwmmxt2, iwmmxt2, 5TE, FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_IWMMXT | FL_IWMMXT2, xscale)
+ARM_CORE("fa606te", fa606te, fa606te, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("fa626te", fa626te, fa626te, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("fmp626", fmp626, fmp626, 5TE, FL_LDSCHED, 9e)
+ARM_CORE("fa726te", fa726te, fa726te, 5TE, FL_LDSCHED, fa726te)
/* V5TEJ Architecture Processors */
-ARM_CORE("arm926ej-s", arm926ejs, 5TEJ, FL_LDSCHED, 9e)
-ARM_CORE("arm1026ej-s", arm1026ejs, 5TEJ, FL_LDSCHED, 9e)
+ARM_CORE("arm926ej-s", arm926ejs, arm926ejs, 5TEJ, FL_LDSCHED, 9e)
+ARM_CORE("arm1026ej-s", arm1026ejs, arm1026ejs, 5TEJ, FL_LDSCHED, 9e)
/* V6 Architecture Processors */
-ARM_CORE("arm1136j-s", arm1136js, 6J, FL_LDSCHED, 9e)
-ARM_CORE("arm1136jf-s", arm1136jfs, 6J, FL_LDSCHED | FL_VFPV2, 9e)
-ARM_CORE("arm1176jz-s", arm1176jzs, 6ZK, FL_LDSCHED, 9e)
-ARM_CORE("arm1176jzf-s", arm1176jzfs, 6ZK, FL_LDSCHED | FL_VFPV2, 9e)
-ARM_CORE("mpcorenovfp", mpcorenovfp, 6K, FL_LDSCHED, 9e)
-ARM_CORE("mpcore", mpcore, 6K, FL_LDSCHED | FL_VFPV2, 9e)
-ARM_CORE("arm1156t2-s", arm1156t2s, 6T2, FL_LDSCHED, v6t2)
-ARM_CORE("arm1156t2f-s", arm1156t2fs, 6T2, FL_LDSCHED | FL_VFPV2, v6t2)
-ARM_CORE("generic-armv7-a", genericv7a, 7A, FL_LDSCHED, cortex)
-ARM_CORE("cortex-a5", cortexa5, 7A, FL_LDSCHED, cortex_a5)
-ARM_CORE("cortex-a7", cortexa7, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a7)
-ARM_CORE("cortex-a8", cortexa8, 7A, FL_LDSCHED, cortex)
-ARM_CORE("cortex-a9", cortexa9, 7A, FL_LDSCHED, cortex_a9)
-ARM_CORE("cortex-a12", cortexa12, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a12)
-ARM_CORE("cortex-a15", cortexa15, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a15)
-ARM_CORE("cortex-a53", cortexa53, 8A, FL_LDSCHED, cortex_a53)
-ARM_CORE("cortex-r4", cortexr4, 7R, FL_LDSCHED, cortex)
-ARM_CORE("cortex-r4f", cortexr4f, 7R, FL_LDSCHED, cortex)
-ARM_CORE("cortex-r5", cortexr5, 7R, FL_LDSCHED | FL_ARM_DIV, cortex)
-ARM_CORE("cortex-r7", cortexr7, 7R, FL_LDSCHED | FL_ARM_DIV, cortex)
-ARM_CORE("cortex-m4", cortexm4, 7EM, FL_LDSCHED, v7m)
-ARM_CORE("cortex-m3", cortexm3, 7M, FL_LDSCHED, v7m)
-ARM_CORE("cortex-m1", cortexm1, 6M, FL_LDSCHED, v6m)
-ARM_CORE("cortex-m0", cortexm0, 6M, FL_LDSCHED, v6m)
-ARM_CORE("cortex-m0plus", cortexm0plus, 6M, FL_LDSCHED, v6m)
-ARM_CORE("marvell-pj4", marvell_pj4, 7A, FL_LDSCHED, 9e)
+ARM_CORE("arm1136j-s", arm1136js, arm1136js, 6J, FL_LDSCHED, 9e)
+ARM_CORE("arm1136jf-s", arm1136jfs, arm1136jfs, 6J, FL_LDSCHED | FL_VFPV2, 9e)
+ARM_CORE("arm1176jz-s", arm1176jzs, arm1176jzs, 6ZK, FL_LDSCHED, 9e)
+ARM_CORE("arm1176jzf-s", arm1176jzfs, arm1176jzfs, 6ZK, FL_LDSCHED | FL_VFPV2, 9e)
+ARM_CORE("mpcorenovfp", mpcorenovfp, mpcorenovfp, 6K, FL_LDSCHED, 9e)
+ARM_CORE("mpcore", mpcore, mpcore, 6K, FL_LDSCHED | FL_VFPV2, 9e)
+ARM_CORE("arm1156t2-s", arm1156t2s, arm1156t2s, 6T2, FL_LDSCHED, v6t2)
+ARM_CORE("arm1156t2f-s", arm1156t2fs, arm1156t2fs, 6T2, FL_LDSCHED | FL_VFPV2, v6t2)
+
+/* V6M Architecture Processors */
+ARM_CORE("cortex-m1", cortexm1, cortexm1, 6M, FL_LDSCHED, v6m)
+ARM_CORE("cortex-m0", cortexm0, cortexm0, 6M, FL_LDSCHED, v6m)
+ARM_CORE("cortex-m0plus", cortexm0plus, cortexm0plus, 6M, FL_LDSCHED, v6m)
+
+/* V7 Architecture Processors */
+ARM_CORE("generic-armv7-a", genericv7a, genericv7a, 7A, FL_LDSCHED, cortex)
+ARM_CORE("cortex-a5", cortexa5, cortexa5, 7A, FL_LDSCHED, cortex_a5)
+ARM_CORE("cortex-a7", cortexa7, cortexa7, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a7)
+ARM_CORE("cortex-a8", cortexa8, cortexa8, 7A, FL_LDSCHED, cortex)
+ARM_CORE("cortex-a9", cortexa9, cortexa9, 7A, FL_LDSCHED, cortex_a9)
+ARM_CORE("cortex-a12", cortexa12, cortexa15, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a12)
+ARM_CORE("cortex-a15", cortexa15, cortexa15, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a15)
+ARM_CORE("cortex-r4", cortexr4, cortexr4, 7R, FL_LDSCHED, cortex)
+ARM_CORE("cortex-r4f", cortexr4f, cortexr4f, 7R, FL_LDSCHED, cortex)
+ARM_CORE("cortex-r5", cortexr5, cortexr5, 7R, FL_LDSCHED | FL_ARM_DIV, cortex)
+ARM_CORE("cortex-r7", cortexr7, cortexr7, 7R, FL_LDSCHED | FL_ARM_DIV, cortex)
+ARM_CORE("cortex-m4", cortexm4, cortexm4, 7EM, FL_LDSCHED, v7m)
+ARM_CORE("cortex-m3", cortexm3, cortexm3, 7M, FL_LDSCHED, v7m)
+ARM_CORE("marvell-pj4", marvell_pj4, marvell_pj4, 7A, FL_LDSCHED, 9e)
+
+/* V7 big.LITTLE implementations */
+ARM_CORE("cortex-a15.cortex-a7", cortexa15cortexa7, cortexa7, 7A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a15)
+
+/* V8 Architecture Processors */
+ARM_CORE("cortex-a53", cortexa53, cortexa53, 8A, FL_LDSCHED, cortex_a53)
+ARM_CORE("cortex-a57", cortexa57, cortexa15, 8A, FL_LDSCHED, cortex_a15)
+
+/* V8 big.LITTLE implementations */
+ARM_CORE("cortex-a57.cortex-a53", cortexa57cortexa53, cortexa53, 8A, FL_LDSCHED | FL_THUMB_DIV | FL_ARM_DIV, cortex_a15)
diff --git a/gcc/config/arm/arm-fixed.md b/gcc/config/arm/arm-fixed.md
index 3972a850990..4ab9d3597ce 100644
--- a/gcc/config/arm/arm-fixed.md
+++ b/gcc/config/arm/arm-fixed.md
@@ -1,4 +1,4 @@
-;; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
diff --git a/gcc/config/arm/arm-fpus.def b/gcc/config/arm/arm-fpus.def
index 6543942b539..85d9693c1fe 100644
--- a/gcc/config/arm/arm-fpus.def
+++ b/gcc/config/arm/arm-fpus.def
@@ -1,5 +1,5 @@
/* ARM FPU variants.
- Copyright (C) 1991-2013 Free Software Foundation, Inc.
+ Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of GCC.
diff --git a/gcc/config/arm/arm-generic.md b/gcc/config/arm/arm-generic.md
index 8a3335055d1..b26c72c4431 100644
--- a/gcc/config/arm/arm-generic.md
+++ b/gcc/config/arm/arm-generic.md
@@ -1,5 +1,5 @@
;; Generic ARM Pipeline Description
-;; Copyright (C) 2003-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
diff --git a/gcc/config/arm/arm-ldmstm.ml b/gcc/config/arm/arm-ldmstm.ml
index e615437b125..2d8f9e26756 100644
--- a/gcc/config/arm/arm-ldmstm.ml
+++ b/gcc/config/arm/arm-ldmstm.ml
@@ -1,5 +1,5 @@
(* Auto-generate ARM ldm/stm patterns
- Copyright (C) 2010-2013 Free Software Foundation, Inc.
+ Copyright (C) 2010-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
@@ -67,10 +67,13 @@ let destreg nregs first op_type thumb =
Printf.sprintf ("(match_operand:SI %d \"s_register_operand\" \"%s%s\")")
(nregs + 1) (inout_constr op_type) (constr thumb)
+let reg_predicate thumb =
+ if thumb then "low_register_operand" else "arm_hard_general_register_operand"
+
let write_ldm_set thumb nregs offset opnr first =
let indent = " " in
Printf.printf "%s" (if first then " [" else indent);
- Printf.printf "(set (match_operand:SI %d \"arm_hard_register_operand\" \"\")\n" opnr;
+ Printf.printf "(set (match_operand:SI %d \"%s\" \"\")\n" opnr (reg_predicate thumb);
Printf.printf "%s (mem:SI " indent;
begin if offset != 0 then Printf.printf "(plus:SI " end;
Printf.printf "%s" (destreg nregs first IN thumb);
@@ -84,7 +87,7 @@ let write_stm_set thumb nregs offset opnr first =
begin if offset != 0 then Printf.printf "(plus:SI " end;
Printf.printf "%s" (destreg nregs first IN thumb);
begin if offset != 0 then Printf.printf " (const_int %d))" offset end;
- Printf.printf ")\n%s (match_operand:SI %d \"arm_hard_register_operand\" \"\"))" indent opnr
+ Printf.printf ")\n%s (match_operand:SI %d \"%s\" \"\"))" indent opnr (reg_predicate thumb)
let write_ldm_peep_set extra_indent nregs opnr first =
let indent = " " ^ extra_indent in
@@ -319,7 +322,7 @@ let _ =
"/* ARM ldm/stm instruction patterns. This file was automatically generated";
" using arm-ldmstm.ml. Please do not edit manually.";
"";
-" Copyright (C) 2010-2013 Free Software Foundation, Inc.";
+" Copyright (C) 2010-2014 Free Software Foundation, Inc.";
" Contributed by CodeSourcery.";
"";
" This file is part of GCC.";
diff --git a/gcc/config/arm/arm-modes.def b/gcc/config/arm/arm-modes.def
index cb17ed9f389..882aa55c53e 100644
--- a/gcc/config/arm/arm-modes.def
+++ b/gcc/config/arm/arm-modes.def
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for ARM.
- Copyright (C) 2002-2013 Free Software Foundation, Inc.
+ Copyright (C) 2002-2014 Free Software Foundation, Inc.
Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
and Martin Simmons (@harleqn.co.uk).
More major hacks by Richard Earnshaw (rearnsha@arm.com)
diff --git a/gcc/config/arm/arm-opts.h b/gcc/config/arm/arm-opts.h
index a3ef36453c0..a8393975a61 100644
--- a/gcc/config/arm/arm-opts.h
+++ b/gcc/config/arm/arm-opts.h
@@ -1,5 +1,5 @@
/* Definitions for option handling for ARM.
- Copyright (C) 1991-2013 Free Software Foundation, Inc.
+ Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of GCC.
@@ -23,8 +23,9 @@
/* The various ARM cores. */
enum processor_type
{
-#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
- IDENT,
+#undef ARM_CORE
+#define ARM_CORE(NAME, INTERNAL_IDENT, IDENT, ARCH, FLAGS, COSTS) \
+ INTERNAL_IDENT,
#include "arm-cores.def"
#undef ARM_CORE
/* Used to indicate that no processor has been specified. */
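For completeness, a hedged sketch of what the renumbered macro expands to in this enum (one entry shown; illustrative only):

/* Illustrative expansion: each core contributes its INTERNAL_IDENT as an
   enumerator, so enumerators stay unique even when two cores share a
   TUNE_IDENT (as cortex-a57 and cortex-a15 now do).  */
enum processor_type
{
#define ARM_CORE(NAME, INTERNAL_IDENT, TUNE_IDENT, ARCH, FLAGS, COSTS) \
  INTERNAL_IDENT,
  ARM_CORE("cortex-a57.cortex-a53", cortexa57cortexa53, cortexa53, 8A, 0, cortex_a15)
#undef ARM_CORE
  /* Used to indicate that no processor has been specified.  */
  arm_none
};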
diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index c5b16daae4f..13874ee6e50 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -1,5 +1,5 @@
/* Prototypes for exported functions defined in arm.c and pe.c
- Copyright (C) 1999-2013 Free Software Foundation, Inc.
+ Copyright (C) 1999-2014 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rearnsha@arm.com)
Minor hacks by Nick Clifton (nickc@cygnus.com)
@@ -276,6 +276,8 @@ struct tune_params
extern const struct tune_params *current_tune;
extern int vfp3_const_double_for_fract_bits (rtx);
+/* return power of two from operand, otherwise 0. */
+extern int vfp3_const_double_for_bits (rtx);
extern void arm_emit_coreregs_64bit_shift (enum rtx_code, rtx, rtx, rtx, rtx,
rtx);
@@ -289,4 +291,7 @@ extern bool arm_autoinc_modes_ok_p (enum machine_mode, enum arm_auto_incmodes);
extern void arm_emit_eabi_attribute (const char *, int, int);
+/* Defined in gcc/common/config/arm-common.c. */
+extern const char *arm_rewrite_selected_cpu (const char *name);
+
#endif /* ! GCC_ARM_PROTOS_H */
diff --git a/gcc/config/arm/arm-tables.opt b/gcc/config/arm/arm-tables.opt
index b3e7a7c62d7..4f674541de8 100644
--- a/gcc/config/arm/arm-tables.opt
+++ b/gcc/config/arm/arm-tables.opt
@@ -2,7 +2,7 @@
; Generated automatically by genopt.sh from arm-cores.def, arm-arches.def
; and arm-fpus.def.
-; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+; Copyright (C) 2011-2014 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
@@ -232,6 +232,15 @@ EnumValue
Enum(processor_type) String(arm1156t2f-s) Value(arm1156t2fs)
EnumValue
+Enum(processor_type) String(cortex-m1) Value(cortexm1)
+
+EnumValue
+Enum(processor_type) String(cortex-m0) Value(cortexm0)
+
+EnumValue
+Enum(processor_type) String(cortex-m0plus) Value(cortexm0plus)
+
+EnumValue
Enum(processor_type) String(generic-armv7-a) Value(genericv7a)
EnumValue
@@ -253,9 +262,6 @@ EnumValue
Enum(processor_type) String(cortex-a15) Value(cortexa15)
EnumValue
-Enum(processor_type) String(cortex-a53) Value(cortexa53)
-
-EnumValue
Enum(processor_type) String(cortex-r4) Value(cortexr4)
EnumValue
@@ -274,16 +280,19 @@ EnumValue
Enum(processor_type) String(cortex-m3) Value(cortexm3)
EnumValue
-Enum(processor_type) String(cortex-m1) Value(cortexm1)
+Enum(processor_type) String(marvell-pj4) Value(marvell_pj4)
EnumValue
-Enum(processor_type) String(cortex-m0) Value(cortexm0)
+Enum(processor_type) String(cortex-a15.cortex-a7) Value(cortexa15cortexa7)
EnumValue
-Enum(processor_type) String(cortex-m0plus) Value(cortexm0plus)
+Enum(processor_type) String(cortex-a53) Value(cortexa53)
EnumValue
-Enum(processor_type) String(marvell-pj4) Value(marvell_pj4)
+Enum(processor_type) String(cortex-a57) Value(cortexa57)
+
+EnumValue
+Enum(processor_type) String(cortex-a57.cortex-a53) Value(cortexa57cortexa53)
Enum
Name(arm_arch) Type(int)
@@ -362,10 +371,13 @@ EnumValue
Enum(arm_arch) String(armv8-a) Value(23)
EnumValue
-Enum(arm_arch) String(iwmmxt) Value(24)
+Enum(arm_arch) String(armv8-a+crc) Value(24)
+
+EnumValue
+Enum(arm_arch) String(iwmmxt) Value(25)
EnumValue
-Enum(arm_arch) String(iwmmxt2) Value(25)
+Enum(arm_arch) String(iwmmxt2) Value(26)
Enum
Name(arm_fpu) Type(int)
diff --git a/gcc/config/arm/arm-tune.md b/gcc/config/arm/arm-tune.md
index e10d0aa9544..954cab8efb1 100644
--- a/gcc/config/arm/arm-tune.md
+++ b/gcc/config/arm/arm-tune.md
@@ -1,5 +1,34 @@
;; -*- buffer-read-only: t -*-
;; Generated automatically by gentune.sh from arm-cores.def
(define_attr "tune"
- "arm2,arm250,arm3,arm6,arm60,arm600,arm610,arm620,arm7,arm7d,arm7di,arm70,arm700,arm700i,arm710,arm720,arm710c,arm7100,arm7500,arm7500fe,arm7m,arm7dm,arm7dmi,arm8,arm810,strongarm,strongarm110,strongarm1100,strongarm1110,fa526,fa626,arm7tdmi,arm7tdmis,arm710t,arm720t,arm740t,arm9,arm9tdmi,arm920,arm920t,arm922t,arm940t,ep9312,arm10tdmi,arm1020t,arm9e,arm946es,arm966es,arm968es,arm10e,arm1020e,arm1022e,xscale,iwmmxt,iwmmxt2,fa606te,fa626te,fmp626,fa726te,arm926ejs,arm1026ejs,arm1136js,arm1136jfs,arm1176jzs,arm1176jzfs,mpcorenovfp,mpcore,arm1156t2s,arm1156t2fs,genericv7a,cortexa5,cortexa7,cortexa8,cortexa9,cortexa12,cortexa15,cortexa53,cortexr4,cortexr4f,cortexr5,cortexr7,cortexm4,cortexm3,cortexm1,cortexm0,cortexm0plus,marvell_pj4"
+ "arm2,arm250,arm3,
+ arm6,arm60,arm600,
+ arm610,arm620,arm7,
+ arm7d,arm7di,arm70,
+ arm700,arm700i,arm710,
+ arm720,arm710c,arm7100,
+ arm7500,arm7500fe,arm7m,
+ arm7dm,arm7dmi,arm8,
+ arm810,strongarm,strongarm110,
+ strongarm1100,strongarm1110,fa526,
+ fa626,arm7tdmi,arm7tdmis,
+ arm710t,arm720t,arm740t,
+ arm9,arm9tdmi,arm920,
+ arm920t,arm922t,arm940t,
+ ep9312,arm10tdmi,arm1020t,
+ arm9e,arm946es,arm966es,
+ arm968es,arm10e,arm1020e,
+ arm1022e,xscale,iwmmxt,
+ iwmmxt2,fa606te,fa626te,
+ fmp626,fa726te,arm926ejs,
+ arm1026ejs,arm1136js,arm1136jfs,
+ arm1176jzs,arm1176jzfs,mpcorenovfp,
+ mpcore,arm1156t2s,arm1156t2fs,
+ cortexm1,cortexm0,cortexm0plus,
+ genericv7a,cortexa5,cortexa7,
+ cortexa8,cortexa9,cortexa12,
+ cortexa15,cortexr4,cortexr4f,
+ cortexr5,cortexr7,cortexm4,
+ cortexm3,marvell_pj4,cortexa15cortexa7,
+ cortexa53,cortexa57,cortexa57cortexa53"
(const (symbol_ref "((enum attr_tune) arm_tune)")))
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index ee0de684956..42450c71c27 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -1,5 +1,5 @@
/* Output routines for GCC for ARM.
- Copyright (C) 1991-2013 Free Software Foundation, Inc.
+ Copyright (C) 1991-2014 Free Software Foundation, Inc.
Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
and Martin Simmons (@harleqn.co.uk).
More major hacks by Richard Earnshaw (rearnsha@arm.com).
@@ -736,6 +736,7 @@ static int thumb_call_reg_needed;
#define FL_ARCH7 (1 << 22) /* Architecture 7. */
#define FL_ARM_DIV (1 << 23) /* Hardware divide (ARM mode). */
#define FL_ARCH8 (1 << 24) /* Architecture 8. */
+#define FL_CRC32 (1 << 25) /* ARMv8 CRC32 instructions. */
#define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
#define FL_IWMMXT2 (1 << 30) /* "Intel Wireless MMX2 technology". */
@@ -901,6 +902,9 @@ int arm_condexec_mask = 0;
/* The number of bits used in arm_condexec_mask. */
int arm_condexec_masklen = 0;
+/* Nonzero if chip supports the ARMv8 CRC instructions. */
+int arm_arch_crc = 0;
+
/* The condition codes of the ARM, and the inverse function. */
static const char * const arm_condition_codes[] =
{
@@ -1742,7 +1746,7 @@ const struct tune_params arm_fa726te_tune =
static const struct processors all_cores[] =
{
/* ARM Cores */
-#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
+#define ARM_CORE(NAME, X, IDENT, ARCH, FLAGS, COSTS) \
{NAME, IDENT, #ARCH, BASE_ARCH_##ARCH, \
FLAGS | FL_FOR_ARCH##ARCH, &arm_##COSTS##_tune},
#include "arm-cores.def"
@@ -2251,7 +2255,10 @@ arm_option_override (void)
arm_selected_arch = &all_architectures[arm_arch_option];
if (global_options_set.x_arm_cpu_option)
- arm_selected_cpu = &all_cores[(int) arm_cpu_option];
+ {
+ arm_selected_cpu = &all_cores[(int) arm_cpu_option];
+ arm_selected_tune = &all_cores[(int) arm_cpu_option];
+ }
if (global_options_set.x_arm_tune_option)
arm_selected_tune = &all_cores[(int) arm_tune_option];
@@ -2477,6 +2484,7 @@ arm_option_override (void)
arm_arch_thumb_hwdiv = (insn_flags & FL_THUMB_DIV) != 0;
arm_arch_arm_hwdiv = (insn_flags & FL_ARM_DIV) != 0;
arm_tune_cortex_a9 = (arm_tune == cortexa9) != 0;
+ arm_arch_crc = (insn_flags & FL_CRC32) != 0;
if (arm_restrict_it == 2)
arm_restrict_it = arm_arch8 && TARGET_THUMB2;
@@ -18671,8 +18679,7 @@ arm_r3_live_at_start_p (void)
/* Just look at cfg info, which is still close enough to correct at this
point. This gives false positives for broken functions that might use
uninitialized data that happens to be allocated in r3, but who cares? */
- return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
- 3);
+ return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)), 3);
}
/* Compute the number of bytes used to store the static chain register on the
@@ -20698,11 +20705,13 @@ arm_expand_prologue (void)
whilst the frame is being created. We try the following
places in order:
- 1. The last argument register r3.
- 2. A slot on the stack above the frame. (This only
- works if the function is not a varargs function).
+ 1. The last argument register r3 if it is available.
+ 2. A slot on the stack above the frame if there are no
+ arguments to push onto the stack.
3. Register r3 again, after pushing the argument registers
- onto the stack.
+ onto the stack, if this is a varargs function.
+ 4. The last slot on the stack created for the arguments to
+ push, if this isn't a varargs function.
Note - we only need to tell the dwarf2 backend about the SP
adjustment in the second variant; the static chain register
@@ -20713,13 +20722,13 @@ arm_expand_prologue (void)
insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
else if (args_to_push == 0)
{
- rtx dwarf;
+ rtx addr, dwarf;
gcc_assert(arm_compute_static_chain_stack_bytes() == 4);
saved_regs += 4;
- insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
- insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
+ addr = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
+ insn = emit_set_insn (gen_frame_mem (SImode, addr), ip_rtx);
fp_offset = 4;
/* Just tell the dwarf backend that we adjusted SP. */
@@ -20733,21 +20742,38 @@ arm_expand_prologue (void)
{
/* Store the args on the stack. */
if (cfun->machine->uses_anonymous_args)
- insn = emit_multi_reg_push
- ((0xf0 >> (args_to_push / 4)) & 0xf);
+ {
+ insn
+ = emit_multi_reg_push ((0xf0 >> (args_to_push / 4)) & 0xf);
+ emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
+ saved_pretend_args = 1;
+ }
else
- insn = emit_insn
- (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
- GEN_INT (- args_to_push)));
+ {
+ rtx addr, dwarf;
- RTX_FRAME_RELATED_P (insn) = 1;
+ if (args_to_push == 4)
+ addr = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
+ else
+ addr
+ = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ -args_to_push));
+
+ insn = emit_set_insn (gen_frame_mem (SImode, addr), ip_rtx);
- saved_pretend_args = 1;
+ /* Just tell the dwarf backend that we adjusted SP. */
+ dwarf
+ = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
+ -args_to_push));
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
+ }
+
+ RTX_FRAME_RELATED_P (insn) = 1;
fp_offset = args_to_push;
args_to_push = 0;
-
- /* Now reuse r3 to preserve IP. */
- emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
}
}
@@ -20853,7 +20879,7 @@ arm_expand_prologue (void)
/* Recover the static chain register. */
if (!arm_r3_live_at_start_p () || saved_pretend_args)
insn = gen_rtx_REG (SImode, 3);
- else /* if (crtl->args.pretend_args_size == 0) */
+ else
{
insn = plus_constant (Pmode, hard_frame_pointer_rtx, 4);
insn = gen_frame_mem (SImode, insn);
@@ -21568,7 +21594,11 @@ arm_print_operand (FILE *stream, rtx x, int code)
case 'v':
gcc_assert (CONST_DOUBLE_P (x));
- fprintf (stream, "#%d", vfp3_const_double_for_fract_bits (x));
+ int result;
+ result = vfp3_const_double_for_fract_bits (x);
+ if (result == 0)
+ result = vfp3_const_double_for_bits (x);
+ fprintf (stream, "#%d", result);
return;
/* Register specifier for vld1.16/vst1.16. Translate the S register
@@ -23136,6 +23166,30 @@ enum arm_builtins
ARM_BUILTIN_WMERGE,
+ ARM_BUILTIN_CRC32B,
+ ARM_BUILTIN_CRC32H,
+ ARM_BUILTIN_CRC32W,
+ ARM_BUILTIN_CRC32CB,
+ ARM_BUILTIN_CRC32CH,
+ ARM_BUILTIN_CRC32CW,
+
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+
+#define CRYPTO1(L, U, M1, M2) \
+ ARM_BUILTIN_CRYPTO_##U,
+#define CRYPTO2(L, U, M1, M2, M3) \
+ ARM_BUILTIN_CRYPTO_##U,
+#define CRYPTO3(L, U, M1, M2, M3, M4) \
+ ARM_BUILTIN_CRYPTO_##U,
+
+#include "crypto.def"
+
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+
#include "arm_neon_builtins.def"
,ARM_BUILTIN_MAX
@@ -23157,6 +23211,9 @@ enum arm_builtins
static GTY(()) tree arm_builtin_decls[ARM_BUILTIN_MAX];
+#define NUM_DREG_TYPES 5
+#define NUM_QREG_TYPES 6
+
static void
arm_init_neon_builtins (void)
{
@@ -23170,6 +23227,7 @@ arm_init_neon_builtins (void)
tree neon_polyHI_type_node;
tree neon_intSI_type_node;
tree neon_intDI_type_node;
+ tree neon_intUTI_type_node;
tree neon_float_type_node;
tree intQI_pointer_node;
@@ -23232,9 +23290,9 @@ arm_init_neon_builtins (void)
tree void_ftype_pv4sf_v4sf_v4sf;
tree void_ftype_pv2di_v2di_v2di;
- tree reinterp_ftype_dreg[5][5];
- tree reinterp_ftype_qreg[5][5];
- tree dreg_types[5], qreg_types[5];
+ tree reinterp_ftype_dreg[NUM_DREG_TYPES][NUM_DREG_TYPES];
+ tree reinterp_ftype_qreg[NUM_QREG_TYPES][NUM_QREG_TYPES];
+ tree dreg_types[NUM_DREG_TYPES], qreg_types[NUM_QREG_TYPES];
/* Create distinguished type nodes for NEON vector element types,
and pointers to values of such types, so we can detect them later. */
@@ -23324,6 +23382,8 @@ arm_init_neon_builtins (void)
intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
+ neon_intUTI_type_node = make_unsigned_type (GET_MODE_PRECISION (TImode));
+
(*lang_hooks.types.register_builtin_type) (intUQI_type_node,
"__builtin_neon_uqi");
@@ -23333,6 +23393,10 @@ arm_init_neon_builtins (void)
"__builtin_neon_usi");
(*lang_hooks.types.register_builtin_type) (intUDI_type_node,
"__builtin_neon_udi");
+ (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
+ "__builtin_neon_poly64");
+ (*lang_hooks.types.register_builtin_type) (neon_intUTI_type_node,
+ "__builtin_neon_poly128");
/* Opaque integer types for structures of vectors. */
intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
@@ -23394,6 +23458,80 @@ arm_init_neon_builtins (void)
build_function_type_list (void_type_node, V2DI_pointer_node, V2DI_type_node,
V2DI_type_node, NULL);
+ if (TARGET_CRYPTO && TARGET_HARD_FLOAT)
+ {
+ tree V4USI_type_node =
+ build_vector_type_for_mode (intUSI_type_node, V4SImode);
+
+ tree V16UQI_type_node =
+ build_vector_type_for_mode (intUQI_type_node, V16QImode);
+
+ tree v16uqi_ftype_v16uqi
+ = build_function_type_list (V16UQI_type_node, V16UQI_type_node, NULL_TREE);
+
+ tree v16uqi_ftype_v16uqi_v16uqi
+ = build_function_type_list (V16UQI_type_node, V16UQI_type_node,
+ V16UQI_type_node, NULL_TREE);
+
+ tree v4usi_ftype_v4usi
+ = build_function_type_list (V4USI_type_node, V4USI_type_node, NULL_TREE);
+
+ tree v4usi_ftype_v4usi_v4usi
+ = build_function_type_list (V4USI_type_node, V4USI_type_node,
+ V4USI_type_node, NULL_TREE);
+
+ tree v4usi_ftype_v4usi_v4usi_v4usi
+ = build_function_type_list (V4USI_type_node, V4USI_type_node,
+ V4USI_type_node, V4USI_type_node, NULL_TREE);
+
+ tree uti_ftype_udi_udi
+ = build_function_type_list (neon_intUTI_type_node, intUDI_type_node,
+ intUDI_type_node, NULL_TREE);
+
+ #undef CRYPTO1
+ #undef CRYPTO2
+ #undef CRYPTO3
+ #undef C
+ #undef N
+ #undef CF
+ #undef FT1
+ #undef FT2
+ #undef FT3
+
+ #define C(U) \
+ ARM_BUILTIN_CRYPTO_##U
+ #define N(L) \
+ "__builtin_arm_crypto_"#L
+ #define FT1(R, A) \
+ R##_ftype_##A
+ #define FT2(R, A1, A2) \
+ R##_ftype_##A1##_##A2
+ #define FT3(R, A1, A2, A3) \
+ R##_ftype_##A1##_##A2##_##A3
+ #define CRYPTO1(L, U, R, A) \
+ arm_builtin_decls[C (U)] = add_builtin_function (N (L), FT1 (R, A), \
+ C (U), BUILT_IN_MD, \
+ NULL, NULL_TREE);
+ #define CRYPTO2(L, U, R, A1, A2) \
+ arm_builtin_decls[C (U)] = add_builtin_function (N (L), FT2 (R, A1, A2), \
+ C (U), BUILT_IN_MD, \
+ NULL, NULL_TREE);
+
+ #define CRYPTO3(L, U, R, A1, A2, A3) \
+ arm_builtin_decls[C (U)] = add_builtin_function (N (L), FT3 (R, A1, A2, A3), \
+ C (U), BUILT_IN_MD, \
+ NULL, NULL_TREE);
+ #include "crypto.def"
+
+ #undef CRYPTO1
+ #undef CRYPTO2
+ #undef CRYPTO3
+ #undef C
+ #undef N
+ #undef FT1
+ #undef FT2
+ #undef FT3
+ }
dreg_types[0] = V8QI_type_node;
dreg_types[1] = V4HI_type_node;
dreg_types[2] = V2SI_type_node;
@@ -23405,14 +23543,17 @@ arm_init_neon_builtins (void)
qreg_types[2] = V4SI_type_node;
qreg_types[3] = V4SF_type_node;
qreg_types[4] = V2DI_type_node;
+ qreg_types[5] = neon_intUTI_type_node;
- for (i = 0; i < 5; i++)
+ for (i = 0; i < NUM_QREG_TYPES; i++)
{
int j;
- for (j = 0; j < 5; j++)
+ for (j = 0; j < NUM_QREG_TYPES; j++)
{
- reinterp_ftype_dreg[i][j]
- = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
+ if (i < NUM_DREG_TYPES && j < NUM_DREG_TYPES)
+ reinterp_ftype_dreg[i][j]
+ = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
+
reinterp_ftype_qreg[i][j]
= build_function_type_list (qreg_types[i], qreg_types[j], NULL);
}
@@ -23627,10 +23768,14 @@ arm_init_neon_builtins (void)
case NEON_REINTERP:
{
- /* We iterate over 5 doubleword types, then 5 quadword
- types. V4HF is not a type used in reinterpret, so we translate
+ /* We iterate over NUM_DREG_TYPES doubleword types,
+ then NUM_QREG_TYPES quadword types.
+ V4HF is not a type used in reinterpret, so we translate
d->mode to the correct index in reinterp_ftype_dreg. */
- int rhs = (d->mode - ((d->mode > T_V4HF) ? 1 : 0)) % 5;
+ bool qreg_p
+ = GET_MODE_SIZE (insn_data[d->code].operand[0].mode) > 8;
+ int rhs = (d->mode - ((!qreg_p && (d->mode > T_V4HF)) ? 1 : 0))
+ % NUM_QREG_TYPES;
switch (insn_data[d->code].operand[0].mode)
{
case V8QImode: ftype = reinterp_ftype_dreg[0][rhs]; break;
@@ -23643,6 +23788,7 @@ arm_init_neon_builtins (void)
case V4SImode: ftype = reinterp_ftype_qreg[2][rhs]; break;
case V4SFmode: ftype = reinterp_ftype_qreg[3][rhs]; break;
case V2DImode: ftype = reinterp_ftype_qreg[4][rhs]; break;
+ case TImode: ftype = reinterp_ftype_qreg[5][rhs]; break;
default: gcc_unreachable ();
}
}
@@ -23693,6 +23839,9 @@ arm_init_neon_builtins (void)
}
}
+#undef NUM_DREG_TYPES
+#undef NUM_QREG_TYPES
+
#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
do \
{ \
@@ -23715,7 +23864,7 @@ struct builtin_description
const enum rtx_code comparison;
const unsigned int flag;
};
-
+
static const struct builtin_description bdesc_2arg[] =
{
#define IWMMXT_BUILTIN(code, string, builtin) \
@@ -23821,6 +23970,33 @@ static const struct builtin_description bdesc_2arg[] =
IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
+
+#define CRC32_BUILTIN(L, U) \
+ {0, CODE_FOR_##L, "__builtin_arm_"#L, ARM_BUILTIN_##U, \
+ UNKNOWN, 0},
+ CRC32_BUILTIN (crc32b, CRC32B)
+ CRC32_BUILTIN (crc32h, CRC32H)
+ CRC32_BUILTIN (crc32w, CRC32W)
+ CRC32_BUILTIN (crc32cb, CRC32CB)
+ CRC32_BUILTIN (crc32ch, CRC32CH)
+ CRC32_BUILTIN (crc32cw, CRC32CW)
+#undef CRC32_BUILTIN
+
+
+#define CRYPTO_BUILTIN(L, U) \
+ {0, CODE_FOR_crypto_##L, "__builtin_arm_crypto_"#L, ARM_BUILTIN_CRYPTO_##U, \
+ UNKNOWN, 0},
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+#define CRYPTO2(L, U, R, A1, A2) CRYPTO_BUILTIN (L, U)
+#define CRYPTO1(L, U, R, A)
+#define CRYPTO3(L, U, R, A1, A2, A3)
+#include "crypto.def"
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+
};
static const struct builtin_description bdesc_1arg[] =
@@ -23849,8 +24025,28 @@ static const struct builtin_description bdesc_1arg[] =
IWMMXT_BUILTIN (tbcstv8qi, "tbcstb", TBCSTB)
IWMMXT_BUILTIN (tbcstv4hi, "tbcsth", TBCSTH)
IWMMXT_BUILTIN (tbcstv2si, "tbcstw", TBCSTW)
+
+#define CRYPTO1(L, U, R, A) CRYPTO_BUILTIN (L, U)
+#define CRYPTO2(L, U, R, A1, A2)
+#define CRYPTO3(L, U, R, A1, A2, A3)
+#include "crypto.def"
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
};
+static const struct builtin_description bdesc_3arg[] =
+{
+#define CRYPTO3(L, U, R, A1, A2, A3) CRYPTO_BUILTIN (L, U)
+#define CRYPTO1(L, U, R, A)
+#define CRYPTO2(L, U, R, A1, A2)
+#include "crypto.def"
+#undef CRYPTO1
+#undef CRYPTO2
+#undef CRYPTO3
+ };
+#undef CRYPTO_BUILTIN
+
/* Set up all the iWMMXt builtins. This is not called if
TARGET_IWMMXT is zero. */
@@ -24240,6 +24436,42 @@ arm_init_fp16_builtins (void)
}
static void
+arm_init_crc32_builtins ()
+{
+ tree si_ftype_si_qi
+ = build_function_type_list (unsigned_intSI_type_node,
+ unsigned_intSI_type_node,
+ unsigned_intQI_type_node, NULL_TREE);
+ tree si_ftype_si_hi
+ = build_function_type_list (unsigned_intSI_type_node,
+ unsigned_intSI_type_node,
+ unsigned_intHI_type_node, NULL_TREE);
+ tree si_ftype_si_si
+ = build_function_type_list (unsigned_intSI_type_node,
+ unsigned_intSI_type_node,
+ unsigned_intSI_type_node, NULL_TREE);
+
+ arm_builtin_decls[ARM_BUILTIN_CRC32B]
+ = add_builtin_function ("__builtin_arm_crc32b", si_ftype_si_qi,
+ ARM_BUILTIN_CRC32B, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32H]
+ = add_builtin_function ("__builtin_arm_crc32h", si_ftype_si_hi,
+ ARM_BUILTIN_CRC32H, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32W]
+ = add_builtin_function ("__builtin_arm_crc32w", si_ftype_si_si,
+ ARM_BUILTIN_CRC32W, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32CB]
+ = add_builtin_function ("__builtin_arm_crc32cb", si_ftype_si_qi,
+ ARM_BUILTIN_CRC32CB, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32CH]
+ = add_builtin_function ("__builtin_arm_crc32ch", si_ftype_si_hi,
+ ARM_BUILTIN_CRC32CH, BUILT_IN_MD, NULL, NULL_TREE);
+ arm_builtin_decls[ARM_BUILTIN_CRC32CW]
+ = add_builtin_function ("__builtin_arm_crc32cw", si_ftype_si_si,
+ ARM_BUILTIN_CRC32CW, BUILT_IN_MD, NULL, NULL_TREE);
+}
+
+static void
arm_init_builtins (void)
{
if (TARGET_REALLY_IWMMXT)
@@ -24250,6 +24482,9 @@ arm_init_builtins (void)
if (arm_fp16_format)
arm_init_fp16_builtins ();
+
+ if (TARGET_CRC32)
+ arm_init_crc32_builtins ();
}
/* Return the ARM builtin for CODE. */
@@ -24343,6 +24578,73 @@ safe_vector_operand (rtx x, enum machine_mode mode)
return x;
}
+/* Function to expand ternary builtins. */
+static rtx
+arm_expand_ternop_builtin (enum insn_code icode,
+ tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ tree arg2 = CALL_EXPR_ARG (exp, 2);
+
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx op3 = NULL_RTX;
+
+ /* The sha1c, sha1p, sha1m crypto builtins require a different vec_select
+ lane operand depending on endianness. */
+ bool builtin_sha1cpm_p = false;
+
+ if (insn_data[icode].n_operands == 5)
+ {
+ gcc_assert (icode == CODE_FOR_crypto_sha1c
+ || icode == CODE_FOR_crypto_sha1p
+ || icode == CODE_FOR_crypto_sha1m);
+ builtin_sha1cpm_p = true;
+ }
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[3].mode;
+
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+ if (VECTOR_MODE_P (mode2))
+ op2 = safe_vector_operand (op2, mode2);
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
+ && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode)
+ && (GET_MODE (op2) == mode2 || GET_MODE (op2) == VOIDmode));
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+ op2 = copy_to_mode_reg (mode2, op2);
+ if (builtin_sha1cpm_p)
+ op3 = GEN_INT (TARGET_BIG_END ? 1 : 0);
+
+ if (builtin_sha1cpm_p)
+ pat = GEN_FCN (icode) (target, op0, op1, op2, op3);
+ else
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
+
/* Subroutine of arm_expand_builtin to take care of binop insns. */
static rtx
@@ -24392,8 +24694,16 @@ arm_expand_unop_builtin (enum insn_code icode,
rtx pat;
tree arg0 = CALL_EXPR_ARG (exp, 0);
rtx op0 = expand_normal (arg0);
+ rtx op1 = NULL_RTX;
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ bool builtin_sha1h_p = false;
+
+ if (insn_data[icode].n_operands == 3)
+ {
+ gcc_assert (icode == CODE_FOR_crypto_sha1h);
+ builtin_sha1h_p = true;
+ }
if (! target
|| GET_MODE (target) != tmode
@@ -24409,8 +24719,13 @@ arm_expand_unop_builtin (enum insn_code icode,
if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
op0 = copy_to_mode_reg (mode0, op0);
}
+ if (builtin_sha1h_p)
+ op1 = GEN_INT (TARGET_BIG_END ? 1 : 0);
- pat = GEN_FCN (icode) (target, op0);
+ if (builtin_sha1h_p)
+ pat = GEN_FCN (icode) (target, op0, op1);
+ else
+ pat = GEN_FCN (icode) (target, op0);
if (! pat)
return 0;
emit_insn (pat);
@@ -25368,6 +25683,10 @@ arm_expand_builtin (tree exp,
if (d->code == (const enum arm_builtins) fcode)
return arm_expand_unop_builtin (d->icode, exp, target, 0);
+ for (i = 0, d = bdesc_3arg; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
+ if (d->code == (const enum arm_builtins) fcode)
+ return arm_expand_ternop_builtin (d->icode, exp, target);
+
/* @@@ Should really do something sensible here. */
return NULL_RTX;
}
@@ -26852,8 +27171,8 @@ arm_expand_epilogue_apcs_frame (bool really_return)
if (crtl->calls_eh_return)
emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (ARM_EH_STACKADJ_REGNUM)));
+ stack_pointer_rtx,
+ gen_rtx_REG (SImode, ARM_EH_STACKADJ_REGNUM)));
if (IS_STACKALIGN (func_type))
/* Restore the original stack pointer. Before prologue, the stack was
@@ -27523,11 +27842,30 @@ arm_file_start (void)
{
const char *fpu_name;
if (arm_selected_arch)
- asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_arch->name);
+ {
+ const char* pos = strchr (arm_selected_arch->name, '+');
+ if (pos)
+ {
+ char buf[15];
+ gcc_assert (strlen (arm_selected_arch->name)
+ <= sizeof (buf) / sizeof (*pos));
+ strncpy (buf, arm_selected_arch->name,
+ (pos - arm_selected_arch->name) * sizeof (*pos));
+ buf[pos - arm_selected_arch->name] = '\0';
+ asm_fprintf (asm_out_file, "\t.arch %s\n", buf);
+ asm_fprintf (asm_out_file, "\t.arch_extension %s\n", pos + 1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_arch->name);
+ }
else if (strncmp (arm_selected_cpu->name, "generic", 7) == 0)
asm_fprintf (asm_out_file, "\t.arch %s\n", arm_selected_cpu->name + 8);
else
- asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_selected_cpu->name);
+ {
+ const char* truncated_name
+ = arm_rewrite_selected_cpu (arm_selected_cpu->name);
+ asm_fprintf (asm_out_file, "\t.cpu %s\n", truncated_name);
+ }
if (TARGET_SOFT_FLOAT)
{
@@ -28976,6 +29314,7 @@ arm_issue_rate (void)
case cortexa7:
case cortexa8:
case cortexa9:
+ case cortexa12:
case cortexa53:
case fa726te:
case marvell_pj4:
@@ -29011,6 +29350,7 @@ static arm_mangle_map_entry arm_mangle_map[] = {
{ V2SFmode, "__builtin_neon_sf", "18__simd64_float32_t" },
{ V8QImode, "__builtin_neon_poly8", "16__simd64_poly8_t" },
{ V4HImode, "__builtin_neon_poly16", "17__simd64_poly16_t" },
+
/* 128-bit containerized types. */
{ V16QImode, "__builtin_neon_qi", "16__simd128_int8_t" },
{ V16QImode, "__builtin_neon_uqi", "17__simd128_uint8_t" },
@@ -29371,6 +29711,26 @@ vfp3_const_double_for_fract_bits (rtx operand)
}
return 0;
}
+
+int
+vfp3_const_double_for_bits (rtx operand)
+{
+ REAL_VALUE_TYPE r0;
+
+ if (!CONST_DOUBLE_P (operand))
+ return 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r0, operand);
+ if (exact_real_truncate (DFmode, &r0))
+ {
+ HOST_WIDE_INT value = real_to_integer (&r0);
+ value = value & 0xffffffff;
+ if ((value != 0) && ( (value & (value - 1)) == 0))
+ return int_log2 (value);
+ }
+
+ return 0;
+}
/* Emit a memory barrier around an atomic sequence according to MODEL. */
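The __builtin_arm_crc32* functions registered above pair with the new armv8-a+crc architecture entry from arm-arches.def. A usage sketch follows; the fallback loop assumes the standard reflected CRC-32 polynomial and is not code from the patch:

#include <stdint.h>

/* Fold one 32-bit word into a running CRC.  When built for a target with
   the CRC32 extension (e.g. -march=armv8-a+crc), this should compile down
   to a single CRC32W instruction via the builtin; otherwise fall back to
   a bitwise loop over the reflected polynomial 0xEDB88320.  */
static uint32_t
crc32_word (uint32_t crc, uint32_t data)
{
#ifdef __ARM_FEATURE_CRC32
  return __builtin_arm_crc32w (crc, data);
#else
  crc ^= data;
  for (int i = 0; i < 32; i++)
    crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
  return crc;
#endif
}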
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 8b8b80e19d3..409589d2dac 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for ARM.
- Copyright (C) 1991-2013 Free Software Foundation, Inc.
+ Copyright (C) 1991-2014 Free Software Foundation, Inc.
Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
and Martin Simmons (@harleqn.co.uk).
More major hacks by Richard Earnshaw (rearnsha@arm.com)
@@ -49,8 +49,14 @@ extern char arm_arch_name[];
builtin_define ("__ARM_FEATURE_QBIT"); \
if (TARGET_ARM_SAT) \
builtin_define ("__ARM_FEATURE_SAT"); \
+ if (TARGET_CRYPTO) \
+ builtin_define ("__ARM_FEATURE_CRYPTO"); \
if (unaligned_access) \
builtin_define ("__ARM_FEATURE_UNALIGNED"); \
+ if (TARGET_CRC32) \
+ builtin_define ("__ARM_FEATURE_CRC32"); \
+ if (TARGET_32BIT) \
+ builtin_define ("__ARM_32BIT_STATE"); \
if (TARGET_ARM_FEATURE_LDREX) \
builtin_define_with_int_value ( \
"__ARM_FEATURE_LDREX", TARGET_ARM_FEATURE_LDREX); \
@@ -162,8 +168,8 @@ extern char arm_arch_name[];
enum target_cpus
{
-#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
- TARGET_CPU_##IDENT,
+#define ARM_CORE(NAME, INTERNAL_IDENT, IDENT, ARCH, FLAGS, COSTS) \
+ TARGET_CPU_##INTERNAL_IDENT,
#include "arm-cores.def"
#undef ARM_CORE
TARGET_CPU_generic
@@ -274,6 +280,8 @@ extern void (*arm_lang_output_object_attributes_hook)(void);
#define TARGET_LDRD (arm_arch5e && ARM_DOUBLEWORD_ALIGN \
&& !TARGET_THUMB1)
+#define TARGET_CRC32 (arm_arch_crc)
+
/* The following two macros concern the ability to execute coprocessor
instructions for VFPv3 or NEON. TARGET_VFP3/TARGET_VFPD32 are currently
only ever tested when we know we are generating for VFP hardware; we need
@@ -561,6 +569,9 @@ extern int prefer_neon_for_64bits;
extern bool arm_disable_literal_pool;
#endif
+/* Nonzero if chip supports the ARMv8 CRC instructions. */
+extern int arm_arch_crc;
+
#ifndef TARGET_DEFAULT
#define TARGET_DEFAULT (MASK_APCS_FRAME)
#endif
@@ -1285,11 +1296,12 @@ enum reg_class
: NO_REGS))
#define THUMB_SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
- ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
- ? ((true_regnum (X) == -1 ? LO_REGS \
- : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
- : NO_REGS)) \
- : NO_REGS)
+ (lra_in_progress ? NO_REGS \
+ : (CLASS) != LO_REGS && (CLASS) != BASE_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
/* Return the register class of a scratch register needed to copy IN into
or out of a register in CLASS in MODE. If it can be done directly,
@@ -2343,16 +2355,25 @@ extern int making_const_table;
instruction. */
#define MAX_LDM_STM_OPS 4
+#define BIG_LITTLE_SPEC \
+ " %{mcpu=*:%<mcpu=* -mcpu=%:rewrite_mcpu(%{mcpu=*:%*})}" \
+
+extern const char *arm_rewrite_mcpu (int argc, const char **argv);
+#define BIG_LITTLE_CPU_SPEC_FUNCTIONS \
+ { "rewrite_mcpu", arm_rewrite_mcpu },
+
#define ASM_CPU_SPEC \
" %{mcpu=generic-*:-march=%*;" \
- " :%{mcpu=*:-mcpu=%*} %{march=*:-march=%*}}"
+ " :%{march=*:-march=%*}}" \
+ BIG_LITTLE_SPEC
/* -mcpu=native handling only makes sense with compiler running on
an ARM chip. */
#if defined(__arm__)
extern const char *host_detect_local_cpu (int argc, const char **argv);
# define EXTRA_SPEC_FUNCTIONS \
- { "local_cpu_detect", host_detect_local_cpu },
+ { "local_cpu_detect", host_detect_local_cpu }, \
+ BIG_LITTLE_CPU_SPEC_FUNCTIONS
# define MCPU_MTUNE_NATIVE_SPECS \
" %{march=native:%<march=native %:local_cpu_detect(arch)}" \
@@ -2360,6 +2381,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv);
" %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"
#else
# define MCPU_MTUNE_NATIVE_SPECS ""
+# define EXTRA_SPEC_FUNCTIONS BIG_LITTLE_CPU_SPEC_FUNCTIONS
#endif
#define DRIVER_SELF_SPECS MCPU_MTUNE_NATIVE_SPECS
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 46fc4422d5c..2ddda020863 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -1,5 +1,5 @@
;;- Machine description for ARM for GNU compiler
-;; Copyright (C) 1991-2013 Free Software Foundation, Inc.
+;; Copyright (C) 1991-2014 Free Software Foundation, Inc.
;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
;; and Martin Simmons (@harleqn.co.uk).
;; More major hacks by Richard Earnshaw (rearnsha@arm.com).
@@ -293,7 +293,7 @@
neon_ext, neon_ext_q, neon_rbit, neon_rbit_q,\
neon_rev, neon_rev_q, neon_mul_b, neon_mul_b_q, neon_mul_h,\
neon_mul_h_q, neon_mul_s, neon_mul_s_q, neon_mul_b_long,\
- neon_mul_h_long, neon_mul_s_long, neon_mul_h_scalar,\
+ neon_mul_h_long, neon_mul_s_long, neon_mul_d_long, neon_mul_h_scalar,\
neon_mul_h_scalar_q, neon_mul_s_scalar, neon_mul_s_scalar_q,\
neon_mul_h_scalar_long, neon_mul_s_scalar_long, neon_sat_mul_b,\
neon_sat_mul_b_q, neon_sat_mul_h, neon_sat_mul_h_q,\
@@ -355,7 +355,9 @@
neon_fp_mla_s_scalar, neon_fp_mla_s_scalar_q, neon_fp_mla_d,\
neon_fp_mla_d_q, neon_fp_mla_d_scalar_q, neon_fp_sqrt_s,\
neon_fp_sqrt_s_q, neon_fp_sqrt_d, neon_fp_sqrt_d_q,\
- neon_fp_div_s, neon_fp_div_s_q, neon_fp_div_d, neon_fp_div_d_q")
+ neon_fp_div_s, neon_fp_div_s_q, neon_fp_div_d, neon_fp_div_d_q, crypto_aes,\
+ crypto_sha1_xor, crypto_sha1_fast, crypto_sha1_slow, crypto_sha256_fast,\
+ crypto_sha256_slow")
(const_string "yes")
(const_string "no")))
@@ -477,7 +479,7 @@
(define_attr "generic_vfp" "yes,no"
(const (if_then_else
(and (eq_attr "fpu" "vfp")
- (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa7,cortexa8,cortexa9,cortexa12,cortexa53,cortexm4,marvell_pj4")
+ (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa7,cortexa8,cortexa9,cortexa53,cortexm4,marvell_pj4")
(eq_attr "tune_cortexr4" "no"))
(const_string "yes")
(const_string "no"))))
@@ -12253,7 +12255,7 @@
[(set (match_operand:SI 1 "s_register_operand" "+rk")
(plus:SI (match_dup 1)
(match_operand:SI 2 "const_int_operand" "I")))
- (set (match_operand:DF 3 "arm_hard_register_operand" "")
+ (set (match_operand:DF 3 "vfp_hard_register_operand" "")
(mem:DF (match_dup 1)))])]
"TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"*
@@ -12870,6 +12872,17 @@
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
+;; ARMv8 CRC32 instructions.
+(define_insn "<crc_variant>"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:<crc_mode> 2 "s_register_operand" "r")]
+ CRC))]
+ "TARGET_CRC32"
+ "<crc_variant>\\t%0, %1, %2"
+ [(set_attr "type" "crc")
+ (set_attr "conds" "unconditional")]
+)
;; Load the load/store double peephole optimizations.
(include "ldrdstrd.md")
@@ -12907,6 +12920,8 @@
(include "thumb2.md")
;; Neon patterns
(include "neon.md")
+;; Crypto patterns
+(include "crypto.md")
;; Synchronization Primitives
(include "sync.md")
;; Fixed-point patterns
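A sketch of what the CRC define_insn above buys (assuming a compiler
built with this patch and -march=armv8-a+crc): the __builtin_arm_crc32w
builtin that arm_acle.h wraps below expands through that pattern to a
single instruction, e.g. "crc32w r0, r0, r1":

#include <stdint.h>

uint32_t
crc_step (uint32_t crc, uint32_t data)
{
  /* One data word folded into the running CRC per instruction.  */
  return __builtin_arm_crc32w (crc, data);
}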
diff --git a/gcc/config/arm/arm.opt b/gcc/config/arm/arm.opt
index 5fbac7becf2..8bcf199b6ba 100644
--- a/gcc/config/arm/arm.opt
+++ b/gcc/config/arm/arm.opt
@@ -1,6 +1,6 @@
; Options for the ARM port of the compiler.
-; Copyright (C) 2005-2013 Free Software Foundation, Inc.
+; Copyright (C) 2005-2014 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
diff --git a/gcc/config/arm/arm1020e.md b/gcc/config/arm/arm1020e.md
index 7df84d52481..0206ea2af4d 100644
--- a/gcc/config/arm/arm1020e.md
+++ b/gcc/config/arm/arm1020e.md
@@ -1,5 +1,5 @@
;; ARM 1020E & ARM 1022E Pipeline Description
-;; Copyright (C) 2005-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2005-2014 Free Software Foundation, Inc.
;; Contributed by Richard Earnshaw (richard.earnshaw@arm.com)
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/arm1026ejs.md b/gcc/config/arm/arm1026ejs.md
index f5a0447f5da..3f290b475e0 100644
--- a/gcc/config/arm/arm1026ejs.md
+++ b/gcc/config/arm/arm1026ejs.md
@@ -1,5 +1,5 @@
;; ARM 1026EJ-S Pipeline Description
-;; Copyright (C) 2003-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery, LLC.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/arm1136jfs.md b/gcc/config/arm/arm1136jfs.md
index f6e0b8da8b6..9e941da765b 100644
--- a/gcc/config/arm/arm1136jfs.md
+++ b/gcc/config/arm/arm1136jfs.md
@@ -1,5 +1,5 @@
;; ARM 1136J[F]-S Pipeline Description
-;; Copyright (C) 2003-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery, LLC.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/arm926ejs.md b/gcc/config/arm/arm926ejs.md
index d2b0e9e3cf8..883935dcf62 100644
--- a/gcc/config/arm/arm926ejs.md
+++ b/gcc/config/arm/arm926ejs.md
@@ -1,5 +1,5 @@
;; ARM 926EJ-S Pipeline Description
-;; Copyright (C) 2003-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery, LLC.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/arm_acle.h b/gcc/config/arm/arm_acle.h
new file mode 100644
index 00000000000..aaa7affeeb7
--- /dev/null
+++ b/gcc/config/arm/arm_acle.h
@@ -0,0 +1,100 @@
+/* ARM Non-NEON ACLE intrinsics include file.
+
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_ACLE_H
+#define _GCC_ARM_ACLE_H
+
+#include <stdint.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __ARM_FEATURE_CRC32
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32b (uint32_t __a, uint8_t __b)
+{
+ return __builtin_arm_crc32b (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32h (uint32_t __a, uint16_t __b)
+{
+ return __builtin_arm_crc32h (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32w (uint32_t __a, uint32_t __b)
+{
+ return __builtin_arm_crc32w (__a, __b);
+}
+
+#ifdef __ARM_32BIT_STATE
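+/* There is no doubleword CRC32 instruction in 32-bit state, so fold
+   the 64-bit input in two word-sized steps.  */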
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32d (uint32_t __a, uint64_t __b)
+{
+ uint32_t __d;
+
+ __d = __crc32w (__crc32w (__a, __b & 0xffffffffULL), __b >> 32);
+ return __d;
+}
+#endif
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cb (uint32_t __a, uint8_t __b)
+{
+ return __builtin_arm_crc32cb (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32ch (uint32_t __a, uint16_t __b)
+{
+ return __builtin_arm_crc32ch (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cw (uint32_t __a, uint32_t __b)
+{
+ return __builtin_arm_crc32cw (__a, __b);
+}
+
+#ifdef __ARM_32BIT_STATE
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cd (uint32_t __a, uint64_t __b)
+{
+ uint32_t __d;
+
+ __d = __crc32cw (__crc32cw (__a, __b & 0xffffffffULL), __b >> 32);
+ return __d;
+}
+#endif
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
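A minimal usage sketch (not part of the header): accumulating a CRC
over a byte buffer with the intrinsics above.  The 0xffffffff seed and
the final inversion are the usual CRC-32 conventions, assumed here
rather than mandated by the header:

#include <arm_acle.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

uint32_t
buffer_crc32 (const uint8_t *p, size_t n)
{
  uint32_t crc = 0xffffffffu;   /* conventional initial value */
  while (n >= 4)
    {
      uint32_t w;
      memcpy (&w, p, sizeof w); /* unaligned-safe word load */
      crc = __crc32w (crc, w);
      p += 4;
      n -= 4;
    }
  while (n--)
    crc = __crc32b (crc, *p++);
  return ~crc;                  /* conventional final inversion */
}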
diff --git a/gcc/config/arm/arm_neon.h b/gcc/config/arm/arm_neon.h
index e23d03b9d10..37a6e611b48 100644
--- a/gcc/config/arm/arm_neon.h
+++ b/gcc/config/arm/arm_neon.h
@@ -1,7 +1,7 @@
/* ARM NEON intrinsics include file. This file is generated automatically
using neon-gen.ml. Please do not edit manually.
- Copyright (C) 2006-2013 Free Software Foundation, Inc.
+ Copyright (C) 2006-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
@@ -42,10 +42,13 @@ typedef __builtin_neon_qi int8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_hi int16x4_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_si int32x2_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_di int64x1_t;
-typedef __builtin_neon_sf float32x2_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_hf float16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_sf float32x2_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_poly8 poly8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_poly16 poly16x4_t __attribute__ ((__vector_size__ (8)));
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64x1_t;
+#endif
typedef __builtin_neon_uqi uint8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_uhi uint16x4_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_usi uint32x2_t __attribute__ ((__vector_size__ (8)));
@@ -57,6 +60,9 @@ typedef __builtin_neon_di int64x2_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_sf float32x4_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_poly8 poly8x16_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_poly16 poly16x8_t __attribute__ ((__vector_size__ (16)));
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64x2_t __attribute__ ((__vector_size__ (16)));
+#endif
typedef __builtin_neon_uqi uint8x16_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_uhi uint16x8_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_usi uint32x4_t __attribute__ ((__vector_size__ (16)));
@@ -65,6 +71,10 @@ typedef __builtin_neon_udi uint64x2_t __attribute__ ((__vector_size__ (16)));
typedef float float32_t;
typedef __builtin_neon_poly8 poly8_t;
typedef __builtin_neon_poly16 poly16_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64_t;
+typedef __builtin_neon_poly128 poly128_t;
+#endif
typedef struct int8x8x2_t
{
@@ -176,6 +186,22 @@ typedef struct poly16x8x2_t
poly16x8_t val[2];
} poly16x8x2_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x2_t
+{
+ poly64x1_t val[2];
+} poly64x1x2_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x2_t
+{
+ poly64x2_t val[2];
+} poly64x2x2_t;
+#endif
+
+
typedef struct int8x8x3_t
{
int8x8_t val[3];
@@ -286,6 +312,22 @@ typedef struct poly16x8x3_t
poly16x8_t val[3];
} poly16x8x3_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x3_t
+{
+ poly64x1_t val[3];
+} poly64x1x3_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x3_t
+{
+ poly64x2_t val[3];
+} poly64x2x3_t;
+#endif
+
+
typedef struct int8x8x4_t
{
int8x8_t val[4];
@@ -396,6 +438,22 @@ typedef struct poly16x8x4_t
poly16x8_t val[4];
} poly16x8x4_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x4_t
+{
+ poly64x1_t val[4];
+} poly64x1x4_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x4_t
+{
+ poly64x2_t val[4];
+} poly64x2x4_t;
+#endif
+
+
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vadd_s8 (int8x8_t __a, int8x8_t __b)
@@ -4361,6 +4419,14 @@ vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
return (uint64x2_t)__builtin_neon_vsra_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c, 4);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vsri_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
@@ -4421,6 +4487,14 @@ vsri_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
return (poly16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vsriq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
@@ -4481,6 +4555,14 @@ vsriq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
return (poly16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vsli_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
@@ -4541,6 +4623,14 @@ vsli_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
return (poly16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vsliq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
@@ -5309,6 +5399,14 @@ vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __c)
return (uint64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vcreate_p64 (uint64_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vcreate_s8 (uint64_t __a)
{
@@ -5429,6 +5527,14 @@ vdup_n_p16 (poly16_t __a)
return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vdup_n_p64 (poly64_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vdup_n_s64 (int64_t __a)
{
@@ -5441,6 +5547,14 @@ vdup_n_u64 (uint64_t __a)
return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vdupq_n_p64 (poly64_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vdupq_n_s8 (int8_t __a)
{
@@ -5693,6 +5807,14 @@ vdup_lane_p16 (poly16x4_t __a, const int __b)
return (poly16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vdup_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64x1_t)__builtin_neon_vdup_lanedi (__a, __b);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vdup_lane_s64 (int64x1_t __a, const int __b)
{
@@ -5759,6 +5881,14 @@ vdupq_lane_p16 (poly16x4_t __a, const int __b)
return (poly16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64x2_t)__builtin_neon_vdup_lanev2di (__a, __b);
+}
+
+#endif
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vdupq_lane_s64 (int64x1_t __a, const int __b)
{
@@ -5771,6 +5901,14 @@ vdupq_lane_u64 (uint64x1_t __a, const int __b)
return (uint64x2_t)__builtin_neon_vdup_lanev2di ((int64x1_t) __a, __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vcombine_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ return (poly64x2_t)__builtin_neon_vcombinedi (__a, __b);
+}
+
+#endif
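An aside, not part of the generated header: the poly64 creation
intrinsics compose in the usual NEON way, so a 128-bit polynomial
vector can be assembled from two scalar halves (hypothetical helper
name, gated the same way as the intrinsics themselves):

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
poly64x2_t
make_poly64x2 (uint64_t lo, uint64_t hi)
{
  /* vcreate_p64 reinterprets a 64-bit scalar as a one-lane vector;
     vcombine_p64 pairs two such vectors into lanes 0 and 1.  */
  return vcombine_p64 (vcreate_p64 (lo), vcreate_p64 (hi));
}
#endif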
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vcombine_s8 (int8x8_t __a, int8x8_t __b)
{
@@ -5837,6 +5975,14 @@ vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
return (poly16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vget_high_p64 (poly64x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vget_high_s8 (int8x16_t __a)
{
@@ -5957,6 +6103,14 @@ vget_low_p16 (poly16x8_t __a)
return (poly16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vget_low_p64 (poly64x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vget_low_s64 (int64x2_t __a)
{
@@ -7041,6 +7195,14 @@ vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vext_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vext_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
@@ -7107,6 +7269,14 @@ vext_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
return (poly16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vextq_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vextq_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
@@ -7389,6 +7559,14 @@ vrev16q_p8 (poly8x16_t __a)
return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
+{
+ return (poly64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
{
@@ -7455,6 +7633,14 @@ vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
return (poly16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
+{
+ return (poly64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
{
@@ -8007,6 +8193,14 @@ vuzpq_p16 (poly16x8_t __a, poly16x8_t __b)
return __rv;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_p64 (const poly64_t * __a)
+{
+ return (poly64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+#endif
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vld1_s8 (const int8_t * __a)
{
@@ -8073,6 +8267,14 @@ vld1_p16 (const poly16_t * __a)
return (poly16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_p64 (const poly64_t * __a)
+{
+ return (poly64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+#endif
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vld1q_s8 (const int8_t * __a)
{
@@ -8193,6 +8395,14 @@ vld1_lane_p16 (const poly16_t * __a, poly16x4_t __b, const int __c)
return (poly16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_lane_p64 (const poly64_t * __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vld1_lane_s64 (const int64_t * __a, int64x1_t __b, const int __c)
{
@@ -8259,6 +8469,14 @@ vld1q_lane_p16 (const poly16_t * __a, poly16x8_t __b, const int __c)
return (poly16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_p64 (const poly64_t * __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+#endif
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vld1q_lane_s64 (const int64_t * __a, int64x2_t __b, const int __c)
{
@@ -8325,6 +8543,14 @@ vld1_dup_p16 (const poly16_t * __a)
return (poly16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_dup_p64 (const poly64_t * __a)
+{
+ return (poly64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+#endif
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vld1_dup_s64 (const int64_t * __a)
{
@@ -8391,6 +8617,14 @@ vld1q_dup_p16 (const poly16_t * __a)
return (poly16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_p64 (const poly64_t * __a)
+{
+ return (poly64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+#endif
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vld1q_dup_s64 (const int64_t * __a)
{
@@ -8403,6 +8637,14 @@ vld1q_dup_u64 (const uint64_t * __a)
return (uint64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p64 (poly64_t * __a, poly64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b);
+}
+
+#endif
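Similarly, a poly64 value round-trips through memory with the new
vld1/vst1 pair (sketch only; hypothetical helper name):

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
poly64x1_t
copy_p64 (const poly64_t *src, poly64_t *dst)
{
  poly64x1_t v = vld1_p64 (src);  /* load one 64-bit polynomial lane */
  vst1_p64 (dst, v);              /* store it back out */
  return v;
}
#endif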
__extension__ static __inline void __attribute__ ((__always_inline__))
vst1_s8 (int8_t * __a, int8x8_t __b)
{
@@ -8469,6 +8711,14 @@ vst1_p16 (poly16_t * __a, poly16x4_t __b)
__builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p64 (poly64_t * __a, poly64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst1q_s8 (int8_t * __a, int8x16_t __b)
{
@@ -8589,6 +8839,14 @@ vst1_lane_p16 (poly16_t * __a, poly16x4_t __b, const int __c)
__builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p64 (poly64_t * __a, poly64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst1_lane_s64 (int64_t * __a, int64x1_t __b, const int __c)
{
@@ -8655,6 +8913,14 @@ vst1q_lane_p16 (poly16_t * __a, poly16x8_t __b, const int __c)
__builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p64 (poly64_t * __a, poly64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst1q_lane_s64 (int64_t * __a, int64x2_t __b, const int __c)
{
@@ -8739,6 +9005,16 @@ vld2_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x2_t __attribute__ ((__always_inline__))
+vld2_p64 (const poly64_t * __a)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
vld2_s64 (const int64_t * __a)
{
@@ -9034,6 +9310,16 @@ vld2_dup_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
vld2_dup_s64 (const int64_t * __a)
{
@@ -9113,6 +9399,15 @@ vst2_p16 (poly16_t * __a, poly16x4x2_t __b)
__builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p64 (poly64_t * __a, poly64x1x2_t __b)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst2_s64 (int64_t * __a, int64x1x2_t __b)
{
@@ -9367,6 +9662,16 @@ vld3_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x3_t __attribute__ ((__always_inline__))
+vld3_p64 (const poly64_t * __a)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
vld3_s64 (const int64_t * __a)
{
@@ -9662,6 +9967,16 @@ vld3_dup_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
vld3_dup_s64 (const int64_t * __a)
{
@@ -9741,6 +10056,15 @@ vst3_p16 (poly16_t * __a, poly16x4x3_t __b)
__builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p64 (poly64_t * __a, poly64x1x3_t __b)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst3_s64 (int64_t * __a, int64x1x3_t __b)
{
@@ -9995,6 +10319,16 @@ vld4_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x4_t __attribute__ ((__always_inline__))
+vld4_p64 (const poly64_t * __a)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
vld4_s64 (const int64_t * __a)
{
@@ -10290,6 +10624,16 @@ vld4_dup_p16 (const poly16_t * __a)
return __rv.__i;
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
vld4_dup_s64 (const int64_t * __a)
{
@@ -10369,6 +10713,15 @@ vst4_p16 (poly16_t * __a, poly16x4x4_t __b)
__builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
}
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p64 (poly64_t * __a, poly64x1x4_t __b)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
__extension__ static __inline void __attribute__ ((__always_inline__))
vst4_s64 (int64_t * __a, int64x1x4_t __b)
{
@@ -11033,23 +11386,25 @@ vornq_u64 (uint64x2_t __a, uint64x2_t __b)
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s8 (int8x8_t __a)
+vreinterpret_p8_p16 (poly16x4_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s16 (int16x4_t __a)
+vreinterpret_p8_f32 (float32x2_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
}
+#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s32 (int32x2_t __a)
+vreinterpret_p8_p64 (poly64x1_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
+#endif
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vreinterpret_p8_s64 (int64x1_t __a)
{
@@ -11057,99 +11412,77 @@ vreinterpret_p8_s64 (int64x1_t __a)
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_f32 (float32x2_t __a)
+vreinterpret_p8_u64 (uint64x1_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u8 (uint8x8_t __a)
+vreinterpret_p8_s8 (int8x8_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u16 (uint16x4_t __a)
+vreinterpret_p8_s16 (int16x4_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u32 (uint32x2_t __a)
+vreinterpret_p8_s32 (int32x2_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u64 (uint64x1_t __a)
+vreinterpret_p8_u8 (uint8x8_t __a)
{
- return (poly8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_p16 (poly16x4_t __a)
+vreinterpret_p8_u16 (uint16x4_t __a)
{
return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s8 (int8x16_t __a)
-{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s16 (int16x8_t __a)
-{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s32 (int32x4_t __a)
-{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s64 (int64x2_t __a)
-{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_f32 (float32x4_t __a)
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u32 (uint32x2_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u8 (uint8x16_t __a)
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p8 (poly8x8_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u16 (uint16x8_t __a)
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f32 (float32x2_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u32 (uint32x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p64 (poly64x1_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u64 (uint64x2_t __a)
+#endif
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s64 (int64x1_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_p16 (poly16x8_t __a)
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u64 (uint64x1_t __a)
{
- return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
@@ -11171,18 +11504,6 @@ vreinterpret_p16_s32 (int32x2_t __a)
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_s64 (int64x1_t __a)
-{
- return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_f32 (float32x2_t __a)
-{
- return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vreinterpret_p16_u8 (uint8x8_t __a)
{
return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
@@ -11200,76 +11521,36 @@ vreinterpret_p16_u32 (uint32x2_t __a)
return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_u64 (uint64x1_t __a)
-{
- return (poly16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_p8 (poly8x8_t __a)
-{
- return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s8 (int8x16_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s16 (int16x8_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s32 (int32x4_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s64 (int64x2_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_f32 (float32x4_t __a)
-{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u8 (uint8x16_t __a)
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p8 (poly8x8_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u16 (uint16x8_t __a)
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p16 (poly16x4_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u32 (uint32x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p64 (poly64x1_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u64 (uint64x2_t __a)
+#endif
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s64 (int64x1_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_p8 (poly8x16_t __a)
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u64 (uint64x1_t __a)
{
- return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi ((int64x1_t) __a);
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
@@ -11291,12 +11572,6 @@ vreinterpret_f32_s32 (int32x2_t __a)
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_s64 (int64x1_t __a)
-{
- return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vreinterpret_f32_u8 (uint8x8_t __a)
{
return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
@@ -11314,82 +11589,124 @@ vreinterpret_f32_u32 (uint32x2_t __a)
return (float32x2_t)__builtin_neon_vreinterpretv2sfv2si ((int32x2_t) __a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_u64 (uint64x1_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_p8 (poly8x8_t __a)
{
- return (float32x2_t)__builtin_neon_vreinterpretv2sfdi ((int64x1_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_p8 (poly8x8_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_p16 (poly16x4_t __a)
{
- return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_p16 (poly16x4_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_f32 (float32x2_t __a)
{
- return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s8 (int8x16_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s64 (int64x1_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi (__a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdidi (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s16 (int16x8_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u64 (uint64x1_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi (__a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s32 (int32x4_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s8 (int8x8_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si (__a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s64 (int64x2_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s16 (int16x4_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di (__a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u8 (uint8x16_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s32 (int32x2_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u16 (uint16x8_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u8 (uint8x8_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u32 (uint32x4_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u16 (uint16x4_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si ((int32x4_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u64 (uint64x2_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u32 (uint32x2_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_p8 (poly8x16_t __a)
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p8 (poly8x8_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_p16 (poly16x8_t __a)
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p16 (poly16x4_t __a)
{
- return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p64 (poly64x1_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdidi (__a);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
@@ -11411,12 +11728,6 @@ vreinterpret_s64_s32 (int32x2_t __a)
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_f32 (float32x2_t __a)
-{
- return (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vreinterpret_s64_u8 (uint8x8_t __a)
{
return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
@@ -11434,550 +11745,1204 @@ vreinterpret_s64_u32 (uint32x2_t __a)
return (int64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_u64 (uint64x1_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p8 (poly8x8_t __a)
{
- return (int64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_p8 (poly8x8_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p16 (poly16x4_t __a)
{
- return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_p16 (poly16x4_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f32 (float32x2_t __a)
{
- return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s8 (int8x16_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p64 (poly64x1_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s16 (int16x8_t __a)
+#endif
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s64 (int64x1_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s32 (int32x4_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s8 (int8x8_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_f32 (float32x4_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s16 (int16x4_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u8 (uint8x16_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s32 (int32x2_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u16 (uint16x8_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u8 (uint8x8_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u32 (uint32x4_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u16 (uint16x4_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u64 (uint64x2_t __a)
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u32 (uint32x2_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_p8 (poly8x16_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p8 (poly8x8_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_p16 (poly16x8_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p16 (poly16x4_t __a)
{
- return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s8 (int8x8_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f32 (float32x2_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s16 (int16x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p64 (poly64x1_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s32 (int32x2_t __a)
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s64 (int64x1_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s64 (int64x1_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u64 (uint64x1_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_f32 (float32x2_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s16 (int16x4_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u8 (uint8x8_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s32 (int32x2_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u16 (uint16x4_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u8 (uint8x8_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u32 (uint32x2_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u16 (uint16x4_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_p8 (poly8x8_t __a)
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u32 (uint32x2_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_p16 (poly16x4_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p8 (poly8x8_t __a)
{
- return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s8 (int8x16_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p16 (poly16x4_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s16 (int16x8_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f32 (float32x2_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s32 (int32x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p64 (poly64x1_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s64 (int64x2_t __a)
+#endif
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s64 (int64x1_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_f32 (float32x4_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u64 (uint64x1_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u8 (uint8x16_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s8 (int8x8_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u16 (uint16x8_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s32 (int32x2_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u32 (uint32x4_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u8 (uint8x8_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_p8 (poly8x16_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u16 (uint16x4_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_p16 (poly16x8_t __a)
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u32 (uint32x2_t __a)
{
- return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s16 (int16x4_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p8 (poly8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s32 (int32x2_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p16 (poly16x4_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s64 (int64x1_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f32 (float32x2_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_f32 (float32x2_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p64 (poly64x1_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u8 (uint8x8_t __a)
+#endif
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s64 (int64x1_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u16 (uint16x4_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u64 (uint64x1_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u32 (uint32x2_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s8 (int8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u64 (uint64x1_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s16 (int16x4_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_p8 (poly8x8_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u8 (uint8x8_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_p16 (poly16x4_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u16 (uint16x4_t __a)
{
- return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s16 (int16x8_t __a)
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u32 (uint32x2_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2si ((int32x2_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s32 (int32x4_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p8 (poly8x8_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s64 (int64x2_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p16 (poly16x4_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_f32 (float32x4_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f32 (float32x2_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u8 (uint8x16_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p64 (poly64x1_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u16 (uint16x8_t __a)
+#endif
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s64 (int64x1_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u32 (uint32x4_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u64 (uint64x1_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u64 (uint64x2_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s8 (int8x8_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_p8 (poly8x16_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s16 (int16x4_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_p16 (poly16x8_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s32 (int32x2_t __a)
{
- return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s8 (int8x8_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u16 (uint16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s32 (int32x2_t __a)
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u32 (uint32x2_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s64 (int64x1_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p8 (poly8x8_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_f32 (float32x2_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p16 (poly16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u8 (uint8x8_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f32 (float32x2_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u16 (uint16x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p64 (poly64x1_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u32 (uint32x2_t __a)
+#endif
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s64 (int64x1_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u64 (uint64x1_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u64 (uint64x1_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_p8 (poly8x8_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s8 (int8x8_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_p16 (poly16x4_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s16 (int16x4_t __a)
{
- return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s8 (int8x16_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s32 (int32x2_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s32 (int32x4_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u8 (uint8x8_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s64 (int64x2_t __a)
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u32 (uint32x2_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_f32 (float32x4_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p8 (poly8x8_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u8 (uint8x16_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p16 (poly16x4_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u16 (uint16x8_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f32 (float32x2_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u32 (uint32x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p64 (poly64x1_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u64 (uint64x2_t __a)
+#endif
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s64 (int64x1_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_p8 (poly8x16_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u64 (uint64x1_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_p16 (poly16x8_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s8 (int8x8_t __a)
{
- return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s8 (int8x8_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s16 (int16x4_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s16 (int16x4_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s32 (int32x2_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2si (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s64 (int64x1_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u8 (uint8x8_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_f32 (float32x2_t __a)
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u16 (uint16x4_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u8 (uint8x8_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u16 (uint16x4_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_f32 (float32x4_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u32 (uint32x2_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p64 (poly64x2_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv2si ((int32x2_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u64 (uint64x1_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p128 (poly128_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_p8 (poly8x8_t __a)
+#endif
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s64 (int64x2_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_p16 (poly16x4_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
{
- return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s8 (int8x16_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s8 (int8x16_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s16 (int16x8_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s16 (int16x8_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s64 (int64x2_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s32 (int32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_f32 (float32x4_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u8 (uint8x16_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u16 (uint16x8_t __a)
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u32 (uint32x4_t __a)
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv4si ((int32x4_t) __a);
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u64 (uint64x2_t __a)
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_f32 (float32x4_t __a)
{
- return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p64 (poly64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p128 (poly128_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p64 (poly64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p128 (poly128_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si ((int32x4_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p8 (poly8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p16 (poly16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_f32 (float32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p128 (poly128_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s64 (int64x2_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u64 (uint64x2_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s8 (int8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s16 (int16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s32 (int32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u8 (uint8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u16 (uint16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u32 (uint32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p8 (poly8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p16 (poly16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_f32 (float32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4sf (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p64 (poly64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s64 (int64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u64 (uint64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s8 (int8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s16 (int16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s32 (int32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4si (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u8 (uint8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u16 (uint16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u32 (uint32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4si ((int32x4_t) __a);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p64 (poly64x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p128 (poly128_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p64 (poly64x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p128 (poly128_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p64 (poly64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p128 (poly128_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p64 (poly64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p128 (poly128_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
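[Aside, not part of the patch: a minimal usage sketch of the vreinterpret_* family rewritten above. The function name add_one_to_each_byte is illustrative. Each vreinterpret is a pure bitcast between 64-bit vector types, so the two conversions below should compile to no NEON instructions:

#include <arm_neon.h>

uint32x2_t
add_one_to_each_byte (uint32x2_t x)
{
  /* Same 64 bits, viewed as eight byte lanes.  */
  uint8x8_t bytes = vreinterpret_u8_u32 (x);
  /* Per-byte add; carries do not cross byte lanes.  */
  bytes = vadd_u8 (bytes, vdup_n_u8 (1));
  /* View the result as two 32-bit lanes again.  */
  return vreinterpret_u32_u8 (bytes);
}
]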
@@ -11992,106 +12957,108 @@ vreinterpretq_s32_p16 (poly16x8_t __a)
return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s8 (int8x8_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_f32 (float32x4_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s16 (int16x4_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p64 (poly64x2_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s32 (int32x2_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p128 (poly128_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siti ((__builtin_neon_ti) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s64 (int64x1_t __a)
+#endif
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s64 (int64x2_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_f32 (float32x2_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u16 (uint16x4_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s8 (int8x16_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u32 (uint32x2_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s16 (int16x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u64 (uint64x1_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_p8 (poly8x8_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_p16 (poly16x4_t __a)
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
{
- return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4si ((int32x4_t) __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s8 (int8x16_t __a)
+vreinterpretq_u8_p8 (poly8x16_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s16 (int16x8_t __a)
+vreinterpretq_u8_p16 (poly16x8_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s32 (int32x4_t __a)
+vreinterpretq_u8_f32 (float32x4_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
}
+#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s64 (int64x2_t __a)
+vreinterpretq_u8_p64 (poly64x2_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_f32 (float32x4_t __a)
-{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
}
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_u16 (uint16x8_t __a)
+vreinterpretq_u8_p128 (poly128_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
}
+#endif
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_u32 (uint32x4_t __a)
+vreinterpretq_u8_s64 (int64x2_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
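[Aside, not part of the patch: the q-form poly64/poly128 reinterprets added in the hunks above are declared only under __ARM_FEATURE_CRYPTO, so user code has to repeat that guard. A hedged sketch, with as_poly64 an illustrative name:

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* Bitcast two 64-bit unsigned lanes to polynomial lanes, e.g. as
   operands for carryless multiplication; only well-formed when the
   target's crypto extension is enabled.  */
poly64x2_t
as_poly64 (uint64x2_t x)
{
  return vreinterpretq_p64_u64 (x);
}
#endif
]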
@@ -12101,75 +13068,79 @@ vreinterpretq_u8_u64 (uint64x2_t __a)
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_p8 (poly8x16_t __a)
+vreinterpretq_u8_s8 (int8x16_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_p16 (poly16x8_t __a)
+vreinterpretq_u8_s16 (int16x8_t __a)
{
- return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s8 (int8x8_t __a)
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s32 (int32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s16 (int16x4_t __a)
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s32 (int32x2_t __a)
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s64 (int64x1_t __a)
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_f32 (float32x2_t __a)
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u8 (uint8x8_t __a)
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_f32 (float32x4_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u32 (uint32x2_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p64 (poly64x2_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u64 (uint64x1_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p128 (poly128_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_p8 (poly8x8_t __a)
+#endif
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s64 (int64x2_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_p16 (poly16x4_t __a)
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
{
- return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
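[Aside, not part of the patch: one more sketch of the q-form conversions, with sign_bits an illustrative name. Reinterpreting floats as unsigned lanes exposes their IEEE-754 bit patterns, which permits branchless sign tests:

#include <arm_neon.h>

uint32x4_t
sign_bits (float32x4_t v)
{
  /* Raw bit patterns of the four floats.  */
  uint32x4_t bits = vreinterpretq_u32_f32 (v);
  /* Logical shift right by 31: 1 in each lane whose sign bit is set.  */
  return vshrq_n_u32 (bits, 31);
}
]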
@@ -12191,167 +13162,266 @@ vreinterpretq_u16_s32 (int32x4_t __a)
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_s64 (int64x2_t __a)
+vreinterpretq_u16_u8 (uint8x16_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_f32 (float32x4_t __a)
+vreinterpretq_u16_u32 (uint32x4_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u8 (uint8x16_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u32 (uint32x4_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u64 (uint64x2_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_f32 (float32x4_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_p8 (poly8x16_t __a)
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p64 (poly64x2_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_p16 (poly16x8_t __a)
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p128 (poly128_t __a)
{
- return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siti ((__builtin_neon_ti) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s8 (int8x8_t __a)
+#endif
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s64 (int64x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s16 (int16x4_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s32 (int32x2_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s8 (int8x16_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv2si (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s64 (int64x1_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s16 (int16x8_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_f32 (float32x2_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s32 (int32x4_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4si (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u8 (uint8x8_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u16 (uint16x4_t __a)
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u64 (uint64x1_t __a)
+
+#ifdef __ARM_FEATURE_CRYPTO
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vldrq_p128 (poly128_t const * __ptr)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+#ifdef __ARM_BIG_ENDIAN
+ poly64_t* __ptmp = (poly64_t*) __ptr;
+ poly64_t __d0 = vld1_p64 (__ptmp);
+ poly64_t __d1 = vld1_p64 (__ptmp + 1);
+ return vreinterpretq_p128_p64 (vcombine_p64 (__d1, __d0));
+#else
+ return vreinterpretq_p128_p64 (vld1q_p64 ((poly64_t*) __ptr));
+#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_p8 (poly8x8_t __a)
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vstrq_p128 (poly128_t * __ptr, poly128_t __val)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+#ifdef __ARM_BIG_ENDIAN
+ poly64x2_t __tmp = vreinterpretq_p64_p128 (__val);
+ poly64_t __d0 = vget_high_p64 (__tmp);
+ poly64_t __d1 = vget_low_p64 (__tmp);
+ vst1q_p64 ((poly64_t*) __ptr, vcombine_p64 (__d0, __d1));
+#else
+ vst1q_p64 ((poly64_t*) __ptr, vreinterpretq_p64_p128 (__val));
+#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_p16 (poly16x4_t __a)
+/* The vceq_p64 intrinsic does not map to a single instruction.
+   Instead we emulate it by performing a 32-bit variant of vceq
+   and applying a pairwise min reduction to the result.
+   vceq_u32 produces two 32-bit halves, each of which is all ones or
+   all zeros depending on whether the corresponding 32-bit halves of
+   the poly64_t values were equal.  The whole poly64_t values are equal
+   if and only if both halves are equal, i.e. vceq_u32 returns all ones.
+   If either half is all zeros, the whole result must be all zeros,
+   which is exactly what the pairwise min reduction achieves.  */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_p64 (poly64x1_t __a, poly64x1_t __b)
{
- return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vceq_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmin_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s8 (int8x16_t __a)
+/* The vtst_p64 intrinsic does not map to a single instruction.
+   We emulate it in a way similar to vceq_p64 above, but here we
+   reduce with max: if any pair of corresponding bits is set in
+   both poly64_t values, the whole result must be all ones.  */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_p64 (poly64x1_t __a, poly64x1_t __b)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vtst_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmax_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaeseq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aese (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesdq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aesd (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesmcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesmc (__data);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesimcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesimc (__data);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vsha1h_u32 (uint32_t __hash_e)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ __t = __builtin_arm_crypto_sha1h (__t);
+ return vgetq_lane_u32 (__t, 0);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s16 (int16x8_t __a)
+vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1c (__hash_abcd, __t, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s32 (int32x4_t __a)
+vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv4si (__a);
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1p (__hash_abcd, __t, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s64 (int64x2_t __a)
+vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1m (__hash_abcd, __t, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_f32 (float32x4_t __a)
+vsha1su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+ return __builtin_arm_crypto_sha1su0 (__w0_3, __w4_7, __w8_11);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u8 (uint8x16_t __a)
+vsha1su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w12_15)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+ return __builtin_arm_crypto_sha1su1 (__tw0_3, __w12_15);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u16 (uint16x8_t __a)
+vsha256hq_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+ return __builtin_arm_crypto_sha256h (__hash_abcd, __hash_efgh, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u64 (uint64x2_t __a)
+vsha256h2q_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+ return __builtin_arm_crypto_sha256h2 (__hash_abcd, __hash_efgh, __wk);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_p8 (poly8x16_t __a)
+vsha256su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+ return __builtin_arm_crypto_sha256su0 (__w0_3, __w4_7);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_p16 (poly16x8_t __a)
+vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
{
- return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+ return __builtin_arm_crypto_sha256su1 (__tw0_3, __w8_11, __w12_15);
+}
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_p64 (poly64_t __a, poly64_t __b)
+{
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __a, (uint64_t) __b);
}
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
+{
+ poly64_t __t1 = vget_high_p64 (__a);
+ poly64_t __t2 = vget_high_p64 (__b);
+
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __t1, (uint64_t) __t2);
+}
+
+#endif
#ifdef __cplusplus
}
#endif
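
Taken together, the AES wrappers above compose into the usual round structure. A minimal sketch, not part of the patch, assuming a target where __ARM_FEATURE_CRYPTO is defined (e.g. an ARMv8-A core with the crypto extension):

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* One full AES encryption round: vaeseq_u8 performs AddRoundKey,
   SubBytes and ShiftRows; vaesmcq_u8 performs MixColumns.  */
static uint8x16_t
aes_enc_round (uint8x16_t state, uint8x16_t round_key)
{
  return vaesmcq_u8 (vaeseq_u8 (state, round_key));
}

/* The final round omits MixColumns, so it is vaeseq_u8 followed by an
   explicit XOR with the last round key.  */
static uint8x16_t
aes_enc_final (uint8x16_t state, uint8x16_t penultimate_key,
               uint8x16_t last_key)
{
  return veorq_u8 (vaeseq_u8 (state, penultimate_key), last_key);
}
#endif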
diff --git a/gcc/config/arm/arm_neon_builtins.def b/gcc/config/arm/arm_neon_builtins.def
index 92f1d7ad1c4..a00951ab65b 100644
--- a/gcc/config/arm/arm_neon_builtins.def
+++ b/gcc/config/arm/arm_neon_builtins.def
@@ -1,6 +1,5 @@
/* NEON builtin definitions for ARM.
- Copyright (C) 2013
- Free Software Foundation, Inc.
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GCC.
@@ -158,11 +157,12 @@ VAR5 (REINTERP, vreinterpretv4hi, v8qi, v4hi, v2si, v2sf, di),
VAR5 (REINTERP, vreinterpretv2si, v8qi, v4hi, v2si, v2sf, di),
VAR5 (REINTERP, vreinterpretv2sf, v8qi, v4hi, v2si, v2sf, di),
VAR5 (REINTERP, vreinterpretdi, v8qi, v4hi, v2si, v2sf, di),
-VAR5 (REINTERP, vreinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di),
-VAR5 (REINTERP, vreinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di),
-VAR5 (REINTERP, vreinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di),
-VAR5 (REINTERP, vreinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di),
-VAR5 (REINTERP, vreinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di),
+VAR6 (REINTERP, vreinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di, ti),
+VAR6 (REINTERP, vreinterpretti, v16qi, v8hi, v4si, v4sf, v2di, ti),
VAR10 (LOAD1, vld1,
v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di),
VAR10 (LOAD1LANE, vld1_lane,
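
The new ti entries above back the poly128_t reinterpret builtins used by vldrq_p128 and vstrq_p128 earlier in the patch. A hedged sketch of putting them to work, using only intrinsics defined in the arm_neon.h hunk above:

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* GHASH-style building block: load a 128-bit polynomial, split it into
   64-bit halves and form both carry-less products against h.
   vldrq_p128 hides the big-endian lane swap shown in its definition.  */
void
clmul_halves (const poly128_t *x, poly64x2_t h, poly128_t *lo, poly128_t *hi)
{
  poly64x2_t v = vreinterpretq_p64_p128 (vldrq_p128 (x));
  poly64_t v0 = vget_low_p64 (v);
  poly64_t h0 = vget_low_p64 (h);
  vstrq_p128 (lo, vmull_p64 (v0, h0));
  vstrq_p128 (hi, vmull_high_p64 (v, h));
}
#endif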
diff --git a/gcc/config/arm/bpabi.h b/gcc/config/arm/bpabi.h
index b39c4a91a9d..0c0be67fb3f 100644
--- a/gcc/config/arm/bpabi.h
+++ b/gcc/config/arm/bpabi.h
@@ -1,5 +1,5 @@
/* Configuration file for ARM BPABI targets.
- Copyright (C) 2004-2013 Free Software Foundation, Inc.
+ Copyright (C) 2004-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC
This file is part of GCC.
@@ -60,8 +60,11 @@
|mcpu=cortex-a7 \
|mcpu=cortex-a8|mcpu=cortex-a9|mcpu=cortex-a15 \
|mcpu=cortex-a12 \
+ |mcpu=cortex-a15.cortex-a7 \
|mcpu=marvell-pj4 \
|mcpu=cortex-a53 \
+ |mcpu=cortex-a57 \
+ |mcpu=cortex-a57.cortex-a53 \
|mcpu=generic-armv7-a \
|march=armv7-m|mcpu=cortex-m3 \
|march=armv7e-m|mcpu=cortex-m4 \
@@ -74,7 +77,10 @@
|mcpu=cortex-a7 \
|mcpu=cortex-a8|mcpu=cortex-a9|mcpu=cortex-a15 \
|mcpu=cortex-a12 \
+ |mcpu=cortex-a15.cortex-a7 \
|mcpu=cortex-a53 \
+ |mcpu=cortex-a57 \
+ |mcpu=cortex-a57.cortex-a53 \
|mcpu=marvell-pj4 \
|mcpu=generic-armv7-a \
|march=armv7-m|mcpu=cortex-m3 \
diff --git a/gcc/config/arm/coff.h b/gcc/config/arm/coff.h
index aaaebac4e45..7deb2389804 100644
--- a/gcc/config/arm/coff.h
+++ b/gcc/config/arm/coff.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler.
For ARM with COFF object format.
- Copyright (C) 1995-2013 Free Software Foundation, Inc.
+ Copyright (C) 1995-2014 Free Software Foundation, Inc.
Contributed by Doug Evans (devans@cygnus.com).
This file is part of GCC.
diff --git a/gcc/config/arm/constraints.md b/gcc/config/arm/constraints.md
index e2a3099e041..85dd116cec0 100644
--- a/gcc/config/arm/constraints.md
+++ b/gcc/config/arm/constraints.md
@@ -1,5 +1,5 @@
;; Constraint definitions for ARM and Thumb
-;; Copyright (C) 2006-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2006-2014 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;; This file is part of GCC.
@@ -31,7 +31,7 @@
;; 'H' was previously used for FPA.
;; The following multi-letter normal constraints have been used:
-;; in ARM/Thumb-2 state: Da, Db, Dc, Dd, Dn, Dl, DL, Do, Dv, Dy, Di, Dt, Dz
+;; in ARM/Thumb-2 state: Da, Db, Dc, Dd, Dn, Dl, DL, Do, Dv, Dy, Di, Dt, Dp, Dz
;; in Thumb-1 state: Pa, Pb, Pc, Pd, Pe
;; in Thumb-2 state: Pj, PJ, Ps, Pt, Pu, Pv, Pw, Px, Py
@@ -328,12 +328,18 @@
(and (match_code "const_double")
(match_test "TARGET_32BIT && TARGET_VFP_DOUBLE && vfp3_const_double_rtx (op)")))
-(define_constraint "Dt"
+(define_constraint "Dt"
"@internal
In ARM/Thumb-2 state a const_double which can be used as the fract-bits operand of a vcvt.f32.s32 operation
(and (match_code "const_double")
(match_test "TARGET_32BIT && TARGET_VFP && vfp3_const_double_for_fract_bits (op)")))
+(define_constraint "Dp"
+ "@internal
+   In ARM/Thumb-2 state a const_double which can be used as the bits operand of a vcvt.s32.f32 operation
+ (and (match_code "const_double")
+ (match_test "TARGET_32BIT && TARGET_VFP && vfp3_const_double_for_bits (op)")))
+
(define_register_constraint "Ts" "(arm_restrict_it) ? LO_REGS : GENERAL_REGS"
"For arm_restrict_it the core registers @code{r0}-@code{r7}. GENERAL_REGS otherwise.")
diff --git a/gcc/config/arm/cortex-a15-neon.md b/gcc/config/arm/cortex-a15-neon.md
index ebb6b66f782..02d4a530b2b 100644
--- a/gcc/config/arm/cortex-a15-neon.md
+++ b/gcc/config/arm/cortex-a15-neon.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A15 NEON pipeline description
-;; Copyright (C) 2012-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2012-2014 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
diff --git a/gcc/config/arm/cortex-a15.md b/gcc/config/arm/cortex-a15.md
index 5a31a097918..b3f126a7228 100644
--- a/gcc/config/arm/cortex-a15.md
+++ b/gcc/config/arm/cortex-a15.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A15 pipeline description
-;; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
;;
;; Written by Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
diff --git a/gcc/config/arm/cortex-a5.md b/gcc/config/arm/cortex-a5.md
index 22e0a08f38e..eed098ef92a 100644
--- a/gcc/config/arm/cortex-a5.md
+++ b/gcc/config/arm/cortex-a5.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A5 pipeline description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Contributed by CodeSourcery.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/cortex-a53.md b/gcc/config/arm/cortex-a53.md
index 48d0d03853f..deae8eba522 100644
--- a/gcc/config/arm/cortex-a53.md
+++ b/gcc/config/arm/cortex-a53.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A53 pipeline description
-;; Copyright (C) 2013 Free Software Foundation, Inc.
+;; Copyright (C) 2013-2014 Free Software Foundation, Inc.
;;
;; Contributed by ARM Ltd.
;;
diff --git a/gcc/config/arm/cortex-a7.md b/gcc/config/arm/cortex-a7.md
index 7db6c5b24fb..8291d7fa928 100644
--- a/gcc/config/arm/cortex-a7.md
+++ b/gcc/config/arm/cortex-a7.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A7 pipeline description
-;; Copyright (C) 2012-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2012-2014 Free Software Foundation, Inc.
;;
;; Contributed by ARM Ltd.
;; Based on cortex-a5.md which was originally contributed by CodeSourcery.
diff --git a/gcc/config/arm/cortex-a8-neon.md b/gcc/config/arm/cortex-a8-neon.md
index 6adfd136569..1bb0ab2377e 100644
--- a/gcc/config/arm/cortex-a8-neon.md
+++ b/gcc/config/arm/cortex-a8-neon.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A8 NEON scheduling description.
-;; Copyright (C) 2007-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
;; Contributed by CodeSourcery.
;; This file is part of GCC.
diff --git a/gcc/config/arm/cortex-a8.md b/gcc/config/arm/cortex-a8.md
index 1eade5e1244..b272472e0ce 100644
--- a/gcc/config/arm/cortex-a8.md
+++ b/gcc/config/arm/cortex-a8.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A8 scheduling description.
-;; Copyright (C) 2007-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
;; Contributed by CodeSourcery.
;; This file is part of GCC.
diff --git a/gcc/config/arm/cortex-a9-neon.md b/gcc/config/arm/cortex-a9-neon.md
index cd6b7a4fd36..3ff93f92402 100644
--- a/gcc/config/arm/cortex-a9-neon.md
+++ b/gcc/config/arm/cortex-a9-neon.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A9 pipeline description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;;
;; Neon pipeline description contributed by ARM Ltd.
;;
diff --git a/gcc/config/arm/cortex-a9.md b/gcc/config/arm/cortex-a9.md
index 7c62d8489ae..a888896c5d4 100644
--- a/gcc/config/arm/cortex-a9.md
+++ b/gcc/config/arm/cortex-a9.md
@@ -1,5 +1,5 @@
;; ARM Cortex-A9 pipeline description
-;; Copyright (C) 2008-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2008-2014 Free Software Foundation, Inc.
;; Originally written by CodeSourcery for VFP.
;;
;; Rewritten by Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
diff --git a/gcc/config/arm/cortex-m4-fpu.md b/gcc/config/arm/cortex-m4-fpu.md
index 2190938b65c..aa81e52efaf 100644
--- a/gcc/config/arm/cortex-m4-fpu.md
+++ b/gcc/config/arm/cortex-m4-fpu.md
@@ -1,5 +1,5 @@
;; ARM Cortex-M4 FPU pipeline description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Contributed by CodeSourcery.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/cortex-m4.md b/gcc/config/arm/cortex-m4.md
index 9ae4cc3143b..690ce751fb3 100644
--- a/gcc/config/arm/cortex-m4.md
+++ b/gcc/config/arm/cortex-m4.md
@@ -1,5 +1,5 @@
;; ARM Cortex-M4 pipeline description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Contributed by CodeSourcery.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/cortex-r4.md b/gcc/config/arm/cortex-r4.md
index 7a3ceeb15d7..f000124cb6f 100644
--- a/gcc/config/arm/cortex-r4.md
+++ b/gcc/config/arm/cortex-r4.md
@@ -1,5 +1,5 @@
;; ARM Cortex-R4 scheduling description.
-;; Copyright (C) 2007-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
;; Contributed by CodeSourcery.
;; This file is part of GCC.
diff --git a/gcc/config/arm/cortex-r4f.md b/gcc/config/arm/cortex-r4f.md
index 1bc4249d4d1..25d949789cc 100644
--- a/gcc/config/arm/cortex-r4f.md
+++ b/gcc/config/arm/cortex-r4f.md
@@ -1,5 +1,5 @@
;; ARM Cortex-R4F VFP pipeline description
-;; Copyright (C) 2007-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/crypto.def b/gcc/config/arm/crypto.def
new file mode 100644
index 00000000000..dc805d9ec64
--- /dev/null
+++ b/gcc/config/arm/crypto.def
@@ -0,0 +1,34 @@
+/* Cryptographic instruction builtin definitions.
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+CRYPTO2 (aesd, AESD, v16uqi, v16uqi, v16uqi)
+CRYPTO2 (aese, AESE, v16uqi, v16uqi, v16uqi)
+CRYPTO1 (aesimc, AESIMC, v16uqi, v16uqi)
+CRYPTO1 (aesmc, AESMC, v16uqi, v16uqi)
+CRYPTO1 (sha1h, SHA1H, v4usi, v4usi)
+CRYPTO2 (sha1su1, SHA1SU1, v4usi, v4usi, v4usi)
+CRYPTO2 (sha256su0, SHA256SU0, v4usi, v4usi, v4usi)
+CRYPTO3 (sha1c, SHA1C, v4usi, v4usi, v4usi, v4usi)
+CRYPTO3 (sha1m, SHA1M, v4usi, v4usi, v4usi, v4usi)
+CRYPTO3 (sha1p, SHA1P, v4usi, v4usi, v4usi, v4usi)
+CRYPTO3 (sha1su0, SHA1SU0, v4usi, v4usi, v4usi, v4usi)
+CRYPTO3 (sha256h, SHA256H, v4usi, v4usi, v4usi, v4usi)
+CRYPTO3 (sha256h2, SHA256H2, v4usi, v4usi, v4usi, v4usi)
+CRYPTO3 (sha256su1, SHA256SU1, v4usi, v4usi, v4usi, v4usi)
+CRYPTO2 (vmullp64, VMULLP64, uti, udi, udi)
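
The v4usi ternary entries map onto the SHA-256 hash-update steps, which pair up four rounds at a time. A sketch, not part of the patch, following the usual ACLE usage of the wrappers defined in arm_neon.h above; the schedule value wk is assumed to already include the round constants:

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* Four rounds of SHA-256 compression: both the H and H2 steps must see
   the ABCD state as it was before this quad-round.  */
static void
sha256_quad_round (uint32x4_t *abcd, uint32x4_t *efgh, uint32x4_t wk)
{
  uint32x4_t abcd_prev = *abcd;
  *abcd = vsha256hq_u32 (*abcd, *efgh, wk);
  *efgh = vsha256h2q_u32 (*efgh, abcd_prev, wk);
}
#endif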
diff --git a/gcc/config/arm/crypto.md b/gcc/config/arm/crypto.md
new file mode 100644
index 00000000000..9f249803d22
--- /dev/null
+++ b/gcc/config/arm/crypto.md
@@ -0,0 +1,86 @@
+;; ARMv8-A crypto patterns.
+;; Copyright (C) 2013-2014 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_insn "crypto_<crypto_pattern>"
+ [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
+ (unspec:<crypto_mode> [(match_operand:<crypto_mode> 1
+ "register_operand" "w")]
+ CRYPTO_UNARY))]
+ "TARGET_CRYPTO"
+ "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q1"
+ [(set_attr "type" "<crypto_type>")]
+)
+
+(define_insn "crypto_<crypto_pattern>"
+ [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
+ (unspec:<crypto_mode> [(match_operand:<crypto_mode> 1 "register_operand" "0")
+ (match_operand:<crypto_mode> 2 "register_operand" "w")]
+ CRYPTO_BINARY))]
+ "TARGET_CRYPTO"
+ "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q2"
+ [(set_attr "type" "<crypto_type>")]
+)
+
+(define_insn "crypto_<crypto_pattern>"
+ [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
+ (unspec:<crypto_mode> [(match_operand:<crypto_mode> 1 "register_operand" "0")
+ (match_operand:<crypto_mode> 2 "register_operand" "w")
+ (match_operand:<crypto_mode> 3 "register_operand" "w")]
+ CRYPTO_TERNARY))]
+ "TARGET_CRYPTO"
+ "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q2, %q3"
+ [(set_attr "type" "<crypto_type>")]
+)
+
+(define_insn "crypto_sha1h"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (zero_extend:V4SI
+ (unspec:SI [(vec_select:SI
+ (match_operand:V4SI 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))]
+ UNSPEC_SHA1H)))]
+ "TARGET_CRYPTO"
+ "sha1h.32\\t%q0, %q1"
+ [(set_attr "type" "crypto_sha1_fast")]
+)
+
+(define_insn "crypto_vmullp64"
+ [(set (match_operand:TI 0 "register_operand" "=w")
+ (unspec:TI [(match_operand:DI 1 "register_operand" "w")
+ (match_operand:DI 2 "register_operand" "w")]
+ UNSPEC_VMULLP64))]
+ "TARGET_CRYPTO"
+ "vmull.p64\\t%q0, %P1, %P2"
+ [(set_attr "type" "neon_mul_d_long")]
+)
+
+(define_insn "crypto_<crypto_pattern>"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:<crypto_mode>
+ [(match_operand:<crypto_mode> 1 "register_operand" "0")
+ (vec_select:SI
+ (match_operand:<crypto_mode> 2 "register_operand" "w")
+ (parallel [(match_operand:SI 4 "immediate_operand" "i")]))
+ (match_operand:<crypto_mode> 3 "register_operand" "w")]
+ CRYPTO_SELECTING))]
+ "TARGET_CRYPTO"
+ "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q2, %q3"
+ [(set_attr "type" "<crypto_type>")]
+)
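
The dedicated sha1h pattern models the scalar operation through lane 0 with a vec_select, matching how the vsha1h_u32 wrapper splats its argument and extracts lane 0 again. A hedged sketch of the conventional usage, combining it with the selecting SHA1C step:

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* Four rounds of the first SHA-1 stage.  The next E value is the old A
   rotated left by 30, which is exactly what vsha1h_u32 computes.  */
static void
sha1_quad_round_c (uint32x4_t *abcd, uint32_t *e, uint32x4_t wk)
{
  uint32_t e_next = vsha1h_u32 (vgetq_lane_u32 (*abcd, 0));
  *abcd = vsha1cq_u32 (*abcd, *e, wk);
  *e = e_next;
}
#endif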
diff --git a/gcc/config/arm/driver-arm.c b/gcc/config/arm/driver-arm.c
index 7ec4996ed55..7460aee438d 100644
--- a/gcc/config/arm/driver-arm.c
+++ b/gcc/config/arm/driver-arm.c
@@ -1,5 +1,5 @@
/* Subroutines for the gcc driver.
- Copyright (C) 2011-2013 Free Software Foundation, Inc.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
This file is part of GCC.
diff --git a/gcc/config/arm/elf.h b/gcc/config/arm/elf.h
index 5e21a084286..2ac8c8d048c 100644
--- a/gcc/config/arm/elf.h
+++ b/gcc/config/arm/elf.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler.
For ARM with ELF obj format.
- Copyright (C) 1995-2013 Free Software Foundation, Inc.
+ Copyright (C) 1995-2014 Free Software Foundation, Inc.
Contributed by Philip Blundell <philb@gnu.org> and
Catherine Moore <clm@cygnus.com>
diff --git a/gcc/config/arm/fa526.md b/gcc/config/arm/fa526.md
index 401abd3c0a0..c345fdf65e5 100644
--- a/gcc/config/arm/fa526.md
+++ b/gcc/config/arm/fa526.md
@@ -1,5 +1,5 @@
;; Faraday FA526 Pipeline Description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Written by I-Jui Sung, based on ARM926EJ-S Pipeline Description.
;; This file is part of GCC.
diff --git a/gcc/config/arm/fa606te.md b/gcc/config/arm/fa606te.md
index 88347bc2d96..01ecfc88c3c 100644
--- a/gcc/config/arm/fa606te.md
+++ b/gcc/config/arm/fa606te.md
@@ -1,5 +1,5 @@
;; Faraday FA606TE Pipeline Description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Written by Mingfeng Wu, based on ARM926EJ-S Pipeline Description.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/fa626te.md b/gcc/config/arm/fa626te.md
index e6790a21215..e615bae3764 100644
--- a/gcc/config/arm/fa626te.md
+++ b/gcc/config/arm/fa626te.md
@@ -1,5 +1,5 @@
;; Faraday FA626TE Pipeline Description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Written by I-Jui Sung, based on ARM926EJ-S Pipeline Description.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/fa726te.md b/gcc/config/arm/fa726te.md
index d0a03981eec..225b2cfdd74 100644
--- a/gcc/config/arm/fa726te.md
+++ b/gcc/config/arm/fa726te.md
@@ -1,5 +1,5 @@
;; Faraday FA726TE Pipeline Description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Written by I-Jui Sung, based on ARM926EJ-S Pipeline Description.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/fmp626.md b/gcc/config/arm/fmp626.md
index ffb68570e37..439054da647 100644
--- a/gcc/config/arm/fmp626.md
+++ b/gcc/config/arm/fmp626.md
@@ -1,5 +1,5 @@
;; Faraday FMP626 Pipeline Description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Written by Mingfeng Wu, based on ARM926EJ-S Pipeline Description.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/genopt.sh b/gcc/config/arm/genopt.sh
index aaf34678331..68fdb564c12 100755
--- a/gcc/config/arm/genopt.sh
+++ b/gcc/config/arm/genopt.sh
@@ -1,6 +1,6 @@
#!/bin/sh
# Generate arm-tables.opt from the lists in *.def.
-# Copyright (C) 2011-2013 Free Software Foundation, Inc.
+# Copyright (C) 2011-2014 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
@@ -23,7 +23,7 @@ cat <<EOF
; Generated automatically by genopt.sh from arm-cores.def, arm-arches.def
; and arm-fpus.def.
-; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+; Copyright (C) 2011-2014 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
diff --git a/gcc/config/arm/gentune.sh b/gcc/config/arm/gentune.sh
index 725ae0fe6d1..3fc7be912ab 100755
--- a/gcc/config/arm/gentune.sh
+++ b/gcc/config/arm/gentune.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# Generate arm-tune.md, a file containing the tune attribute from the list of
# CPUs in arm-cores.def
-# Copyright (C) 2004-2013 Free Software Foundation, Inc.
+# Copyright (C) 2004-2014 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
@@ -25,5 +25,5 @@ echo ";; Generated automatically by gentune.sh from arm-cores.def"
allcores=`awk -F'[(, ]+' '/^ARM_CORE/ { cores = cores$3"," } END { print cores } ' $1`
echo "(define_attr \"tune\""
-echo " \"$allcores\"" | sed -e 's/,"$/"/'
+echo " \"$allcores\"" | sed -e 's/,"$/"/' | sed -e 's/\([a-z0-9_]\+,[a-z0-9_]\+,[a-z0-9_]\+,\)/\1\n\t/g'
echo " (const (symbol_ref \"((enum attr_tune) arm_tune)\")))"
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 66779a7b7fa..33e09e4ce24 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -1,5 +1,5 @@
;; Code and mode iterator and attribute definitions for the ARM backend
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
@@ -201,6 +201,20 @@
(define_int_iterator NEON_VRINT [UNSPEC_NVRINTP UNSPEC_NVRINTZ UNSPEC_NVRINTM
UNSPEC_NVRINTX UNSPEC_NVRINTA UNSPEC_NVRINTN])
+(define_int_iterator CRC [UNSPEC_CRC32B UNSPEC_CRC32H UNSPEC_CRC32W
+ UNSPEC_CRC32CB UNSPEC_CRC32CH UNSPEC_CRC32CW])
+
+(define_int_iterator CRYPTO_UNARY [UNSPEC_AESMC UNSPEC_AESIMC])
+
+(define_int_iterator CRYPTO_BINARY [UNSPEC_AESD UNSPEC_AESE
+ UNSPEC_SHA1SU1 UNSPEC_SHA256SU0])
+
+(define_int_iterator CRYPTO_TERNARY [UNSPEC_SHA1SU0 UNSPEC_SHA256H
+ UNSPEC_SHA256H2 UNSPEC_SHA256SU1])
+
+(define_int_iterator CRYPTO_SELECTING [UNSPEC_SHA1C UNSPEC_SHA1M
+ UNSPEC_SHA1P])
+
;;----------------------------------------------------------------------------
;; Mode attributes
;;----------------------------------------------------------------------------
@@ -518,6 +532,49 @@
(define_int_attr nvrint_variant [(UNSPEC_NVRINTZ "z") (UNSPEC_NVRINTP "p")
(UNSPEC_NVRINTA "a") (UNSPEC_NVRINTM "m")
(UNSPEC_NVRINTX "x") (UNSPEC_NVRINTN "n")])
+
+(define_int_attr crc_variant [(UNSPEC_CRC32B "crc32b") (UNSPEC_CRC32H "crc32h")
+ (UNSPEC_CRC32W "crc32w") (UNSPEC_CRC32CB "crc32cb")
+ (UNSPEC_CRC32CH "crc32ch") (UNSPEC_CRC32CW "crc32cw")])
+
+(define_int_attr crc_mode [(UNSPEC_CRC32B "QI") (UNSPEC_CRC32H "HI")
+ (UNSPEC_CRC32W "SI") (UNSPEC_CRC32CB "QI")
+ (UNSPEC_CRC32CH "HI") (UNSPEC_CRC32CW "SI")])
+
+(define_int_attr crypto_pattern [(UNSPEC_SHA1H "sha1h") (UNSPEC_AESMC "aesmc")
+ (UNSPEC_AESIMC "aesimc") (UNSPEC_AESD "aesd")
+ (UNSPEC_AESE "aese") (UNSPEC_SHA1SU1 "sha1su1")
+ (UNSPEC_SHA256SU0 "sha256su0") (UNSPEC_SHA1C "sha1c")
+ (UNSPEC_SHA1M "sha1m") (UNSPEC_SHA1P "sha1p")
+ (UNSPEC_SHA1SU0 "sha1su0") (UNSPEC_SHA256H "sha256h")
+ (UNSPEC_SHA256H2 "sha256h2")
+ (UNSPEC_SHA256SU1 "sha256su1")])
+
+(define_int_attr crypto_type
+ [(UNSPEC_AESE "crypto_aes") (UNSPEC_AESD "crypto_aes")
+ (UNSPEC_AESMC "crypto_aes") (UNSPEC_AESIMC "crypto_aes")
+ (UNSPEC_SHA1C "crypto_sha1_slow") (UNSPEC_SHA1P "crypto_sha1_slow")
+ (UNSPEC_SHA1M "crypto_sha1_slow") (UNSPEC_SHA1SU1 "crypto_sha1_fast")
+ (UNSPEC_SHA1SU0 "crypto_sha1_xor") (UNSPEC_SHA256H "crypto_sha256_slow")
+ (UNSPEC_SHA256H2 "crypto_sha256_slow") (UNSPEC_SHA256SU0 "crypto_sha256_fast")
+ (UNSPEC_SHA256SU1 "crypto_sha256_slow")])
+
+(define_int_attr crypto_size_sfx [(UNSPEC_SHA1H "32") (UNSPEC_AESMC "8")
+ (UNSPEC_AESIMC "8") (UNSPEC_AESD "8")
+ (UNSPEC_AESE "8") (UNSPEC_SHA1SU1 "32")
+ (UNSPEC_SHA256SU0 "32") (UNSPEC_SHA1C "32")
+ (UNSPEC_SHA1M "32") (UNSPEC_SHA1P "32")
+ (UNSPEC_SHA1SU0 "32") (UNSPEC_SHA256H "32")
+ (UNSPEC_SHA256H2 "32") (UNSPEC_SHA256SU1 "32")])
+
+(define_int_attr crypto_mode [(UNSPEC_SHA1H "V4SI") (UNSPEC_AESMC "V16QI")
+ (UNSPEC_AESIMC "V16QI") (UNSPEC_AESD "V16QI")
+ (UNSPEC_AESE "V16QI") (UNSPEC_SHA1SU1 "V4SI")
+ (UNSPEC_SHA256SU0 "V4SI") (UNSPEC_SHA1C "V4SI")
+ (UNSPEC_SHA1M "V4SI") (UNSPEC_SHA1P "V4SI")
+ (UNSPEC_SHA1SU0 "V4SI") (UNSPEC_SHA256H "V4SI")
+ (UNSPEC_SHA256H2 "V4SI") (UNSPEC_SHA256SU1 "V4SI")])
+
;; Both kinds of return insn.
(define_code_iterator returns [return simple_return])
(define_code_attr return_str [(return "") (simple_return "simple_")])
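
These attribute maps are what let the single crypto_<crypto_pattern> templates in crypto.md print the right mnemonic, scheduling type and size suffix per unspec. For example, a sketch of the expected lowering (not guaranteed; it depends on the exact -march/-mfpu options):

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* UNSPEC_AESD selects crypto_pattern "aesd" and crypto_size_sfx "8",
   so this should assemble to a single instruction of the form
   "aesd.8 q0, q1".  */
uint8x16_t
aes_dec_step (uint8x16_t state, uint8x16_t key)
{
  return vaesdq_u8 (state, key);
}
#endif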
diff --git a/gcc/config/arm/iwmmxt.md b/gcc/config/arm/iwmmxt.md
index 62cdae21e3f..56ff3e9f3c7 100644
--- a/gcc/config/arm/iwmmxt.md
+++ b/gcc/config/arm/iwmmxt.md
@@ -1,5 +1,5 @@
;; Patterns for the Intel Wireless MMX technology architecture.
-;; Copyright (C) 2003-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
;; Contributed by Red Hat.
;; This file is part of GCC.
diff --git a/gcc/config/arm/iwmmxt2.md b/gcc/config/arm/iwmmxt2.md
index 27a45f95b2e..b6e4b247687 100644
--- a/gcc/config/arm/iwmmxt2.md
+++ b/gcc/config/arm/iwmmxt2.md
@@ -1,5 +1,5 @@
;; Patterns for the Intel Wireless MMX technology architecture.
-;; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
;; Written by Marvell, Inc.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/ldmstm.md b/gcc/config/arm/ldmstm.md
index ad137d492e4..1a242907128 100644
--- a/gcc/config/arm/ldmstm.md
+++ b/gcc/config/arm/ldmstm.md
@@ -1,7 +1,7 @@
/* ARM ldm/stm instruction patterns. This file was automatically generated
using arm-ldmstm.ml. Please do not edit manually.
- Copyright (C) 2010-2013 Free Software Foundation, Inc.
+ Copyright (C) 2010-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
@@ -23,15 +23,15 @@
(define_insn "*ldm4_ia"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_operand:SI 5 "s_register_operand" "rk")))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 8))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
@@ -42,15 +42,15 @@
(define_insn "*thumb_ldm4_ia"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "low_register_operand" "")
(mem:SI (match_operand:SI 5 "s_register_operand" "l")))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 8))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 4"
@@ -61,15 +61,15 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 5 "s_register_operand" "+&rk")
(plus:SI (match_dup 5) (const_int 16)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 5)))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 8))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
@@ -82,15 +82,15 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 5 "s_register_operand" "+&l")
(plus:SI (match_dup 5) (const_int 16)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "low_register_operand" "")
(mem:SI (match_dup 5)))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 8))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 5"
@@ -100,13 +100,13 @@
(define_insn "*stm4_ia"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 5 "s_register_operand" "rk"))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 8)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 12)))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"stm%(ia%)\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
@@ -118,13 +118,13 @@
[(set (match_operand:SI 5 "s_register_operand" "+&rk")
(plus:SI (match_dup 5) (const_int 16)))
(set (mem:SI (match_dup 5))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 8)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 12)))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
"stm%(ia%)\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
@@ -136,29 +136,29 @@
[(set (match_operand:SI 5 "s_register_operand" "+&l")
(plus:SI (match_dup 5) (const_int 16)))
(set (mem:SI (match_dup 5))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "low_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "low_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 8)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "low_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 12)))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "low_register_operand" ""))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 5"
"stm%(ia%)\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")])
(define_insn "*ldm4_ib"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 5 "s_register_operand" "rk")
(const_int 4))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 8))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 16))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
@@ -170,16 +170,16 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 5 "s_register_operand" "+&rk")
(plus:SI (match_dup 5) (const_int 16)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 4))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 8))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int 16))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 5"
@@ -190,13 +190,13 @@
(define_insn "*stm4_ib"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 5 "s_register_operand" "rk") (const_int 4)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 12)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 16)))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
"stm%(ib%)\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
@@ -207,13 +207,13 @@
[(set (match_operand:SI 5 "s_register_operand" "+&rk")
(plus:SI (match_dup 5) (const_int 16)))
(set (mem:SI (plus:SI (match_dup 5) (const_int 4)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 12)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 16)))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 5"
"stm%(ib%)\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
@@ -221,16 +221,16 @@
(define_insn "*ldm4_da"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 5 "s_register_operand" "rk")
(const_int -12))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -8))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -4))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 5)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
"ldm%(da%)\t%5, {%1, %2, %3, %4}"
@@ -241,16 +241,16 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 5 "s_register_operand" "+&rk")
(plus:SI (match_dup 5) (const_int -16)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -12))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -8))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -4))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 5)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 5"
"ldm%(da%)\t%5!, {%1, %2, %3, %4}"
@@ -260,13 +260,13 @@
(define_insn "*stm4_da"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 5 "s_register_operand" "rk") (const_int -12)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -4)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))
(set (mem:SI (match_dup 5))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
"stm%(da%)\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
@@ -277,13 +277,13 @@
[(set (match_operand:SI 5 "s_register_operand" "+&rk")
(plus:SI (match_dup 5) (const_int -16)))
(set (mem:SI (plus:SI (match_dup 5) (const_int -12)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -4)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))
(set (mem:SI (match_dup 5))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 5"
"stm%(da%)\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
@@ -291,16 +291,16 @@
(define_insn "*ldm4_db"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 5 "s_register_operand" "rk")
(const_int -16))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -12))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -8))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
@@ -313,16 +313,16 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 5 "s_register_operand" "+&rk")
(plus:SI (match_dup 5) (const_int -16)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -16))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -12))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -8))))
- (set (match_operand:SI 4 "arm_hard_register_operand" "")
+ (set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 5)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
@@ -334,13 +334,13 @@
(define_insn "*stm4_db"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 5 "s_register_operand" "rk") (const_int -16)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -12)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -8)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -4)))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"stm%(db%)\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
@@ -352,13 +352,13 @@
[(set (match_operand:SI 5 "s_register_operand" "+&rk")
(plus:SI (match_dup 5) (const_int -16)))
(set (mem:SI (plus:SI (match_dup 5) (const_int -16)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -12)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -8)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int -4)))
- (match_operand:SI 4 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
"stm%(db%)\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
@@ -474,12 +474,12 @@
(define_insn "*ldm3_ia"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_operand:SI 4 "s_register_operand" "rk")))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
@@ -490,12 +490,12 @@
(define_insn "*thumb_ldm3_ia"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "low_register_operand" "")
(mem:SI (match_operand:SI 4 "s_register_operand" "l")))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 3"
@@ -506,12 +506,12 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 4 "s_register_operand" "+&rk")
(plus:SI (match_dup 4) (const_int 12)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 4)))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
@@ -524,12 +524,12 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 4 "s_register_operand" "+&l")
(plus:SI (match_dup 4) (const_int 12)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "low_register_operand" "")
(mem:SI (match_dup 4)))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 4"
@@ -539,11 +539,11 @@
(define_insn "*stm3_ia"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 4 "s_register_operand" "rk"))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 8)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"stm%(ia%)\t%4, {%1, %2, %3}"
[(set_attr "type" "store3")
@@ -555,11 +555,11 @@
[(set (match_operand:SI 4 "s_register_operand" "+&rk")
(plus:SI (match_dup 4) (const_int 12)))
(set (mem:SI (match_dup 4))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 8)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"stm%(ia%)\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")
@@ -571,24 +571,24 @@
[(set (match_operand:SI 4 "s_register_operand" "+&l")
(plus:SI (match_dup 4) (const_int 12)))
(set (mem:SI (match_dup 4))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "low_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "low_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 8)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "low_register_operand" ""))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 4"
"stm%(ia%)\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")])
(define_insn "*ldm3_ib"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 4 "s_register_operand" "rk")
(const_int 4))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 12))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
@@ -600,13 +600,13 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 4 "s_register_operand" "+&rk")
(plus:SI (match_dup 4) (const_int 12)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 4))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int 12))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
@@ -617,11 +617,11 @@
(define_insn "*stm3_ib"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 4 "s_register_operand" "rk") (const_int 4)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 12)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
"stm%(ib%)\t%4, {%1, %2, %3}"
[(set_attr "type" "store3")
@@ -632,11 +632,11 @@
[(set (match_operand:SI 4 "s_register_operand" "+&rk")
(plus:SI (match_dup 4) (const_int 12)))
(set (mem:SI (plus:SI (match_dup 4) (const_int 4)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 12)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
"stm%(ib%)\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")
@@ -644,13 +644,13 @@
(define_insn "*ldm3_da"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 4 "s_register_operand" "rk")
(const_int -8))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int -4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 4)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
"ldm%(da%)\t%4, {%1, %2, %3}"
@@ -661,13 +661,13 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 4 "s_register_operand" "+&rk")
(plus:SI (match_dup 4) (const_int -12)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int -8))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int -4))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 4)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
"ldm%(da%)\t%4!, {%1, %2, %3}"
@@ -677,11 +677,11 @@
(define_insn "*stm3_da"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 4 "s_register_operand" "rk") (const_int -8)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int -4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (match_dup 4))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
"stm%(da%)\t%4, {%1, %2, %3}"
[(set_attr "type" "store3")
@@ -692,11 +692,11 @@
[(set (match_operand:SI 4 "s_register_operand" "+&rk")
(plus:SI (match_dup 4) (const_int -12)))
(set (mem:SI (plus:SI (match_dup 4) (const_int -8)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int -4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (match_dup 4))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
"stm%(da%)\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")
@@ -704,13 +704,13 @@
(define_insn "*ldm3_db"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 4 "s_register_operand" "rk")
(const_int -12))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int -8))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
@@ -723,13 +723,13 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 4 "s_register_operand" "+&rk")
(plus:SI (match_dup 4) (const_int -12)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int -12))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int -8))))
- (set (match_operand:SI 3 "arm_hard_register_operand" "")
+ (set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 4)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
@@ -741,11 +741,11 @@
(define_insn "*stm3_db"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 4 "s_register_operand" "rk") (const_int -12)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int -8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int -4)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"stm%(db%)\t%4, {%1, %2, %3}"
[(set_attr "type" "store3")
@@ -757,11 +757,11 @@
[(set (match_operand:SI 4 "s_register_operand" "+&rk")
(plus:SI (match_dup 4) (const_int -12)))
(set (mem:SI (plus:SI (match_dup 4) (const_int -12)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int -8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int -4)))
- (match_operand:SI 3 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
"stm%(db%)\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")
@@ -863,9 +863,9 @@
(define_insn "*ldm2_ia"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_operand:SI 3 "s_register_operand" "rk")))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
@@ -876,9 +876,9 @@
(define_insn "*thumb_ldm2_ia"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "low_register_operand" "")
(mem:SI (match_operand:SI 3 "s_register_operand" "l")))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 2"
@@ -889,9 +889,9 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 3 "s_register_operand" "+&rk")
(plus:SI (match_dup 3) (const_int 8)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 3)))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
@@ -904,9 +904,9 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 3 "s_register_operand" "+&l")
(plus:SI (match_dup 3) (const_int 8)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "low_register_operand" "")
(mem:SI (match_dup 3)))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "low_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 3"
@@ -916,9 +916,9 @@
(define_insn "*stm2_ia"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 3 "s_register_operand" "rk"))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 3) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
"stm%(ia%)\t%3, {%1, %2}"
[(set_attr "type" "store2")
@@ -930,9 +930,9 @@
[(set (match_operand:SI 3 "s_register_operand" "+&rk")
(plus:SI (match_dup 3) (const_int 8)))
(set (mem:SI (match_dup 3))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 3) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"stm%(ia%)\t%3!, {%1, %2}"
[(set_attr "type" "store2")
@@ -944,19 +944,19 @@
[(set (match_operand:SI 3 "s_register_operand" "+&l")
(plus:SI (match_dup 3) (const_int 8)))
(set (mem:SI (match_dup 3))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "low_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 3) (const_int 4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "low_register_operand" ""))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 3"
"stm%(ia%)\t%3!, {%1, %2}"
[(set_attr "type" "store2")])
(define_insn "*ldm2_ib"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 3 "s_register_operand" "rk")
(const_int 4))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int 8))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 2"
@@ -968,10 +968,10 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 3 "s_register_operand" "+&rk")
(plus:SI (match_dup 3) (const_int 8)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int 8))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
@@ -982,9 +982,9 @@
(define_insn "*stm2_ib"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 3 "s_register_operand" "rk") (const_int 4)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 3) (const_int 8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 2"
"stm%(ib%)\t%3, {%1, %2}"
[(set_attr "type" "store2")
@@ -995,9 +995,9 @@
[(set (match_operand:SI 3 "s_register_operand" "+&rk")
(plus:SI (match_dup 3) (const_int 8)))
(set (mem:SI (plus:SI (match_dup 3) (const_int 4)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 3) (const_int 8)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
"stm%(ib%)\t%3!, {%1, %2}"
[(set_attr "type" "store2")
@@ -1005,10 +1005,10 @@
(define_insn "*ldm2_da"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 3 "s_register_operand" "rk")
(const_int -4))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 3)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 2"
"ldm%(da%)\t%3, {%1, %2}"
@@ -1019,10 +1019,10 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 3 "s_register_operand" "+&rk")
(plus:SI (match_dup 3) (const_int -8)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int -4))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 3)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
"ldm%(da%)\t%3!, {%1, %2}"
@@ -1032,9 +1032,9 @@
(define_insn "*stm2_da"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 3 "s_register_operand" "rk") (const_int -4)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (match_dup 3))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 2"
"stm%(da%)\t%3, {%1, %2}"
[(set_attr "type" "store2")
@@ -1045,9 +1045,9 @@
[(set (match_operand:SI 3 "s_register_operand" "+&rk")
(plus:SI (match_dup 3) (const_int -8)))
(set (mem:SI (plus:SI (match_dup 3) (const_int -4)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (match_dup 3))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
"stm%(da%)\t%3!, {%1, %2}"
[(set_attr "type" "store2")
@@ -1055,10 +1055,10 @@
(define_insn "*ldm2_db"
[(match_parallel 0 "load_multiple_operation"
- [(set (match_operand:SI 1 "arm_hard_register_operand" "")
+ [(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_operand:SI 3 "s_register_operand" "rk")
(const_int -8))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
@@ -1071,10 +1071,10 @@
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 3 "s_register_operand" "+&rk")
(plus:SI (match_dup 3) (const_int -8)))
- (set (match_operand:SI 1 "arm_hard_register_operand" "")
+ (set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int -8))))
- (set (match_operand:SI 2 "arm_hard_register_operand" "")
+ (set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (plus:SI (match_dup 3)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
@@ -1086,9 +1086,9 @@
(define_insn "*stm2_db"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (plus:SI (match_operand:SI 3 "s_register_operand" "rk") (const_int -8)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 3) (const_int -4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
"stm%(db%)\t%3, {%1, %2}"
[(set_attr "type" "store2")
@@ -1100,9 +1100,9 @@
[(set (match_operand:SI 3 "s_register_operand" "+&rk")
(plus:SI (match_dup 3) (const_int -8)))
(set (mem:SI (plus:SI (match_dup 3) (const_int -8)))
- (match_operand:SI 1 "arm_hard_register_operand" ""))
+ (match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 3) (const_int -4)))
- (match_operand:SI 2 "arm_hard_register_operand" ""))])]
+ (match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
"stm%(db%)\t%3!, {%1, %2}"
[(set_attr "type" "store2")
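
Taken together, the ldmstm.md hunks above tighten every core load/store-multiple pattern: arm_hard_general_register_operand rejects the VFP/NEON registers that the old arm_hard_register_operand accepted, and the Thumb-1 variants now require low_register_operand, matching the register range the 16-bit LDM/STM encodings can actually name. A minimal C sketch of the kind of code that exercises these patterns (illustrative only; whether ldm/stm is emitted depends on optimization level and tuning):

/* Block copies of adjacent words are the classic source of
   multiple-register ldm/stm sequences on ARM.  */
struct words { int a, b, c, d; };

void
copy_words (struct words *dst, const struct words *src)
{
  *dst = *src;  /* may compile to an ldm/stm pair over core registers */
}
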
diff --git a/gcc/config/arm/ldrdstrd.md b/gcc/config/arm/ldrdstrd.md
index 58c883ef1cb..064033aaacd 100644
--- a/gcc/config/arm/ldrdstrd.md
+++ b/gcc/config/arm/ldrdstrd.md
@@ -1,6 +1,6 @@
;; ARM ldrd/strd peephole optimizations.
;;
-;; Copyright (C) 2013 Free Software Foundation, Inc.
+;; Copyright (C) 2013-2014 Free Software Foundation, Inc.
;;
;; Written by Greta Yorsh <greta.yorsh@arm.com>
diff --git a/gcc/config/arm/linux-eabi.h b/gcc/config/arm/linux-eabi.h
index 232c38d28ff..f1f3448f1b5 100644
--- a/gcc/config/arm/linux-eabi.h
+++ b/gcc/config/arm/linux-eabi.h
@@ -1,5 +1,5 @@
/* Configuration file for ARM GNU/Linux EABI targets.
- Copyright (C) 2004-2013 Free Software Foundation, Inc.
+ Copyright (C) 2004-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC
This file is part of GCC.
diff --git a/gcc/config/arm/linux-elf.h b/gcc/config/arm/linux-elf.h
index 475e22079fc..5dc3328e839 100644
--- a/gcc/config/arm/linux-elf.h
+++ b/gcc/config/arm/linux-elf.h
@@ -1,5 +1,5 @@
/* Definitions for ARM running Linux-based GNU systems using ELF
- Copyright (C) 1993-2013 Free Software Foundation, Inc.
+ Copyright (C) 1993-2014 Free Software Foundation, Inc.
Contributed by Philip Blundell <philb@gnu.org>
This file is part of GCC.
diff --git a/gcc/config/arm/linux-gas.h b/gcc/config/arm/linux-gas.h
index 3312703e3a7..52a739c2674 100644
--- a/gcc/config/arm/linux-gas.h
+++ b/gcc/config/arm/linux-gas.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler.
ARM Linux-based GNU systems version.
- Copyright (C) 1997-2013 Free Software Foundation, Inc.
+ Copyright (C) 1997-2014 Free Software Foundation, Inc.
Contributed by Russell King <rmk92@ecs.soton.ac.uk>.
This file is part of GCC.
diff --git a/gcc/config/arm/marvell-f-iwmmxt.md b/gcc/config/arm/marvell-f-iwmmxt.md
index 395549fd434..9968803ca98 100644
--- a/gcc/config/arm/marvell-f-iwmmxt.md
+++ b/gcc/config/arm/marvell-f-iwmmxt.md
@@ -1,5 +1,5 @@
;; Marvell WMMX2 pipeline description
-;; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
;; Written by Marvell, Inc.
;; This file is part of GCC.
diff --git a/gcc/config/arm/marvell-pj4.md b/gcc/config/arm/marvell-pj4.md
index 880789600e0..0b9d6ebada1 100644
--- a/gcc/config/arm/marvell-pj4.md
+++ b/gcc/config/arm/marvell-pj4.md
@@ -1,5 +1,5 @@
;; Marvell ARM Processor Pipeline Description
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Contributed by Marvell.
;; This file is part of GCC.
diff --git a/gcc/config/arm/mmintrin.h b/gcc/config/arm/mmintrin.h
index 7e0360f823f..b906faca447 100644
--- a/gcc/config/arm/mmintrin.h
+++ b/gcc/config/arm/mmintrin.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
This file is part of GCC.
diff --git a/gcc/config/arm/neon-docgen.ml b/gcc/config/arm/neon-docgen.ml
index f17314f2ab3..5788a533e19 100644
--- a/gcc/config/arm/neon-docgen.ml
+++ b/gcc/config/arm/neon-docgen.ml
@@ -1,6 +1,6 @@
(* ARM NEON documentation generator.
- Copyright (C) 2006-2013 Free Software Foundation, Inc.
+ Copyright (C) 2006-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
@@ -322,13 +322,92 @@ let document_group chan (group_title, group_extractor) =
let gnu_header chan =
List.iter (fun s -> Printf.fprintf chan "%s\n" s) [
- "@c Copyright (C) 2006-2013 Free Software Foundation, Inc.";
+ "@c Copyright (C) 2006-2014 Free Software Foundation, Inc.";
"@c This is part of the GCC manual.";
"@c For copying conditions, see the file gcc.texi.";
"";
"@c This file is generated automatically using gcc/config/arm/neon-docgen.ml";
"@c Please do not edit manually."]
+let crypto_doc =
+"
+@itemize @bullet
+@item poly128_t vldrq_p128(poly128_t const *)
+@end itemize
+
+@itemize @bullet
+@item void vstrq_p128(poly128_t *, poly128_t)
+@end itemize
+
+@itemize @bullet
+@item uint64x1_t vceq_p64 (poly64x1_t, poly64x1_t)
+@end itemize
+
+@itemize @bullet
+@item uint64x1_t vtst_p64 (poly64x1_t, poly64x1_t)
+@end itemize
+
+@itemize @bullet
+@item uint32_t vsha1h_u32 (uint32_t)
+@*@emph{Form of expected instruction(s):} @code{sha1h.32 @var{q0}, @var{q1}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1cq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1c.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1pq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1p.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1mq_u32 (uint32x4_t, uint32_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1m.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1su0q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1su0.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha1su1q_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha1su1.32 @var{q0}, @var{q1}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256hq_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256h.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256h2q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256h2.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256su0q_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256su0.32 @var{q0}, @var{q1}}
+@end itemize
+
+@itemize @bullet
+@item uint32x4_t vsha256su1q_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{sha256su1.32 @var{q0}, @var{q1}, @var{q2}}
+@end itemize
+
+@itemize @bullet
+@item poly128_t vmull_p64 (poly64_t a, poly64_t b)
+@*@emph{Form of expected instruction(s):} @code{vmull.p64 @var{q0}, @var{d1}, @var{d2}}
+@end itemize
+
+@itemize @bullet
+@item poly128_t vmull_high_p64 (poly64x2_t a, poly64x2_t b)
+@*@emph{Form of expected instruction(s):} @code{vmull.p64 @var{q0}, @var{d1}, @var{d2}}
+@end itemize
+"
+
(* Program entry point. *)
let _ =
if Array.length Sys.argv <> 2 then
@@ -339,6 +418,7 @@ let _ =
let chan = open_out file in
gnu_header chan;
List.iter (document_group chan) intrinsic_groups;
+ Printf.fprintf chan "%s\n" crypto_doc;
close_out chan
with Sys_error sys ->
failwith ("Could not create output file " ^ file ^ ": " ^ sys)
diff --git a/gcc/config/arm/neon-gen.ml b/gcc/config/arm/neon-gen.ml
index 948b162ccfa..f3dd86b0ace 100644
--- a/gcc/config/arm/neon-gen.ml
+++ b/gcc/config/arm/neon-gen.ml
@@ -1,5 +1,5 @@
(* Auto-generate ARM Neon intrinsics header file.
- Copyright (C) 2006-2013 Free Software Foundation, Inc.
+ Copyright (C) 2006-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
@@ -114,6 +114,7 @@ let rec signed_ctype = function
| T_uint32x4 -> T_int32x4
| T_uint64x1 -> T_int64x1
| T_uint64x2 -> T_int64x2
+ | T_poly64x2 -> T_int64x2
(* Cast to types defined by mode in arm.c, not random types pulled in from
the <stdint.h> header in use. This fixes incompatible pointer errors when
compiling with C++. *)
@@ -125,6 +126,8 @@ let rec signed_ctype = function
| T_float32 -> T_floatSF
| T_poly8 -> T_intQI
| T_poly16 -> T_intHI
+ | T_poly64 -> T_intDI
+ | T_poly128 -> T_intTI
| T_arrayof (n, elt) -> T_arrayof (n, signed_ctype elt)
| T_ptrto elt -> T_ptrto (signed_ctype elt)
| T_const elt -> T_const (signed_ctype elt)
@@ -362,80 +365,96 @@ let print_ops ops =
abase : "ARM" base name for the type (i.e. int in int8x8_t).
esize : element size.
enum : element count.
+ alevel: architecture level at which available.
*)
+type fpulevel = CRYPTO | ALL
+
let deftypes () =
let typeinfo = [
(* Doubleword vector types. *)
- "__builtin_neon_qi", "int", 8, 8;
- "__builtin_neon_hi", "int", 16, 4;
- "__builtin_neon_si", "int", 32, 2;
- "__builtin_neon_di", "int", 64, 1;
- "__builtin_neon_hf", "float", 16, 4;
- "__builtin_neon_sf", "float", 32, 2;
- "__builtin_neon_poly8", "poly", 8, 8;
- "__builtin_neon_poly16", "poly", 16, 4;
- "__builtin_neon_uqi", "uint", 8, 8;
- "__builtin_neon_uhi", "uint", 16, 4;
- "__builtin_neon_usi", "uint", 32, 2;
- "__builtin_neon_udi", "uint", 64, 1;
+ "__builtin_neon_qi", "int", 8, 8, ALL;
+ "__builtin_neon_hi", "int", 16, 4, ALL;
+ "__builtin_neon_si", "int", 32, 2, ALL;
+ "__builtin_neon_di", "int", 64, 1, ALL;
+ "__builtin_neon_hf", "float", 16, 4, ALL;
+ "__builtin_neon_sf", "float", 32, 2, ALL;
+ "__builtin_neon_poly8", "poly", 8, 8, ALL;
+ "__builtin_neon_poly16", "poly", 16, 4, ALL;
+ "__builtin_neon_poly64", "poly", 64, 1, CRYPTO;
+ "__builtin_neon_uqi", "uint", 8, 8, ALL;
+ "__builtin_neon_uhi", "uint", 16, 4, ALL;
+ "__builtin_neon_usi", "uint", 32, 2, ALL;
+ "__builtin_neon_udi", "uint", 64, 1, ALL;
(* Quadword vector types. *)
- "__builtin_neon_qi", "int", 8, 16;
- "__builtin_neon_hi", "int", 16, 8;
- "__builtin_neon_si", "int", 32, 4;
- "__builtin_neon_di", "int", 64, 2;
- "__builtin_neon_sf", "float", 32, 4;
- "__builtin_neon_poly8", "poly", 8, 16;
- "__builtin_neon_poly16", "poly", 16, 8;
- "__builtin_neon_uqi", "uint", 8, 16;
- "__builtin_neon_uhi", "uint", 16, 8;
- "__builtin_neon_usi", "uint", 32, 4;
- "__builtin_neon_udi", "uint", 64, 2
+ "__builtin_neon_qi", "int", 8, 16, ALL;
+ "__builtin_neon_hi", "int", 16, 8, ALL;
+ "__builtin_neon_si", "int", 32, 4, ALL;
+ "__builtin_neon_di", "int", 64, 2, ALL;
+ "__builtin_neon_sf", "float", 32, 4, ALL;
+ "__builtin_neon_poly8", "poly", 8, 16, ALL;
+ "__builtin_neon_poly16", "poly", 16, 8, ALL;
+ "__builtin_neon_poly64", "poly", 64, 2, CRYPTO;
+ "__builtin_neon_uqi", "uint", 8, 16, ALL;
+ "__builtin_neon_uhi", "uint", 16, 8, ALL;
+ "__builtin_neon_usi", "uint", 32, 4, ALL;
+ "__builtin_neon_udi", "uint", 64, 2, ALL
] in
List.iter
- (fun (cbase, abase, esize, enum) ->
+ (fun (cbase, abase, esize, enum, fpulevel) ->
let attr =
match enum with
1 -> ""
| _ -> Printf.sprintf "\t__attribute__ ((__vector_size__ (%d)))"
(esize * enum / 8) in
- Format.printf "typedef %s %s%dx%d_t%s;@\n" cbase abase esize enum attr)
+ if fpulevel == CRYPTO then
+ Format.printf "#ifdef __ARM_FEATURE_CRYPTO\n";
+ Format.printf "typedef %s %s%dx%d_t%s;@\n" cbase abase esize enum attr;
+ if fpulevel == CRYPTO then
+ Format.printf "#endif\n";)
typeinfo;
Format.print_newline ();
(* Extra types not in <stdint.h>. *)
Format.printf "typedef float float32_t;\n";
Format.printf "typedef __builtin_neon_poly8 poly8_t;\n";
- Format.printf "typedef __builtin_neon_poly16 poly16_t;\n"
+ Format.printf "typedef __builtin_neon_poly16 poly16_t;\n";
+ Format.printf "#ifdef __ARM_FEATURE_CRYPTO\n";
+ Format.printf "typedef __builtin_neon_poly64 poly64_t;\n";
+ Format.printf "typedef __builtin_neon_poly128 poly128_t;\n";
+ Format.printf "#endif\n"
-(* Output structs containing arrays, for load & store instructions etc. *)
+(* Output structs containing arrays, for load & store instructions etc.
+ poly128_t is deliberately not included here because it has no array types
+ defined for it. *)
let arrtypes () =
let typeinfo = [
- "int", 8; "int", 16;
- "int", 32; "int", 64;
- "uint", 8; "uint", 16;
- "uint", 32; "uint", 64;
- "float", 32; "poly", 8;
- "poly", 16
+ "int", 8, ALL; "int", 16, ALL;
+ "int", 32, ALL; "int", 64, ALL;
+ "uint", 8, ALL; "uint", 16, ALL;
+ "uint", 32, ALL; "uint", 64, ALL;
+ "float", 32, ALL; "poly", 8, ALL;
+ "poly", 16, ALL; "poly", 64, CRYPTO
] in
- let writestruct elname elsize regsize arrsize =
+ let writestruct elname elsize regsize arrsize fpulevel =
let elnum = regsize / elsize in
let structname =
Printf.sprintf "%s%dx%dx%d_t" elname elsize elnum arrsize in
let sfmt = start_function () in
- Format.printf "typedef struct %s" structname;
+ Format.printf "%stypedef struct %s"
+ (if fpulevel == CRYPTO then "#ifdef __ARM_FEATURE_CRYPTO\n" else "") structname;
open_braceblock sfmt;
Format.printf "%s%dx%d_t val[%d];" elname elsize elnum arrsize;
close_braceblock sfmt;
- Format.printf " %s;" structname;
+ Format.printf " %s;%s" structname (if fpulevel == CRYPTO then "\n#endif\n" else "");
end_function sfmt;
in
for n = 2 to 4 do
List.iter
- (fun (elname, elsize) ->
- writestruct elname elsize 64 n;
- writestruct elname elsize 128 n)
+ (fun (elname, elsize, alevel) ->
+ writestruct elname elsize 64 n alevel;
+ writestruct elname elsize 128 n alevel)
typeinfo
done
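
With the new availability level threaded through deftypes and arrtypes, every crypto-only type in the generated arm_neon.h is wrapped in a feature guard. A sketch of the expected output, reconstructed from the printf calls above (illustrative, not verbatim generator output):

#ifdef __ARM_FEATURE_CRYPTO
typedef __builtin_neon_poly64 poly64x1_t;
#endif
#ifdef __ARM_FEATURE_CRYPTO
typedef __builtin_neon_poly64 poly64x2_t __attribute__ ((__vector_size__ (16)));
#endif
#ifdef __ARM_FEATURE_CRYPTO
typedef __builtin_neon_poly64 poly64_t;
typedef __builtin_neon_poly128 poly128_t;
#endif
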
@@ -448,7 +467,7 @@ let _ =
"/* ARM NEON intrinsics include file. This file is generated automatically";
" using neon-gen.ml. Please do not edit manually.";
"";
-" Copyright (C) 2006-2013 Free Software Foundation, Inc.";
+" Copyright (C) 2006-2014 Free Software Foundation, Inc.";
" Contributed by CodeSourcery.";
"";
" This file is part of GCC.";
@@ -491,6 +510,8 @@ let _ =
print_ops ops;
Format.print_newline ();
print_ops reinterp;
+ print_ops reinterpq;
+ Format.printf "%s" crypto_intrinsics;
print_lines [
"#ifdef __cplusplus";
"}";
diff --git a/gcc/config/arm/neon-testgen.ml b/gcc/config/arm/neon-testgen.ml
index 543318bfcc6..df429f59e27 100644
--- a/gcc/config/arm/neon-testgen.ml
+++ b/gcc/config/arm/neon-testgen.ml
@@ -1,5 +1,5 @@
(* Auto-generate ARM Neon intrinsics tests.
- Copyright (C) 2006-2013 Free Software Foundation, Inc.
+ Copyright (C) 2006-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
@@ -167,6 +167,7 @@ let effective_target features =
| _ -> false)
features with
Requires_feature "FMA" -> "arm_neonv2"
+ | Requires_feature "CRYPTO" -> "arm_crypto"
| Requires_arch 8 -> "arm_v8_neon"
| Requires_FP_bit 1 -> "arm_neon_fp16"
| _ -> assert false
@@ -300,5 +301,5 @@ let test_intrinsic_group dir (opcode, features, shape, name, munge, types) =
(* Program entry point. *)
let _ =
let directory = if Array.length Sys.argv <> 1 then Sys.argv.(1) else "." in
- List.iter (test_intrinsic_group directory) (reinterp @ ops)
+ List.iter (test_intrinsic_group directory) (reinterp @ reinterpq @ ops)
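
Mapping Requires_feature "CRYPTO" to the arm_crypto effective target means every generated crypto test carries matching DejaGnu guards. A hedged sketch of a generated test preamble (directive spellings assumed from the usual *_ok/dg-add-options convention):

/* Sketch of a generated crypto-intrinsic test header.  */
/* { dg-do assemble } */
/* { dg-require-effective-target arm_crypto_ok } */
/* { dg-options "-save-temps -O0" } */
/* { dg-add-options arm_crypto } */

#include "arm_neon.h"
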
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index b2ac45e65f9..2f06e42ed31 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -1,5 +1,5 @@
;; ARM NEON coprocessor Machine Description
-;; Copyright (C) 2006-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2006-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery.
;;
;; This file is part of GCC.
@@ -4259,9 +4259,19 @@
DONE;
})
+(define_expand "neon_vreinterpretti<mode>"
+ [(match_operand:TI 0 "s_register_operand" "")
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
+ "TARGET_NEON"
+{
+ neon_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+
(define_expand "neon_vreinterpretv16qi<mode>"
[(match_operand:V16QI 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
@@ -4270,7 +4280,7 @@
(define_expand "neon_vreinterpretv8hi<mode>"
[(match_operand:V8HI 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
@@ -4279,7 +4289,7 @@
(define_expand "neon_vreinterpretv4si<mode>"
[(match_operand:V4SI 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
@@ -4288,7 +4298,7 @@
(define_expand "neon_vreinterpretv4sf<mode>"
[(match_operand:V4SF 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
@@ -4297,7 +4307,7 @@
(define_expand "neon_vreinterpretv2di<mode>"
[(match_operand:V2DI 0 "s_register_operand" "")
- (match_operand:VQX 1 "s_register_operand" "")]
+ (match_operand:VQXMOV 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
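
The new neon_vreinterpretti<mode> expander, together with widening the remaining expanders from VQX to VQXMOV, is what backs the TImode (poly128_t) reinterpret intrinsics. A hedged C usage sketch:

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* Both casts are pure register reinterprets and should emit no
   instructions; illustrative only.  */
poly128_t
roundtrip (poly128_t x)
{
  uint32x4_t words = vreinterpretq_u32_p128 (x);
  return vreinterpretq_p128_u32 (words);
}
#endif
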
diff --git a/gcc/config/arm/neon.ml b/gcc/config/arm/neon.ml
index ca9a4c06aa6..4289b8ce005 100644
--- a/gcc/config/arm/neon.ml
+++ b/gcc/config/arm/neon.ml
@@ -1,7 +1,7 @@
(* Common code for ARM NEON header file, documentation and test case
generators.
- Copyright (C) 2006-2013 Free Software Foundation, Inc.
+ Copyright (C) 2006-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
@@ -22,7 +22,7 @@
(* Shorthand types for vector elements. *)
type elts = S8 | S16 | S32 | S64 | F16 | F32 | U8 | U16 | U32 | U64 | P8 | P16
- | I8 | I16 | I32 | I64 | B8 | B16 | B32 | B64 | Conv of elts * elts
+ | P64 | P128 | I8 | I16 | I32 | I64 | B8 | B16 | B32 | B64 | Conv of elts * elts
| Cast of elts * elts | NoElts
type eltclass = Signed | Unsigned | Float | Poly | Int | Bits
@@ -47,13 +47,15 @@ type vectype = T_int8x8 | T_int8x16
| T_uint8 | T_uint16
| T_uint32 | T_uint64
| T_poly8 | T_poly16
+ | T_poly64 | T_poly64x1
+ | T_poly64x2 | T_poly128
| T_float16 | T_float32
| T_arrayof of int * vectype
| T_ptrto of vectype | T_const of vectype
| T_void | T_intQI
| T_intHI | T_intSI
- | T_intDI | T_floatHF
- | T_floatSF
+ | T_intDI | T_intTI
+ | T_floatHF | T_floatSF
(* The meanings of the following are:
TImode : "Tetra", two registers (four words).
@@ -96,7 +98,7 @@ type arity = Arity0 of vectype
| Arity4 of vectype * vectype * vectype * vectype * vectype
type vecmode = V8QI | V4HI | V4HF |V2SI | V2SF | DI
- | V16QI | V8HI | V4SI | V4SF | V2DI
+ | V16QI | V8HI | V4SI | V4SF | V2DI | TI
| QI | HI | SI | SF
type opcode =
@@ -299,7 +301,8 @@ let rec elt_width = function
S8 | U8 | P8 | I8 | B8 -> 8
| S16 | U16 | P16 | I16 | B16 | F16 -> 16
| S32 | F32 | U32 | I32 | B32 -> 32
- | S64 | U64 | I64 | B64 -> 64
+ | S64 | U64 | P64 | I64 | B64 -> 64
+ | P128 -> 128
| Conv (a, b) ->
let wa = elt_width a and wb = elt_width b in
if wa = wb then wa else raise (MixedMode (a, b))
@@ -309,7 +312,7 @@ let rec elt_width = function
let rec elt_class = function
S8 | S16 | S32 | S64 -> Signed
| U8 | U16 | U32 | U64 -> Unsigned
- | P8 | P16 -> Poly
+ | P8 | P16 | P64 | P128 -> Poly
| F16 | F32 -> Float
| I8 | I16 | I32 | I64 -> Int
| B8 | B16 | B32 | B64 -> Bits
@@ -330,6 +333,8 @@ let elt_of_class_width c w =
| Unsigned, 64 -> U64
| Poly, 8 -> P8
| Poly, 16 -> P16
+ | Poly, 64 -> P64
+ | Poly, 128 -> P128
| Int, 8 -> I8
| Int, 16 -> I16
| Int, 32 -> I32
@@ -402,7 +407,7 @@ let rec mode_of_elt ?argpos elt shape =
Float | ConvClass(_, Float) -> true | _ -> false in
let idx =
match elt_width elt with
- 8 -> 0 | 16 -> 1 | 32 -> 2 | 64 -> 3
+ 8 -> 0 | 16 -> 1 | 32 -> 2 | 64 -> 3 | 128 -> 4
| _ -> failwith "Bad element width"
in match shape with
All (_, Dreg) | By_scalar Dreg | Pair_result Dreg | Unary_scalar Dreg
@@ -413,7 +418,7 @@ let rec mode_of_elt ?argpos elt shape =
[| V8QI; V4HI; V2SI; DI |].(idx)
| All (_, Qreg) | By_scalar Qreg | Pair_result Qreg | Unary_scalar Qreg
| Binary_imm Qreg | Long_noreg Qreg | Wide_noreg Qreg ->
- [| V16QI; V8HI; if flt then V4SF else V4SI; V2DI |].(idx)
+      [| V16QI; V8HI; if flt then V4SF else V4SI; V2DI; TI |].(idx)
| All (_, (Corereg | PtrTo _ | CstPtrTo _)) ->
[| QI; HI; if flt then SF else SI; DI |].(idx)
| Long | Wide | Wide_lane | Wide_scalar
@@ -474,6 +479,8 @@ let type_for_elt shape elt no =
| U16 -> T_uint16x4
| U32 -> T_uint32x2
| U64 -> T_uint64x1
+ | P64 -> T_poly64x1
+ | P128 -> T_poly128
| F16 -> T_float16x4
| F32 -> T_float32x2
| P8 -> T_poly8x8
@@ -493,6 +500,8 @@ let type_for_elt shape elt no =
| F32 -> T_float32x4
| P8 -> T_poly8x16
| P16 -> T_poly16x8
+ | P64 -> T_poly64x2
+ | P128 -> T_poly128
| _ -> failwith "Bad elt type for Qreg"
end
| Corereg ->
@@ -507,6 +516,8 @@ let type_for_elt shape elt no =
| U64 -> T_uint64
| P8 -> T_poly8
| P16 -> T_poly16
+ | P64 -> T_poly64
+ | P128 -> T_poly128
| F32 -> T_float32
| _ -> failwith "Bad elt type for Corereg"
end
@@ -527,10 +538,10 @@ let type_for_elt shape elt no =
let vectype_size = function
T_int8x8 | T_int16x4 | T_int32x2 | T_int64x1
| T_uint8x8 | T_uint16x4 | T_uint32x2 | T_uint64x1
- | T_float32x2 | T_poly8x8 | T_poly16x4 | T_float16x4 -> 64
+ | T_float32x2 | T_poly8x8 | T_poly64x1 | T_poly16x4 | T_float16x4 -> 64
| T_int8x16 | T_int16x8 | T_int32x4 | T_int64x2
| T_uint8x16 | T_uint16x8 | T_uint32x4 | T_uint64x2
- | T_float32x4 | T_poly8x16 | T_poly16x8 -> 128
+ | T_float32x4 | T_poly8x16 | T_poly64x2 | T_poly16x8 -> 128
| _ -> raise Not_found
let inttype_for_array num elttype =
@@ -1041,14 +1052,22 @@ let ops =
"vRsraQ_n", shift_right_acc, su_8_64;
(* Vector shift right and insert. *)
+ Vsri, [Requires_feature "CRYPTO"], Use_operands [| Dreg; Dreg; Immed |], "vsri_n", shift_insert,
+ [P64];
Vsri, [], Use_operands [| Dreg; Dreg; Immed |], "vsri_n", shift_insert,
P8 :: P16 :: su_8_64;
+ Vsri, [Requires_feature "CRYPTO"], Use_operands [| Qreg; Qreg; Immed |], "vsriQ_n", shift_insert,
+ [P64];
Vsri, [], Use_operands [| Qreg; Qreg; Immed |], "vsriQ_n", shift_insert,
P8 :: P16 :: su_8_64;
(* Vector shift left and insert. *)
+ Vsli, [Requires_feature "CRYPTO"], Use_operands [| Dreg; Dreg; Immed |], "vsli_n", shift_insert,
+ [P64];
Vsli, [], Use_operands [| Dreg; Dreg; Immed |], "vsli_n", shift_insert,
P8 :: P16 :: su_8_64;
+ Vsli, [Requires_feature "CRYPTO"], Use_operands [| Qreg; Qreg; Immed |], "vsliQ_n", shift_insert,
+ [P64];
Vsli, [], Use_operands [| Qreg; Qreg; Immed |], "vsliQ_n", shift_insert,
P8 :: P16 :: su_8_64;
@@ -1135,6 +1154,11 @@ let ops =
(* Create vector from literal bit pattern. *)
Vcreate,
+ [Requires_feature "CRYPTO"; No_op], (* Not really, but it can yield various things that are too
+ hard for the test generator at this time. *)
+ Use_operands [| Dreg; Corereg |], "vcreate", create_vector,
+ [P64];
+ Vcreate,
[No_op], (* Not really, but it can yield various things that are too
hard for the test generator at this time. *)
Use_operands [| Dreg; Corereg |], "vcreate", create_vector,
@@ -1148,12 +1172,25 @@ let ops =
Use_operands [| Dreg; Corereg |], "vdup_n", bits_1,
pf_su_8_32;
Vdup_n,
+ [No_op; Requires_feature "CRYPTO";
+ Instruction_name ["vmov"];
+ Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |]]],
+ Use_operands [| Dreg; Corereg |], "vdup_n", notype_1,
+ [P64];
+ Vdup_n,
[No_op;
Instruction_name ["vmov"];
Disassembles_as [Use_operands [| Dreg; Corereg; Corereg |]]],
Use_operands [| Dreg; Corereg |], "vdup_n", notype_1,
[S64; U64];
Vdup_n,
+ [No_op; Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| Qreg;
+ Alternatives [ Corereg;
+ Element_of_dreg ] |]]],
+ Use_operands [| Qreg; Corereg |], "vdupQ_n", bits_1,
+ [P64];
+ Vdup_n,
[Disassembles_as [Use_operands [| Qreg;
Alternatives [ Corereg;
Element_of_dreg ] |]]],
@@ -1206,21 +1243,33 @@ let ops =
[Disassembles_as [Use_operands [| Dreg; Element_of_dreg |]]],
Unary_scalar Dreg, "vdup_lane", bits_2, pf_su_8_32;
Vdup_lane,
+ [No_op; Requires_feature "CRYPTO"; Const_valuator (fun _ -> 0)],
+ Unary_scalar Dreg, "vdup_lane", bits_2, [P64];
+ Vdup_lane,
[No_op; Const_valuator (fun _ -> 0)],
Unary_scalar Dreg, "vdup_lane", bits_2, [S64; U64];
Vdup_lane,
[Disassembles_as [Use_operands [| Qreg; Element_of_dreg |]]],
Unary_scalar Qreg, "vdupQ_lane", bits_2, pf_su_8_32;
Vdup_lane,
+ [No_op; Requires_feature "CRYPTO"; Const_valuator (fun _ -> 0)],
+ Unary_scalar Qreg, "vdupQ_lane", bits_2, [P64];
+ Vdup_lane,
[No_op; Const_valuator (fun _ -> 0)],
Unary_scalar Qreg, "vdupQ_lane", bits_2, [S64; U64];
(* Combining vectors. *)
+ Vcombine, [Requires_feature "CRYPTO"; No_op],
+ Use_operands [| Qreg; Dreg; Dreg |], "vcombine", notype_2,
+ [P64];
Vcombine, [No_op],
Use_operands [| Qreg; Dreg; Dreg |], "vcombine", notype_2,
pf_su_8_64;
(* Splitting vectors. *)
+ Vget_high, [Requires_feature "CRYPTO"; No_op],
+ Use_operands [| Dreg; Qreg |], "vget_high",
+ notype_1, [P64];
Vget_high, [No_op],
Use_operands [| Dreg; Qreg |], "vget_high",
notype_1, pf_su_8_64;
@@ -1229,7 +1278,10 @@ let ops =
Fixed_vector_reg],
Use_operands [| Dreg; Qreg |], "vget_low",
notype_1, pf_su_8_32;
- Vget_low, [No_op],
+ Vget_low, [Requires_feature "CRYPTO"; No_op],
+ Use_operands [| Dreg; Qreg |], "vget_low",
+ notype_1, [P64];
+ Vget_low, [No_op],
Use_operands [| Dreg; Qreg |], "vget_low",
notype_1, [S64; U64];
@@ -1412,9 +1464,15 @@ let ops =
[S16; S32];
(* Vector extract. *)
+ Vext, [Requires_feature "CRYPTO"; Const_valuator (fun _ -> 0)],
+ Use_operands [| Dreg; Dreg; Dreg; Immed |], "vext", extend,
+ [P64];
Vext, [Const_valuator (fun _ -> 0)],
Use_operands [| Dreg; Dreg; Dreg; Immed |], "vext", extend,
pf_su_8_64;
+ Vext, [Requires_feature "CRYPTO"; Const_valuator (fun _ -> 0)],
+ Use_operands [| Qreg; Qreg; Qreg; Immed |], "vextQ", extend,
+ [P64];
Vext, [Const_valuator (fun _ -> 0)],
Use_operands [| Qreg; Qreg; Qreg; Immed |], "vextQ", extend,
pf_su_8_64;
@@ -1435,11 +1493,21 @@ let ops =
(* Bit selection. *)
Vbsl,
+ [Requires_feature "CRYPTO"; Instruction_name ["vbsl"; "vbit"; "vbif"];
+ Disassembles_as [Use_operands [| Dreg; Dreg; Dreg |]]],
+ Use_operands [| Dreg; Dreg; Dreg; Dreg |], "vbsl", bit_select,
+ [P64];
+ Vbsl,
[Instruction_name ["vbsl"; "vbit"; "vbif"];
Disassembles_as [Use_operands [| Dreg; Dreg; Dreg |]]],
Use_operands [| Dreg; Dreg; Dreg; Dreg |], "vbsl", bit_select,
pf_su_8_64;
Vbsl,
+ [Requires_feature "CRYPTO"; Instruction_name ["vbsl"; "vbit"; "vbif"];
+ Disassembles_as [Use_operands [| Qreg; Qreg; Qreg |]]],
+ Use_operands [| Qreg; Qreg; Qreg; Qreg |], "vbslQ", bit_select,
+ [P64];
+ Vbsl,
[Instruction_name ["vbsl"; "vbit"; "vbif"];
Disassembles_as [Use_operands [| Qreg; Qreg; Qreg |]]],
Use_operands [| Qreg; Qreg; Qreg; Qreg |], "vbslQ", bit_select,
@@ -1461,10 +1529,21 @@ let ops =
(* Element/structure loads. VLD1 variants. *)
Vldx 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Dreg; CstPtrTo Corereg |], "vld1", bits_1,
+ [P64];
+ Vldx 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Dreg; CstPtrTo Corereg |], "vld1", bits_1,
pf_su_8_64;
+ Vldx 1, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q", bits_1,
+ [P64];
Vldx 1, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q", bits_1,
@@ -1476,6 +1555,13 @@ let ops =
Use_operands [| Dreg; CstPtrTo Corereg; Dreg; Immed |],
"vld1_lane", bits_3, pf_su_8_32;
Vldx_lane 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]];
+ Const_valuator (fun _ -> 0)],
+ Use_operands [| Dreg; CstPtrTo Corereg; Dreg; Immed |],
+ "vld1_lane", bits_3, [P64];
+ Vldx_lane 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]];
Const_valuator (fun _ -> 0)],
@@ -1487,6 +1573,12 @@ let ops =
Use_operands [| Qreg; CstPtrTo Corereg; Qreg; Immed |],
"vld1Q_lane", bits_3, pf_su_8_32;
Vldx_lane 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg; Qreg; Immed |],
+ "vld1Q_lane", bits_3, [P64];
+ Vldx_lane 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Qreg; CstPtrTo Corereg; Qreg; Immed |],
@@ -1498,6 +1590,12 @@ let ops =
Use_operands [| Dreg; CstPtrTo Corereg |], "vld1_dup",
bits_1, pf_su_8_32;
Vldx_dup 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Dreg; CstPtrTo Corereg |], "vld1_dup",
+ bits_1, [P64];
+ Vldx_dup 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Dreg; CstPtrTo Corereg |], "vld1_dup",
@@ -1510,16 +1608,32 @@ let ops =
(* Treated identically to vld1_dup above as we now
do a single load followed by a duplicate. *)
Vldx_dup 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q_dup",
+ bits_1, [P64];
+ Vldx_dup 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| Qreg; CstPtrTo Corereg |], "vld1Q_dup",
bits_1, [S64; U64];
(* VST1 variants. *)
+ Vstx 1, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Dreg |], "vst1",
+ store_1, [P64];
Vstx 1, [Disassembles_as [Use_operands [| VecArray (1, Dreg);
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; Dreg |], "vst1",
store_1, pf_su_8_64;
+ Vstx 1, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Qreg |], "vst1Q",
+ store_1, [P64];
Vstx 1, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; Qreg |], "vst1Q",
@@ -1531,6 +1645,13 @@ let ops =
Use_operands [| PtrTo Corereg; Dreg; Immed |],
"vst1_lane", store_3, pf_su_8_32;
Vstx_lane 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]];
+ Const_valuator (fun _ -> 0)],
+ Use_operands [| PtrTo Corereg; Dreg; Immed |],
+ "vst1_lane", store_3, [P64];
+ Vstx_lane 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]];
Const_valuator (fun _ -> 0)],
@@ -1542,6 +1663,12 @@ let ops =
Use_operands [| PtrTo Corereg; Qreg; Immed |],
"vst1Q_lane", store_3, pf_su_8_32;
Vstx_lane 1,
+ [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (1, Dreg);
+ CstPtrTo Corereg |]]],
+ Use_operands [| PtrTo Corereg; Qreg; Immed |],
+ "vst1Q_lane", store_3, [P64];
+ Vstx_lane 1,
[Disassembles_as [Use_operands [| VecArray (1, Dreg);
CstPtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; Qreg; Immed |],
@@ -1550,6 +1677,9 @@ let ops =
(* VLD2 variants. *)
Vldx 2, [], Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
"vld2", bits_1, pf_su_8_32;
+ Vldx 2, [Requires_feature "CRYPTO"; Instruction_name ["vld1"]],
+ Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
+ "vld2", bits_1, [P64];
Vldx 2, [Instruction_name ["vld1"]],
Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
"vld2", bits_1, [S64; U64];
@@ -1581,6 +1711,12 @@ let ops =
Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
"vld2_dup", bits_1, pf_su_8_32;
Vldx_dup 2,
+ [Requires_feature "CRYPTO";
+ Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (2, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
+ "vld2_dup", bits_1, [P64];
+ Vldx_dup 2,
[Instruction_name ["vld1"]; Disassembles_as [Use_operands
[| VecArray (2, Dreg); CstPtrTo Corereg |]]],
Use_operands [| VecArray (2, Dreg); CstPtrTo Corereg |],
@@ -1591,6 +1727,12 @@ let ops =
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; VecArray (2, Dreg) |], "vst2",
store_1, pf_su_8_32;
+ Vstx 2, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (2, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (2, Dreg) |], "vst2",
+ store_1, [P64];
Vstx 2, [Disassembles_as [Use_operands [| VecArray (2, Dreg);
PtrTo Corereg |]];
Instruction_name ["vst1"]],
@@ -1619,6 +1761,9 @@ let ops =
(* VLD3 variants. *)
Vldx 3, [], Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
"vld3", bits_1, pf_su_8_32;
+ Vldx 3, [Requires_feature "CRYPTO"; Instruction_name ["vld1"]],
+ Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
+ "vld3", bits_1, [P64];
Vldx 3, [Instruction_name ["vld1"]],
Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
"vld3", bits_1, [S64; U64];
@@ -1650,6 +1795,12 @@ let ops =
Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
"vld3_dup", bits_1, pf_su_8_32;
Vldx_dup 3,
+ [Requires_feature "CRYPTO";
+ Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (3, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
+ "vld3_dup", bits_1, [P64];
+ Vldx_dup 3,
[Instruction_name ["vld1"]; Disassembles_as [Use_operands
[| VecArray (3, Dreg); CstPtrTo Corereg |]]],
Use_operands [| VecArray (3, Dreg); CstPtrTo Corereg |],
@@ -1660,6 +1811,12 @@ let ops =
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; VecArray (3, Dreg) |], "vst3",
store_1, pf_su_8_32;
+ Vstx 3, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (3, Dreg) |], "vst3",
+ store_1, [P64];
Vstx 3, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
PtrTo Corereg |]];
Instruction_name ["vst1"]],
@@ -1688,6 +1845,9 @@ let ops =
(* VLD4/VST4 variants. *)
Vldx 4, [], Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
"vld4", bits_1, pf_su_8_32;
+ Vldx 4, [Requires_feature "CRYPTO"; Instruction_name ["vld1"]],
+ Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
+ "vld4", bits_1, [P64];
Vldx 4, [Instruction_name ["vld1"]],
Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
"vld4", bits_1, [S64; U64];
@@ -1719,6 +1879,12 @@ let ops =
Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
"vld4_dup", bits_1, pf_su_8_32;
Vldx_dup 4,
+ [Requires_feature "CRYPTO";
+ Instruction_name ["vld1"]; Disassembles_as [Use_operands
+ [| VecArray (4, Dreg); CstPtrTo Corereg |]]],
+ Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
+ "vld4_dup", bits_1, [P64];
+ Vldx_dup 4,
[Instruction_name ["vld1"]; Disassembles_as [Use_operands
[| VecArray (4, Dreg); CstPtrTo Corereg |]]],
Use_operands [| VecArray (4, Dreg); CstPtrTo Corereg |],
@@ -1728,6 +1894,12 @@ let ops =
PtrTo Corereg |]]],
Use_operands [| PtrTo Corereg; VecArray (4, Dreg) |], "vst4",
store_1, pf_su_8_32;
+ Vstx 4, [Requires_feature "CRYPTO";
+ Disassembles_as [Use_operands [| VecArray (4, Dreg);
+ PtrTo Corereg |]];
+ Instruction_name ["vst1"]],
+ Use_operands [| PtrTo Corereg; VecArray (4, Dreg) |], "vst4",
+ store_1, [P64];
Vstx 4, [Disassembles_as [Use_operands [| VecArray (4, Dreg);
PtrTo Corereg |]];
Instruction_name ["vst1"]],
@@ -1779,26 +1951,32 @@ let ops =
Vorn, [], All (3, Qreg), "vornQ", notype_2, su_8_64;
]
+let type_in_crypto_only t
+  = (t = P64) || (t = P128)
+
+let cross_product s1 s2
+ = List.filter (fun (e, e') -> e <> e')
+ (List.concat (List.map (fun e1 -> List.map (fun e2 -> (e1,e2)) s1) s2))
+
let reinterp =
- let elems = P8 :: P16 :: F32 :: su_8_64 in
- List.fold_right
- (fun convto acc ->
- let types = List.fold_right
- (fun convfrom acc ->
- if convfrom <> convto then
- Cast (convto, convfrom) :: acc
- else
- acc)
- elems
- []
- in
- let dconv = Vreinterp, [No_op], Use_operands [| Dreg; Dreg |],
- "vreinterpret", conv_1, types
- and qconv = Vreinterp, [No_op], Use_operands [| Qreg; Qreg |],
- "vreinterpretQ", conv_1, types in
- dconv :: qconv :: acc)
- elems
- []
+ let elems = P8 :: P16 :: F32 :: P64 :: su_8_64 in
+ let casts = cross_product elems elems in
+ List.map
+ (fun (convto, convfrom) ->
+      Vreinterp, (if (type_in_crypto_only convto) || (type_in_crypto_only convfrom)
+ then [Requires_feature "CRYPTO"] else []) @ [No_op], Use_operands [| Dreg; Dreg |],
+ "vreinterpret", conv_1, [Cast (convto, convfrom)])
+ casts
+
+let reinterpq =
+ let elems = P8 :: P16 :: F32 :: P64 :: P128 :: su_8_64 in
+ let casts = cross_product elems elems in
+ List.map
+ (fun (convto, convfrom) ->
+      Vreinterp, (if (type_in_crypto_only convto) || (type_in_crypto_only convfrom)
+ then [Requires_feature "CRYPTO"] else []) @ [No_op], Use_operands [| Qreg; Qreg |],
+ "vreinterpretQ", conv_1, [Cast (convto, convfrom)])
+ casts
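
Recast as a filtered cross product, every (to, from) pair now yields one single-cast entry, tagged Requires_feature "CRYPTO" whenever either end is a crypto-only type. In the generated header this produces guarded intrinsics along these lines (a sketch; the real generator goes through a builtin rather than a bare cast):

#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
vreinterpret_p64_u64 (uint64x1_t __a)
{
  /* poly64 is a crypto-only element type, hence the guard.  */
  return (poly64x1_t) __a;
}
#endif
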
(* Output routines. *)
@@ -1808,6 +1986,7 @@ let rec string_of_elt = function
| I8 -> "i8" | I16 -> "i16" | I32 -> "i32" | I64 -> "i64"
| B8 -> "8" | B16 -> "16" | B32 -> "32" | B64 -> "64"
| F16 -> "f16" | F32 -> "f32" | P8 -> "p8" | P16 -> "p16"
+ | P64 -> "p64" | P128 -> "p128"
| Conv (a, b) | Cast (a, b) -> string_of_elt a ^ "_" ^ string_of_elt b
| NoElts -> failwith "No elts"
@@ -1851,6 +2030,10 @@ let string_of_vectype vt =
| T_uint64 -> affix "uint64"
| T_poly8 -> affix "poly8"
| T_poly16 -> affix "poly16"
+ | T_poly64 -> affix "poly64"
+ | T_poly64x1 -> affix "poly64x1"
+ | T_poly64x2 -> affix "poly64x2"
+ | T_poly128 -> affix "poly128"
| T_float16 -> affix "float16"
| T_float32 -> affix "float32"
| T_immediate _ -> "const int"
@@ -1859,6 +2042,7 @@ let string_of_vectype vt =
| T_intHI -> "__builtin_neon_hi"
| T_intSI -> "__builtin_neon_si"
| T_intDI -> "__builtin_neon_di"
+ | T_intTI -> "__builtin_neon_ti"
| T_floatHF -> "__builtin_neon_hf"
| T_floatSF -> "__builtin_neon_sf"
| T_arrayof (num, base) ->
@@ -1884,7 +2068,7 @@ let string_of_mode = function
V8QI -> "v8qi" | V4HI -> "v4hi" | V4HF -> "v4hf" | V2SI -> "v2si"
| V2SF -> "v2sf" | DI -> "di" | V16QI -> "v16qi" | V8HI -> "v8hi"
| V4SI -> "v4si" | V4SF -> "v4sf" | V2DI -> "v2di" | QI -> "qi"
- | HI -> "hi" | SI -> "si" | SF -> "sf"
+ | HI -> "hi" | SI -> "si" | SF -> "sf" | TI -> "ti"
(* Use uppercase chars for letters which form part of the intrinsic name, but
should be omitted from the builtin name (the info is passed in an extra
@@ -1991,3 +2175,181 @@ let analyze_all_shapes features shape f =
| _ -> assert false
with Not_found -> [f shape]
+(* The crypto intrinsics have unconventional shapes and are not numerous
+   enough to be worth the trouble of encoding in the tables above, so we
+   write them out explicitly as a verbatim string. *)
+let crypto_intrinsics =
+"
+#ifdef __ARM_FEATURE_CRYPTO
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vldrq_p128 (poly128_t const * __ptr)
+{
+#ifdef __ARM_BIG_ENDIAN
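+  /* In big-endian mode the two doubleword halves of a q-register are in
+     the opposite order from memory, so load each half separately and
+     recombine them swapped.  */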
+ poly64_t* __ptmp = (poly64_t*) __ptr;
+ poly64_t __d0 = vld1_p64 (__ptmp);
+ poly64_t __d1 = vld1_p64 (__ptmp + 1);
+ return vreinterpretq_p128_p64 (vcombine_p64 (__d1, __d0));
+#else
+ return vreinterpretq_p128_p64 (vld1q_p64 ((poly64_t*) __ptr));
+#endif
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vstrq_p128 (poly128_t * __ptr, poly128_t __val)
+{
+#ifdef __ARM_BIG_ENDIAN
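+  /* Mirror of vldrq_p128: extract the two halves and store them swapped
+     so the in-memory layout matches the little-endian case.  */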
+ poly64x2_t __tmp = vreinterpretq_p64_p128 (__val);
+ poly64_t __d0 = vget_high_p64 (__tmp);
+ poly64_t __d1 = vget_low_p64 (__tmp);
+ vst1q_p64 ((poly64_t*) __ptr, vcombine_p64 (__d0, __d1));
+#else
+ vst1q_p64 ((poly64_t*) __ptr, vreinterpretq_p64_p128 (__val));
+#endif
+}
+
+/* The vceq_p64 intrinsic does not map to a single instruction.
+   Instead we emulate it by performing a 32-bit variant of vceq
+   and applying a pairwise min reduction to the result.
+   vceq_u32 produces two 32-bit halves, each of which is either all
+   ones or all zeros depending on whether the corresponding 32-bit
+   halves of the poly64_t values were equal.  The whole poly64_t values
+   are equal if and only if both halves are equal, i.e. vceq_u32 returns
+   all ones.  If either half is all zeros, the whole result must be all
+   zeros, which is exactly what the pairwise min reduction achieves.  */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vceq_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmin_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
+
+/* The vtst_p64 intrinsic does not map to a single instruction.
+   We emulate it in a way similar to vceq_p64 above, but here we
+   reduce with max, since if any two corresponding bits in the two
+   poly64_t values match, the whole result must be all ones.  */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vtst_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmax_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaeseq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aese (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesdq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aesd (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesmcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesmc (__data);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesimcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesimc (__data);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vsha1h_u32 (uint32_t __hash_e)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ __t = __builtin_arm_crypto_sha1h (__t);
+ return vgetq_lane_u32 (__t, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1c (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1p (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1m (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11)
+{
+ return __builtin_arm_crypto_sha1su0 (__w0_3, __w4_7, __w8_11);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w12_15)
+{
+ return __builtin_arm_crypto_sha1su1 (__tw0_3, __w12_15);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256hq_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha256h (__hash_abcd, __hash_efgh, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256h2q_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha256h2 (__hash_abcd, __hash_efgh, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7)
+{
+ return __builtin_arm_crypto_sha256su0 (__w0_3, __w4_7);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
+{
+ return __builtin_arm_crypto_sha256su1 (__tw0_3, __w8_11, __w12_15);
+}
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_p64 (poly64_t __a, poly64_t __b)
+{
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __a, (uint64_t) __b);
+}
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
+{
+ poly64_t __t1 = vget_high_p64 (__a);
+ poly64_t __t2 = vget_high_p64 (__b);
+
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __t1, (uint64_t) __t2);
+}
+
+#endif
+"
diff --git a/gcc/config/arm/netbsd-elf.h b/gcc/config/arm/netbsd-elf.h
index 26d4f8e7362..9deda96791f 100644
--- a/gcc/config/arm/netbsd-elf.h
+++ b/gcc/config/arm/netbsd-elf.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, NetBSD/arm ELF version.
- Copyright (C) 2002-2013 Free Software Foundation, Inc.
+ Copyright (C) 2002-2014 Free Software Foundation, Inc.
Contributed by Wasabi Systems, Inc.
This file is part of GCC.
diff --git a/gcc/config/arm/predicates.md b/gcc/config/arm/predicates.md
index 29e1e5c8276..ce5c9a830cd 100644
--- a/gcc/config/arm/predicates.md
+++ b/gcc/config/arm/predicates.md
@@ -1,5 +1,5 @@
;; Predicate definitions for ARM and Thumb
-;; Copyright (C) 2004-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;; This file is part of GCC.
@@ -53,11 +53,11 @@
(ior (match_operand 0 "imm_for_neon_logic_operand")
(match_operand 0 "s_register_operand")))
-;; Any hard register.
-(define_predicate "arm_hard_register_operand"
+;; Any general register.
+(define_predicate "arm_hard_general_register_operand"
(match_code "reg")
{
- return REGNO (op) < FIRST_PSEUDO_REGISTER;
+ return REGNO (op) <= LAST_ARM_REGNUM;
})
;; A low register.
@@ -98,6 +98,12 @@
&& REGNO_REG_CLASS (REGNO (op)) == VFP_REGS)));
})
+(define_predicate "vfp_hard_register_operand"
+ (match_code "reg")
+{
+ return (IS_VFP_REGNUM (REGNO (op)));
+})
+
(define_predicate "zero_operand"
(and (match_code "const_int,const_double,const_vector")
(match_test "op == CONST0_RTX (mode)")))
@@ -639,8 +645,13 @@
(define_predicate "const_double_vcvt_power_of_two_reciprocal"
(and (match_code "const_double")
- (match_test "TARGET_32BIT && TARGET_VFP
- && vfp3_const_double_for_fract_bits (op)")))
+ (match_test "TARGET_32BIT && TARGET_VFP
+ && vfp3_const_double_for_fract_bits (op)")))
+
+(define_predicate "const_double_vcvt_power_of_two"
+ (and (match_code "const_double")
+ (match_test "TARGET_32BIT && TARGET_VFP
+ && vfp3_const_double_for_bits (op)")))
(define_predicate "neon_struct_operand"
(and (match_code "mem")
diff --git a/gcc/config/arm/rtems-eabi.h b/gcc/config/arm/rtems-eabi.h
index 77fcf1a8b2d..4bdcf0d87ba 100644
--- a/gcc/config/arm/rtems-eabi.h
+++ b/gcc/config/arm/rtems-eabi.h
@@ -1,5 +1,5 @@
/* Definitions for RTEMS based ARM systems using EABI.
- Copyright (C) 2011-2013 Free Software Foundation, Inc.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
This file is part of GCC.
diff --git a/gcc/config/arm/semi.h b/gcc/config/arm/semi.h
index b06c0af72eb..f937e47b9e7 100644
--- a/gcc/config/arm/semi.h
+++ b/gcc/config/arm/semi.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler. ARM on semi-hosted platform
- Copyright (C) 1994-2013 Free Software Foundation, Inc.
+ Copyright (C) 1994-2014 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (richard.earnshaw@arm.com)
This file is part of GCC.
diff --git a/gcc/config/arm/symbian.h b/gcc/config/arm/symbian.h
index 8c121bec4ab..777742d6e01 100644
--- a/gcc/config/arm/symbian.h
+++ b/gcc/config/arm/symbian.h
@@ -1,5 +1,5 @@
/* Configuration file for Symbian OS on ARM processors.
- Copyright (C) 2004-2013 Free Software Foundation, Inc.
+ Copyright (C) 2004-2014 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC
This file is part of GCC.
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 8f7bd71c317..aa8e9abcf77 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -1,5 +1,5 @@
;; Machine description for ARM processor synchronization primitives.
-;; Copyright (C) 2010-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2010-2014 Free Software Foundation, Inc.
;; Written by Marcus Shawcroft (marcus.shawcroft@arm.com)
;; 64bit Atomics by Dave Gilbert (david.gilbert@linaro.org)
;;
diff --git a/gcc/config/arm/t-aprofile b/gcc/config/arm/t-aprofile
index ce45d4d210a..ad7ccd187be 100644
--- a/gcc/config/arm/t-aprofile
+++ b/gcc/config/arm/t-aprofile
@@ -1,4 +1,4 @@
-# Copyright (C) 2012-2013 Free Software Foundation, Inc.
+# Copyright (C) 2012-2014 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
@@ -83,8 +83,11 @@ MULTILIB_EXCEPTIONS += *mcpu=cortex-a7/*mfpu=neon-fp-armv8*
MULTILIB_MATCHES += march?armv7-a=mcpu?cortex-a8
MULTILIB_MATCHES += march?armv7-a=mcpu?cortex-a9
MULTILIB_MATCHES += march?armv7-a=mcpu?cortex-a5
-MULTILIB_MATCHES += mcpu?cortex-a7=mcpu?cortex-a15
+MULTILIB_MATCHES += mcpu?cortex-a7=mcpu?cortex-a15=mcpu?cortex-a12
+MULTILIB_MATCHES += mcpu?cortex-a7=mcpu?cortex-a15.cortex-a7
MULTILIB_MATCHES += march?armv8-a=mcpu?cortex-a53
+MULTILIB_MATCHES += march?armv8-a=mcpu?cortex-a57
+MULTILIB_MATCHES += march?armv8-a=mcpu?cortex-a57.cortex-a53
# FPU matches
MULTILIB_MATCHES += mfpu?vfpv3-d16=mfpu?vfpv3
diff --git a/gcc/config/arm/t-arm b/gcc/config/arm/t-arm
index 20e79ef2680..99bd696e411 100644
--- a/gcc/config/arm/t-arm
+++ b/gcc/config/arm/t-arm
@@ -1,6 +1,6 @@
# Rules common to all arm targets
#
-# Copyright (C) 2004-2013 Free Software Foundation, Inc.
+# Copyright (C) 2004-2014 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
diff --git a/gcc/config/arm/t-arm-elf b/gcc/config/arm/t-arm-elf
index 60747d335f6..8ef6b04ffbe 100644
--- a/gcc/config/arm/t-arm-elf
+++ b/gcc/config/arm/t-arm-elf
@@ -1,4 +1,4 @@
-# Copyright (C) 1998-2013 Free Software Foundation, Inc.
+# Copyright (C) 1998-2014 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
diff --git a/gcc/config/arm/t-linux-eabi b/gcc/config/arm/t-linux-eabi
index 07e32b38de8..1087914b527 100644
--- a/gcc/config/arm/t-linux-eabi
+++ b/gcc/config/arm/t-linux-eabi
@@ -1,4 +1,4 @@
-# Copyright (C) 2005-2013 Free Software Foundation, Inc.
+# Copyright (C) 2005-2014 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
diff --git a/gcc/config/arm/t-symbian b/gcc/config/arm/t-symbian
index c4c6bb6aaab..35ee0288969 100644
--- a/gcc/config/arm/t-symbian
+++ b/gcc/config/arm/t-symbian
@@ -1,4 +1,4 @@
-# Copyright (C) 2004-2013 Free Software Foundation, Inc.
+# Copyright (C) 2004-2014 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
diff --git a/gcc/config/arm/t-vxworks b/gcc/config/arm/t-vxworks
index d3e32c91d58..802d8e4bdde 100644
--- a/gcc/config/arm/t-vxworks
+++ b/gcc/config/arm/t-vxworks
@@ -1,4 +1,4 @@
-# Copyright (C) 2003-2013 Free Software Foundation, Inc.
+# Copyright (C) 2003-2014 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
diff --git a/gcc/config/arm/thumb2.md b/gcc/config/arm/thumb2.md
index b8b49fe7aee..4f247f82bf4 100644
--- a/gcc/config/arm/thumb2.md
+++ b/gcc/config/arm/thumb2.md
@@ -1,5 +1,5 @@
;; ARM Thumb-2 Machine Description
-;; Copyright (C) 2007-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery, LLC.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/types.md b/gcc/config/arm/types.md
index 6351f080b32..cc39cd11f4a 100644
--- a/gcc/config/arm/types.md
+++ b/gcc/config/arm/types.md
@@ -1,6 +1,6 @@
;; Instruction Classification for ARM for GNU compiler.
-;; Copyright (C) 1991-2013 Free Software Foundation, Inc.
+;; Copyright (C) 1991-2014 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;; This file is part of GCC.
@@ -327,6 +327,7 @@
; neon_mul_b_long
; neon_mul_h_long
; neon_mul_s_long
+; neon_mul_d_long
; neon_mul_h_scalar
; neon_mul_h_scalar_q
; neon_mul_s_scalar
@@ -520,6 +521,15 @@
; neon_fp_div_s_q
; neon_fp_div_d
; neon_fp_div_d_q
+;
+; The classification below is for Crypto instructions.
+;
+; crypto_aes
+; crypto_sha1_xor
+; crypto_sha1_fast
+; crypto_sha1_slow
+; crypto_sha256_fast
+; crypto_sha256_slow
(define_attr "type"
"adc_imm,\
@@ -544,6 +554,7 @@
clz,\
no_insn,\
csel,\
+ crc,\
extend,\
f_cvt,\
f_cvtf2i,\
@@ -823,6 +834,7 @@
neon_mul_b_long,\
neon_mul_h_long,\
neon_mul_s_long,\
+ neon_mul_d_long,\
neon_mul_h_scalar,\
neon_mul_h_scalar_q,\
neon_mul_s_scalar,\
@@ -1037,7 +1049,14 @@
neon_fp_div_s,\
neon_fp_div_s_q,\
neon_fp_div_d,\
- neon_fp_div_d_q"
+ neon_fp_div_d_q,\
+\
+ crypto_aes,\
+ crypto_sha1_xor,\
+ crypto_sha1_fast,\
+ crypto_sha1_slow,\
+ crypto_sha256_fast,\
+ crypto_sha256_slow"
(const_string "untyped"))
; Is this an (integer side) multiply with a 32-bit (or smaller) result?
diff --git a/gcc/config/arm/uclinux-eabi.h b/gcc/config/arm/uclinux-eabi.h
index c22c1d7f348..b5055ce40bc 100644
--- a/gcc/config/arm/uclinux-eabi.h
+++ b/gcc/config/arm/uclinux-eabi.h
@@ -1,5 +1,5 @@
/* Definitions for ARM EABI ucLinux
- Copyright (C) 2006-2013 Free Software Foundation, Inc.
+ Copyright (C) 2006-2014 Free Software Foundation, Inc.
Contributed by Paul Brook <paul@codesourcery.com>
This file is part of GCC.
diff --git a/gcc/config/arm/uclinux-elf.h b/gcc/config/arm/uclinux-elf.h
index 74d63df4418..5cd4fe52799 100644
--- a/gcc/config/arm/uclinux-elf.h
+++ b/gcc/config/arm/uclinux-elf.h
@@ -1,5 +1,5 @@
/* Definitions for ARM running ucLinux using ELF
- Copyright (C) 1999-2013 Free Software Foundation, Inc.
+ Copyright (C) 1999-2014 Free Software Foundation, Inc.
Contributed by Philip Blundell <pb@nexus.co.uk>
This file is part of GCC.
diff --git a/gcc/config/arm/unknown-elf.h b/gcc/config/arm/unknown-elf.h
index 9d776b0585d..ec6f9a48894 100644
--- a/gcc/config/arm/unknown-elf.h
+++ b/gcc/config/arm/unknown-elf.h
@@ -1,5 +1,5 @@
/* Definitions for non-Linux based ARM systems using ELF
- Copyright (C) 1998-2013 Free Software Foundation, Inc.
+ Copyright (C) 1998-2014 Free Software Foundation, Inc.
Contributed by Catherine Moore <clm@cygnus.com>
This file is part of GCC.
diff --git a/gcc/config/arm/unspecs.md b/gcc/config/arm/unspecs.md
index 508603cf6c8..8caa953bcb9 100644
--- a/gcc/config/arm/unspecs.md
+++ b/gcc/config/arm/unspecs.md
@@ -1,5 +1,5 @@
;; Unspec definitions.
-;; Copyright (C) 2012-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2012-2014 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;; This file is part of GCC.
@@ -149,6 +149,27 @@
(define_c_enum "unspec" [
UNSPEC_ASHIFT_SIGNED
UNSPEC_ASHIFT_UNSIGNED
+ UNSPEC_CRC32B
+ UNSPEC_CRC32H
+ UNSPEC_CRC32W
+ UNSPEC_CRC32CB
+ UNSPEC_CRC32CH
+ UNSPEC_CRC32CW
+ UNSPEC_AESD
+ UNSPEC_AESE
+ UNSPEC_AESIMC
+ UNSPEC_AESMC
+ UNSPEC_SHA1C
+ UNSPEC_SHA1M
+ UNSPEC_SHA1P
+ UNSPEC_SHA1H
+ UNSPEC_SHA1SU0
+ UNSPEC_SHA1SU1
+ UNSPEC_SHA256H
+ UNSPEC_SHA256H2
+ UNSPEC_SHA256SU0
+ UNSPEC_SHA256SU1
+ UNSPEC_VMULLP64
UNSPEC_LOAD_COUNT
UNSPEC_VABD
UNSPEC_VABDL
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index 9418018dae9..ba0b5880636 100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -1,5 +1,5 @@
;; Machine Description for shared bits common to IWMMXT and Neon.
-;; Copyright (C) 2006-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2006-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index 6d0515a92b1..e1a48eeea82 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -1,5 +1,5 @@
;; ARM VFP instruction patterns
-;; Copyright (C) 2003-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery.
;;
;; This file is part of GCC.
@@ -1253,6 +1253,20 @@
(set_attr "length" "8")]
)
+(define_insn "*combine_vcvtf2i"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (fix:SF (mult:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand 2
+ "const_double_vcvt_power_of_two" "Dp")))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP3 && !flag_rounding_math"
+ "vcvt%?.s32.f32\\t%1, %1, %v2\;vmov%?\\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "predicable_short_it" "no")
+ (set_attr "ce_count" "2")
+ (set_attr "type" "f_cvtf2i")
+ (set_attr "length" "8")]
+ )
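+
+;; Illustrative note (not part of the pattern): with VFP3 and rounding-math
+;; left at its default (off), this insn is intended to match C source of
+;; the form below; the function name is a hypothetical example.
+;;
+;;   int scale_to_fixed (float x)
+;;   {
+;;     return (int) (x * 64.0f);  /* 64 = 2^6, so a single vcvt.s32.f32
+;;                                   with 6 fraction bits followed by a
+;;                                   vmov, as emitted above */
+;;   }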
+
;; Store multiple insn used in function prologue.
(define_insn "*push_multi_vfp"
[(match_parallel 2 "multi_register_push"
diff --git a/gcc/config/arm/vfp11.md b/gcc/config/arm/vfp11.md
index 4cfa69efc24..2dbb2010079 100644
--- a/gcc/config/arm/vfp11.md
+++ b/gcc/config/arm/vfp11.md
@@ -1,5 +1,5 @@
;; ARM VFP11 pipeline description
-;; Copyright (C) 2003-2013 Free Software Foundation, Inc.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
;; Written by CodeSourcery.
;;
;; This file is part of GCC.
diff --git a/gcc/config/arm/vxworks.h b/gcc/config/arm/vxworks.h
index 480cc95e1f1..8bef16bc49f 100644
--- a/gcc/config/arm/vxworks.h
+++ b/gcc/config/arm/vxworks.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GCC,
for ARM with targeting the VXWorks run time environment.
- Copyright (C) 1999-2013 Free Software Foundation, Inc.
+ Copyright (C) 1999-2014 Free Software Foundation, Inc.
Contributed by: Mike Stump <mrs@wrs.com>
Brought up to date by CodeSourcery, LLC.
diff --git a/gcc/config/arm/vxworks.opt b/gcc/config/arm/vxworks.opt
index 911b4983e31..ae83422f808 100644
--- a/gcc/config/arm/vxworks.opt
+++ b/gcc/config/arm/vxworks.opt
@@ -1,6 +1,6 @@
; ARM VxWorks options.
-; Copyright (C) 2011-2013 Free Software Foundation, Inc.
+; Copyright (C) 2011-2014 Free Software Foundation, Inc.
;
; This file is part of GCC.
;