summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorjgreenhalgh <jgreenhalgh@138bc75d-0d04-0410-961f-82ee72b054a4>2016-06-17 10:28:34 +0000
committerjgreenhalgh <jgreenhalgh@138bc75d-0d04-0410-961f-82ee72b054a4>2016-06-17 10:28:34 +0000
commitaf3561b4294f8191bb20f99f23edafa8a889ad7d (patch)
tree001358e35d4b4225f841a8aedef8201baa585873
parent7fdca64786ecdbe82838a65d06c7ec10256bd213 (diff)
downloadgcc-af3561b4294f8191bb20f99f23edafa8a889ad7d.tar.gz
[Patch ARM arm_neon.h] s/__FAST_MATH/__FAST_MATH__/g
gcc/ * config/arm/arm_neon.h (vadd_f32): Replace __FAST_MATH with __FAST_MATH__. (vaddq_f32): Likewise. (vmul_f32): Likewise. (vmulq_f32): Likewise. (vsub_f32): Likewise. (vsubq_f32): Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@237553 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--gcc/ChangeLog10
-rw-r--r--gcc/config/arm/arm_neon.h12
2 files changed, 16 insertions, 6 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d804252b76c..0f4846c2c4d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,13 @@
+2016-06-17 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/arm/arm_neon.h (vadd_f32): Replace __FAST_MATH with
+ __FAST_MATH__.
+ (vaddq_f32): Likewise.
+ (vmul_f32): Likewise.
+ (vmulq_f32): Likewise.
+ (vsub_f32): Likewise.
+ (vsubq_f32): Likewise.
+
2016-06-17 Bin Cheng <bin.cheng@arm.com>
PR tree-optimization/71347
diff --git a/gcc/config/arm/arm_neon.h b/gcc/config/arm/arm_neon.h
index 7997cb4ad3a..32ee06c65f3 100644
--- a/gcc/config/arm/arm_neon.h
+++ b/gcc/config/arm/arm_neon.h
@@ -530,7 +530,7 @@ vadd_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vadd_f32 (float32x2_t __a, float32x2_t __b)
{
-#ifdef __FAST_MATH
+#ifdef __FAST_MATH__
return __a + __b;
#else
return (float32x2_t) __builtin_neon_vaddv2sf (__a, __b);
@@ -594,7 +594,7 @@ vaddq_s64 (int64x2_t __a, int64x2_t __b)
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vaddq_f32 (float32x4_t __a, float32x4_t __b)
{
-#ifdef __FAST_MATH
+#ifdef __FAST_MATH__
return __a + __b;
#else
return (float32x4_t) __builtin_neon_vaddv4sf (__a, __b);
@@ -1030,7 +1030,7 @@ vmul_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmul_f32 (float32x2_t __a, float32x2_t __b)
{
-#ifdef __FAST_MATH
+#ifdef __FAST_MATH__
return __a * __b;
#else
return (float32x2_t) __builtin_neon_vmulfv2sf (__a, __b);
@@ -1077,7 +1077,7 @@ vmulq_s32 (int32x4_t __a, int32x4_t __b)
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmulq_f32 (float32x4_t __a, float32x4_t __b)
{
-#ifdef __FAST_MATH
+#ifdef __FAST_MATH__
return __a * __b;
#else
return (float32x4_t) __builtin_neon_vmulfv4sf (__a, __b);
@@ -1678,7 +1678,7 @@ vsub_s32 (int32x2_t __a, int32x2_t __b)
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vsub_f32 (float32x2_t __a, float32x2_t __b)
{
-#ifdef __FAST_MATH
+#ifdef __FAST_MATH__
return __a - __b;
#else
return (float32x2_t) __builtin_neon_vsubv2sf (__a, __b);
@@ -1742,7 +1742,7 @@ vsubq_s64 (int64x2_t __a, int64x2_t __b)
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vsubq_f32 (float32x4_t __a, float32x4_t __b)
{
-#ifdef __FAST_MATH
+#ifdef __FAST_MATH__
return __a - __b;
#else
return (float32x4_t) __builtin_neon_vsubv4sf (__a, __b);