diff options
author | Paul A. Clarke <pc@us.ibm.com> | 2018-10-29 19:44:57 +0000 |
---|---|---|
committer | Paul Clarke <pc@gcc.gnu.org> | 2018-10-29 19:44:57 +0000 |
commit | 8505bf12ff693b71ae7f5cfeaf4cb55bf72a0c8a (patch) | |
tree | 26a177e1602c19f7b9779123abe8bc3794c4d7cc /gcc/config/rs6000/mmintrin.h | |
parent | a385474c79d28d89861d25720715ed361e912b1a (diff) | |
download | gcc-8505bf12ff693b71ae7f5cfeaf4cb55bf72a0c8a.tar.gz |
[rs6000] Consistently use '__vector' instead of 'vector'
Revision r265535 committed changes that used 'vector' instead of the
preferred '__vector'. There is a reason that '__vector' is preferred:
it ensures no conflicts with the C++ namespace. Indeed,
gcc/config/rs6000/xmmintrin.h undefines it, leading to errors:
gcc/include/xmmintrin.h:999:20: error: 'vector' undeclared (first use in this function); did you mean 'vec_or'?
gcc/include/xmmintrin.h:999:20: note: each undeclared identifier is reported only once for each function it appears in
gcc/include/xmmintrin.h:999:26: error: expected ')' before 'long'
gcc/include/xmmintrin.h:999:37: error: expected ')' before 'result'
Also fixed a few whitespace issues.
[gcc]
2018-10-29 Paul A. Clarke <pc@us.ibm.com>
* gcc/config/rs6000/mmintrin.h (_mm_packs_pi16, _mm_packs_pi32,
_mm_packs_pu16, _mm_unpackhi_pi8, _mm_unpacklo_pi8, _mm_add_pi8,
_mm_add_pi16, _mm_add_pi32, _mm_sub_pi8, _mm_sub_pi16, _mm_sub_pi32,
_mm_cmpgt_pi8, _mm_cmpeq_pi16, _mm_cmpgt_pi16, _mm_cmpeq_pi32,
_mm_cmpgt_pi32, _mm_adds_pi8, _mm_adds_pi16, _mm_adds_pu8,
_mm_adds_pu16, _mm_subs_pi8, _mm_subs_pi16, _mm_subs_pu8,
_mm_subs_pu16, _mm_madd_pi16, _mm_mulhi_pi16, _mm_mullo_pi16,
_mm_sll_pi16, _mm_sra_pi16, _mm_srl_pi16, _mm_set1_pi16, _mm_set1_pi8):
Change 'vector' to '__vector'.
* gcc/config/rs6000/xmmintrin.h (_mm_cvtps_pi32, _mm_cvttps_pi32,
_mm_cvtps_pi16, _mm_cvtps_pi8, _mm_max_pi16, _mm_max_pu8, _mm_min_pi16,
_mm_min_pu8, _mm_mulhi_pu16, _mm_shuffle_pi16, _mm_avg_pu8,
_mm_avg_pu16): Likewise. And, whitespace corrections.
From-SVN: r265601
Diffstat (limited to 'gcc/config/rs6000/mmintrin.h')
-rw-r--r-- | gcc/config/rs6000/mmintrin.h | 64 |
1 file changed, 32 insertions, 32 deletions
diff --git a/gcc/config/rs6000/mmintrin.h b/gcc/config/rs6000/mmintrin.h index 2f6bccaa7a3..7456c5bdec3 100644 --- a/gcc/config/rs6000/mmintrin.h +++ b/gcc/config/rs6000/mmintrin.h @@ -174,7 +174,7 @@ _mm_packs_pi16 (__m64 __m1, __m64 __m2) vm1 = (__vector signed short) (__vector unsigned long long) { __m2, __m1 }; vresult = vec_vpkshss (vm1, vm1); - return (__m64) ((vector long long) vresult)[0]; + return (__m64) ((__vector long long) vresult)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -194,7 +194,7 @@ _mm_packs_pi32 (__m64 __m1, __m64 __m2) vm1 = (__vector signed int) (__vector unsigned long long) { __m2, __m1 }; vresult = vec_vpkswss (vm1, vm1); - return (__m64) ((vector long long) vresult)[0]; + return (__m64) ((__vector long long) vresult)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -214,7 +214,7 @@ _mm_packs_pu16 (__m64 __m1, __m64 __m2) vm1 = (__vector signed short) (__vector unsigned long long) { __m2, __m1 }; vresult = vec_vpkshus (vm1, vm1); - return (__m64) ((vector long long) vresult)[0]; + return (__m64) ((__vector long long) vresult)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -235,7 +235,7 @@ _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2) a = (__vector unsigned char)vec_splats (__m1); b = (__vector unsigned char)vec_splats (__m2); c = vec_mergel (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -316,7 +316,7 @@ _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2) a = (__vector unsigned char)vec_splats (__m1); b = (__vector unsigned char)vec_splats (__m2); c = vec_mergel (a, b); - return (__m64) ((vector long long) c)[1]; + return (__m64) ((__vector long long) c)[1]; #else __m64_union m1, m2, res; @@ -397,7 +397,7 @@ _mm_add_pi8 (__m64 __m1, __m64 __m2) a = (__vector signed char)vec_splats (__m1); b = (__vector signed 
char)vec_splats (__m2); c = vec_add (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -433,7 +433,7 @@ _mm_add_pi16 (__m64 __m1, __m64 __m2) a = (__vector signed short)vec_splats (__m1); b = (__vector signed short)vec_splats (__m2); c = vec_add (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -465,7 +465,7 @@ _mm_add_pi32 (__m64 __m1, __m64 __m2) a = (__vector signed int)vec_splats (__m1); b = (__vector signed int)vec_splats (__m2); c = vec_add (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -495,7 +495,7 @@ _mm_sub_pi8 (__m64 __m1, __m64 __m2) a = (__vector signed char)vec_splats (__m1); b = (__vector signed char)vec_splats (__m2); c = vec_sub (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -531,7 +531,7 @@ _mm_sub_pi16 (__m64 __m1, __m64 __m2) a = (__vector signed short)vec_splats (__m1); b = (__vector signed short)vec_splats (__m2); c = vec_sub (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -563,7 +563,7 @@ _mm_sub_pi32 (__m64 __m1, __m64 __m2) a = (__vector signed int)vec_splats (__m1); b = (__vector signed int)vec_splats (__m2); c = vec_sub (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -753,7 +753,7 @@ _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2) a = (__vector signed char)vec_splats (__m1); b = (__vector signed char)vec_splats (__m2); c = (__vector signed char)vec_cmpgt (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -790,7 +790,7 @@ _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2) a = (__vector 
signed short)vec_splats (__m1); b = (__vector signed short)vec_splats (__m2); c = (__vector signed short)vec_cmpeq (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -821,7 +821,7 @@ _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2) a = (__vector signed short)vec_splats (__m1); b = (__vector signed short)vec_splats (__m2); c = (__vector signed short)vec_cmpgt (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -854,7 +854,7 @@ _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2) a = (__vector signed int)vec_splats (__m1); b = (__vector signed int)vec_splats (__m2); c = (__vector signed int)vec_cmpeq (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -883,7 +883,7 @@ _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2) a = (__vector signed int)vec_splats (__m1); b = (__vector signed int)vec_splats (__m2); c = (__vector signed int)vec_cmpgt (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; #else __m64_union m1, m2, res; @@ -914,7 +914,7 @@ _mm_adds_pi8 (__m64 __m1, __m64 __m2) a = (__vector signed char)vec_splats (__m1); b = (__vector signed char)vec_splats (__m2); c = vec_adds (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -932,7 +932,7 @@ _mm_adds_pi16 (__m64 __m1, __m64 __m2) a = (__vector signed short)vec_splats (__m1); b = (__vector signed short)vec_splats (__m2); c = vec_adds (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -950,7 +950,7 @@ _mm_adds_pu8 (__m64 __m1, __m64 __m2) a = (__vector unsigned char)vec_splats (__m1); b = 
(__vector unsigned char)vec_splats (__m2); c = vec_adds (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -969,7 +969,7 @@ _mm_adds_pu16 (__m64 __m1, __m64 __m2) a = (__vector unsigned short)vec_splats (__m1); b = (__vector unsigned short)vec_splats (__m2); c = vec_adds (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -988,7 +988,7 @@ _mm_subs_pi8 (__m64 __m1, __m64 __m2) a = (__vector signed char)vec_splats (__m1); b = (__vector signed char)vec_splats (__m2); c = vec_subs (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -1007,7 +1007,7 @@ _mm_subs_pi16 (__m64 __m1, __m64 __m2) a = (__vector signed short)vec_splats (__m1); b = (__vector signed short)vec_splats (__m2); c = vec_subs (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -1026,7 +1026,7 @@ _mm_subs_pu8 (__m64 __m1, __m64 __m2) a = (__vector unsigned char)vec_splats (__m1); b = (__vector unsigned char)vec_splats (__m2); c = vec_subs (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -1045,7 +1045,7 @@ _mm_subs_pu16 (__m64 __m1, __m64 __m2) a = (__vector unsigned short)vec_splats (__m1); b = (__vector unsigned short)vec_splats (__m2); c = vec_subs (a, b); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) @@ -1067,7 +1067,7 @@ _mm_madd_pi16 (__m64 __m1, __m64 __m2) a = (__vector signed short)vec_splats (__m1); b = (__vector signed short)vec_splats (__m2); c = vec_vmsumshm (a, b, zero); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -1095,7 +1095,7 @@ _mm_mulhi_pi16 (__m64 __m1, __m64 __m2) w1 = vec_vmulosh (a, b); c = (__vector signed short)vec_perm (w0, w1, xform1); - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -1114,7 +1114,7 @@ _mm_mullo_pi16 (__m64 __m1, __m64 __m2) a = (__vector signed short)vec_splats (__m1); b = (__vector signed short)vec_splats (__m2); c = a * b; - return (__m64) ((vector long long) c)[0]; + return (__m64) ((__vector long long) c)[0]; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) @@ -1135,7 +1135,7 @@ _mm_sll_pi16 (__m64 __m, __m64 __count) m = (__vector signed short)vec_splats (__m); c = (__vector unsigned short)vec_splats ((unsigned short)__count); r = vec_sl (m, (__vector unsigned short)c); - return (__m64) ((vector long long) r)[0]; + return (__m64) ((__vector long long) r)[0]; } else return (0); @@ -1204,7 +1204,7 @@ _mm_sra_pi16 (__m64 __m, __m64 __count) m = (__vector signed short)vec_splats (__m); c = (__vector unsigned short)vec_splats ((unsigned short)__count); r = vec_sra (m, (__vector unsigned short)c); - return (__m64) ((vector long long) r)[0]; + return (__m64) ((__vector long long) r)[0]; } else return (0); @@ -1273,7 +1273,7 @@ _mm_srl_pi16 (__m64 __m, __m64 __count) m = (__vector unsigned short)vec_splats (__m); c = (__vector unsigned short)vec_splats ((unsigned short)__count); r = vec_sr (m, (__vector unsigned short)c); - return (__m64) ((vector long long) r)[0]; + return 
(__m64) ((__vector long long) r)[0]; } else return (0); @@ -1416,7 +1416,7 @@ _mm_set1_pi16 (short __w) __vector signed short w; w = (__vector signed short)vec_splats (__w); - return (__m64) ((vector long long) w)[0]; + return (__m64) ((__vector long long) w)[0]; #else __m64_union res; @@ -1436,7 +1436,7 @@ _mm_set1_pi8 (signed char __b) __vector signed char b; b = (__vector signed char)vec_splats (__b); - return (__m64) ((vector long long) b)[0]; + return (__m64) ((__vector long long) b)[0]; #else __m64_union res; |