37 files changed, 1818 insertions, 3 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index acea775367f..0f7df20db71 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -2,6 +2,123 @@ Mihail Ionescu <mihail.ionescu@arm.com> Srinath Parvathaneni <srinath.parvathaneni@arm.com> + * config/arm/arm_mve.h (vstrdq_scatter_base_p_s64): Define macro. + (vstrdq_scatter_base_p_u64): Likewise. + (vstrdq_scatter_base_s64): Likewise. + (vstrdq_scatter_base_u64): Likewise. + (vstrdq_scatter_offset_p_s64): Likewise. + (vstrdq_scatter_offset_p_u64): Likewise. + (vstrdq_scatter_offset_s64): Likewise. + (vstrdq_scatter_offset_u64): Likewise. + (vstrdq_scatter_shifted_offset_p_s64): Likewise. + (vstrdq_scatter_shifted_offset_p_u64): Likewise. + (vstrdq_scatter_shifted_offset_s64): Likewise. + (vstrdq_scatter_shifted_offset_u64): Likewise. + (vstrhq_scatter_offset_f16): Likewise. + (vstrhq_scatter_offset_p_f16): Likewise. + (vstrhq_scatter_shifted_offset_f16): Likewise. + (vstrhq_scatter_shifted_offset_p_f16): Likewise. + (vstrwq_scatter_base_f32): Likewise. + (vstrwq_scatter_base_p_f32): Likewise. + (vstrwq_scatter_offset_f32): Likewise. + (vstrwq_scatter_offset_p_f32): Likewise. + (vstrwq_scatter_offset_p_s32): Likewise. + (vstrwq_scatter_offset_p_u32): Likewise. + (vstrwq_scatter_offset_s32): Likewise. + (vstrwq_scatter_offset_u32): Likewise. + (vstrwq_scatter_shifted_offset_f32): Likewise. + (vstrwq_scatter_shifted_offset_p_f32): Likewise. + (vstrwq_scatter_shifted_offset_p_s32): Likewise. + (vstrwq_scatter_shifted_offset_p_u32): Likewise. + (vstrwq_scatter_shifted_offset_s32): Likewise. + (vstrwq_scatter_shifted_offset_u32): Likewise. + (__arm_vstrdq_scatter_base_p_s64): Define intrinsic. + (__arm_vstrdq_scatter_base_p_u64): Likewise. + (__arm_vstrdq_scatter_base_s64): Likewise. + (__arm_vstrdq_scatter_base_u64): Likewise. + (__arm_vstrdq_scatter_offset_p_s64): Likewise. + (__arm_vstrdq_scatter_offset_p_u64): Likewise. + (__arm_vstrdq_scatter_offset_s64): Likewise. + (__arm_vstrdq_scatter_offset_u64): Likewise. + (__arm_vstrdq_scatter_shifted_offset_p_s64): Likewise. + (__arm_vstrdq_scatter_shifted_offset_p_u64): Likewise. + (__arm_vstrdq_scatter_shifted_offset_s64): Likewise. + (__arm_vstrdq_scatter_shifted_offset_u64): Likewise. + (__arm_vstrwq_scatter_offset_p_s32): Likewise. + (__arm_vstrwq_scatter_offset_p_u32): Likewise. + (__arm_vstrwq_scatter_offset_s32): Likewise. + (__arm_vstrwq_scatter_offset_u32): Likewise. + (__arm_vstrwq_scatter_shifted_offset_p_s32): Likewise. + (__arm_vstrwq_scatter_shifted_offset_p_u32): Likewise. + (__arm_vstrwq_scatter_shifted_offset_s32): Likewise. + (__arm_vstrwq_scatter_shifted_offset_u32): Likewise. + (__arm_vstrhq_scatter_offset_f16): Likewise. + (__arm_vstrhq_scatter_offset_p_f16): Likewise. + (__arm_vstrhq_scatter_shifted_offset_f16): Likewise. + (__arm_vstrhq_scatter_shifted_offset_p_f16): Likewise. + (__arm_vstrwq_scatter_base_f32): Likewise. + (__arm_vstrwq_scatter_base_p_f32): Likewise. + (__arm_vstrwq_scatter_offset_f32): Likewise. + (__arm_vstrwq_scatter_offset_p_f32): Likewise. + (__arm_vstrwq_scatter_shifted_offset_f32): Likewise. + (__arm_vstrwq_scatter_shifted_offset_p_f32): Likewise. + (vstrhq_scatter_offset): Define polymorphic variant. + (vstrhq_scatter_offset_p): Likewise. + (vstrhq_scatter_shifted_offset): Likewise. + (vstrhq_scatter_shifted_offset_p): Likewise. + (vstrwq_scatter_base): Likewise. + (vstrwq_scatter_base_p): Likewise. + (vstrwq_scatter_offset): Likewise. + (vstrwq_scatter_offset_p): Likewise. + (vstrwq_scatter_shifted_offset): Likewise. 
+ (vstrwq_scatter_shifted_offset_p): Likewise. + (vstrdq_scatter_base_p): Likewise. + (vstrdq_scatter_base): Likewise. + (vstrdq_scatter_offset_p): Likewise. + (vstrdq_scatter_offset): Likewise. + (vstrdq_scatter_shifted_offset_p): Likewise. + (vstrdq_scatter_shifted_offset): Likewise. + * config/arm/arm_mve_builtins.def (STRSBS): Use builtin qualifier. + (STRSBS_P): Likewise. + (STRSBU): Likewise. + (STRSBU_P): Likewise. + (STRSS): Likewise. + (STRSS_P): Likewise. + (STRSU): Likewise. + (STRSU_P): Likewise. + * config/arm/constraints.md (Ri): Define. + * config/arm/mve.md (VSTRDSBQ): Define iterator. + (VSTRDSOQ): Likewise. + (VSTRDSSOQ): Likewise. + (VSTRWSOQ): Likewise. + (VSTRWSSOQ): Likewise. + (mve_vstrdq_scatter_base_p_<supf>v2di): Define RTL pattern. + (mve_vstrdq_scatter_base_<supf>v2di): Likewise. + (mve_vstrdq_scatter_offset_p_<supf>v2di): Likewise. + (mve_vstrdq_scatter_offset_<supf>v2di): Likewise. + (mve_vstrdq_scatter_shifted_offset_p_<supf>v2di): Likewise. + (mve_vstrdq_scatter_shifted_offset_<supf>v2di): Likewise. + (mve_vstrhq_scatter_offset_fv8hf): Likewise. + (mve_vstrhq_scatter_offset_p_fv8hf): Likewise. + (mve_vstrhq_scatter_shifted_offset_fv8hf): Likewise. + (mve_vstrhq_scatter_shifted_offset_p_fv8hf): Likewise. + (mve_vstrwq_scatter_base_fv4sf): Likewise. + (mve_vstrwq_scatter_base_p_fv4sf): Likewise. + (mve_vstrwq_scatter_offset_fv4sf): Likewise. + (mve_vstrwq_scatter_offset_p_fv4sf): Likewise. + (mve_vstrwq_scatter_offset_p_<supf>v4si): Likewise. + (mve_vstrwq_scatter_offset_<supf>v4si): Likewise. + (mve_vstrwq_scatter_shifted_offset_fv4sf): Likewise. + (mve_vstrwq_scatter_shifted_offset_p_fv4sf): Likewise. + (mve_vstrwq_scatter_shifted_offset_p_<supf>v4si): Likewise. + (mve_vstrwq_scatter_shifted_offset_<supf>v4si): Likewise. + * config/arm/predicates.md (Ri): Define predicate to check immediate + is the range +/-1016 and multiple of 8. + +2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com> + Mihail Ionescu <mihail.ionescu@arm.com> + Srinath Parvathaneni <srinath.parvathaneni@arm.com> + * config/arm/arm_mve.h (vst1q_f32): Define macro. (vst1q_f16): Likewise. (vst1q_s8): Likewise. 
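For orientation (not part of the patch), a minimal C sketch of how the new scatter-store intrinsics listed in the ChangeLog above are called. It assumes an MVE-enabled toolchain, for example -march=armv8.1-m.main+mve.fp -mfloat-abi=hard; the intrinsic names and signatures are those added by this patch.

    #include <arm_mve.h>

    void
    scatter_demo (uint64x2_t addr, int64x2_t val64,
                  uint32_t *wbase, uint32x4_t woff, uint32x4_t wval,
                  float16_t *hbase, uint16x8_t hoff, float16x8_t hval,
                  mve_pred16_t p)
    {
      /* 64-bit scatter to a vector of base addresses plus an immediate;
         the immediate must be a multiple of 8 within +/-1016.  */
      vstrdq_scatter_base_s64 (addr, 8, val64);

      /* Predicated 32-bit scatter: woff holds per-lane byte offsets from wbase.  */
      vstrwq_scatter_offset_p_u32 (wbase, woff, wval, p);

      /* f16 scatter with element-sized (shifted) offsets.  */
      vstrhq_scatter_shifted_offset_f16 (hbase, hoff, hval);
    }
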
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h index 7e32be6d583..5ea42bd6a5b 100644 --- a/gcc/config/arm/arm_mve.h +++ b/gcc/config/arm/arm_mve.h @@ -1868,6 +1868,36 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t; #define vstrwq_p_f32(__addr, __value, __p) __arm_vstrwq_p_f32(__addr, __value, __p) #define vstrwq_p_s32(__addr, __value, __p) __arm_vstrwq_p_s32(__addr, __value, __p) #define vstrwq_p_u32(__addr, __value, __p) __arm_vstrwq_p_u32(__addr, __value, __p) +#define vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p) +#define vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p) +#define vstrdq_scatter_base_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_s64(__addr, __offset, __value) +#define vstrdq_scatter_base_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_u64(__addr, __offset, __value) +#define vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p) +#define vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p) +#define vstrdq_scatter_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_offset_s64(__base, __offset, __value) +#define vstrdq_scatter_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_offset_u64(__base, __offset, __value) +#define vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p) +#define vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p) +#define vstrdq_scatter_shifted_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_s64(__base, __offset, __value) +#define vstrdq_scatter_shifted_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_u64(__base, __offset, __value) +#define vstrhq_scatter_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_offset_f16(__base, __offset, __value) +#define vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p) +#define vstrhq_scatter_shifted_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_f16(__base, __offset, __value) +#define vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p) +#define vstrwq_scatter_base_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_f32(__addr, __offset, __value) +#define vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p) +#define vstrwq_scatter_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_offset_f32(__base, __offset, __value) +#define vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p) +#define vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p) +#define vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p) +#define vstrwq_scatter_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_offset_s32(__base, __offset, __value) +#define 
vstrwq_scatter_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_offset_u32(__base, __offset, __value) +#define vstrwq_scatter_shifted_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_f32(__base, __offset, __value) +#define vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) +#define vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) +#define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) +#define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) +#define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) #endif __extension__ extern __inline void @@ -12171,6 +12201,146 @@ __arm_vstrwq_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p) __builtin_mve_vstrwq_p_uv4si ((__builtin_neon_si *) __addr, __value, __p); } +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_p_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrdq_scatter_base_p_sv2di (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_p_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrdq_scatter_base_p_uv2di (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value) +{ + __builtin_mve_vstrdq_scatter_base_sv2di (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_base_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value) +{ + __builtin_mve_vstrdq_scatter_base_uv2di (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrdq_scatter_offset_p_sv2di (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrdq_scatter_offset_p_uv2di (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value) +{ + __builtin_mve_vstrdq_scatter_offset_sv2di (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) +{ + __builtin_mve_vstrdq_scatter_offset_uv2di (__base, __offset, __value); +} + +__extension__ extern __inline 
void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_shifted_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrdq_scatter_shifted_offset_p_sv2di (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_shifted_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrdq_scatter_shifted_offset_p_uv2di (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_shifted_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value) +{ + __builtin_mve_vstrdq_scatter_shifted_offset_sv2di (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrdq_scatter_shifted_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value) +{ + __builtin_mve_vstrdq_scatter_shifted_offset_uv2di (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrwq_scatter_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrwq_scatter_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value) +{ + __builtin_mve_vstrwq_scatter_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value) +{ + __builtin_mve_vstrwq_scatter_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrwq_scatter_shifted_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrwq_scatter_shifted_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value) +{ + __builtin_mve_vstrwq_scatter_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +__arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value) +{ + __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value); +} + #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */ __extension__ extern __inline void @@ -14467,6 +14637,76 @@ __arm_vstrhq_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p) __builtin_mve_vstrhq_p_fv8hf (__addr, __value, __p); } +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value) +{ + __builtin_mve_vstrhq_scatter_offset_fv8hf (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrhq_scatter_offset_p_fv8hf (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value) +{ + __builtin_mve_vstrhq_scatter_shifted_offset_fv8hf (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrhq_scatter_shifted_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrhq_scatter_shifted_offset_p_fv8hf (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value) +{ + __builtin_mve_vstrwq_scatter_base_fv4sf (__addr, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_base_p_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrwq_scatter_base_p_fv4sf (__addr, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value) +{ + __builtin_mve_vstrwq_scatter_offset_fv4sf (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrwq_scatter_offset_p_fv4sf (__base, __offset, __value, __p); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value) +{ + __builtin_mve_vstrwq_scatter_shifted_offset_fv4sf (__base, __offset, __value); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +__arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p) +{ + __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf (__base, __offset, __value, __p); +} + #endif enum { @@ -17107,6 +17347,136 @@ extern void *__ARM_undef; int 
(*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));}) +#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2) +#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \ + int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));}) + +#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) +#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ + int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));}) + +#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) +#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + 
__typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \ + int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));}) + +#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) +#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \ + int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce(__p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));}) + +#define vstrwq_scatter_base(p0,p1,p2) __arm_vstrwq_scatter_base(p0,p1,p2) +#define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ + int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));}) + 
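As a sketch (not part of the patch), the _Generic-based polymorphic variants defined in this region let one spelling dispatch on the type of the value vector; the resolved targets shown in the comments follow the _Generic associations above.

    #include <arm_mve.h>

    void
    poly_demo (uint32x4_t addr, int32x4_t wi, float32x4_t wf,
               float16_t *hbase, uint16x8_t hoff, float16x8_t hval)
    {
      vstrwq_scatter_base (addr, 4, wi);          /* resolves to __arm_vstrwq_scatter_base_s32 */
      vstrwq_scatter_base (addr, 4, wf);          /* resolves to __arm_vstrwq_scatter_base_f32 */
      vstrhq_scatter_offset (hbase, hoff, hval);  /* resolves to __arm_vstrhq_scatter_offset_f16 */
    }
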
+#define vstrwq_scatter_base_p(p0,p1,p2,p3) __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) +#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ + int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_p_f32(p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) + +#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2) +#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));}) + +#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) +#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) + +#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) +#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) + +#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2) +#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 
(__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));}) + +#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) +#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));}) + +#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) +#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) + +#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) +#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));}) + +#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) +#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: 
__arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \ + int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));}) + #else /* MVE Integer. */ #define vst4q(p0,p1) __arm_vst4q(p0,p1) @@ -19357,6 +19727,138 @@ extern void *__ARM_undef; int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));}) +#define vstrdq_scatter_base_p(p0,p1,p2,p3) __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) +#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \ + int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));}) + +#define vstrdq_scatter_base(p0,p1,p2) __arm_vstrdq_scatter_base(p0,p1,p2) +#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \ + int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));}) + +#define vstrdq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) +#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \ + int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));}) + +#define vstrdq_scatter_offset(p0,p1,p2) __arm_vstrdq_scatter_offset(p0,p1,p2) +#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \ + int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));}) + +#define vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) +#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int 
(*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \ + int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));}) + +#define vstrdq_scatter_shifted_offset(p0,p1,p2) __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) +#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce(__p0, int64_t *), __p1, __ARM_mve_coerce(__p2, int64x2_t)), \ + int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce(__p0, uint64_t *), __p1, __ARM_mve_coerce(__p2, uint64x2_t)));}) + +#define vstrhq_scatter_offset(p0,p1,p2) __arm_vstrhq_scatter_offset(p0,p1,p2) +#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vstrhq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) +#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t 
*), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + +#define vstrhq_scatter_shifted_offset(p0,p1,p2) __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) +#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) +#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \ + int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \ + int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + +#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2) +#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) +#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) 
({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vstrwq_scatter_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) +#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + +#define vstrwq_scatter_offset(p0,p1,p2) __arm_vstrwq_scatter_offset(p0,p1,p2) +#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vstrwq_scatter_shifted_offset(p0,p1,p2) __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) +#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));}) + +#define vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) +#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \ + __typeof(p1) __p1 = (p1); \ + __typeof(p2) __p2 = (p2); \ + _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \ + int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce(__p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \ + int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce(__p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));}) + #endif /* MVE Integer. 
*/ #define vldrdq_gather_offset(p0,p1) __arm_vldrdq_gather_offset(p0,p1) diff --git a/gcc/config/arm/arm_mve_builtins.def b/gcc/config/arm/arm_mve_builtins.def index ca8ba6a7709..144547fbdd8 100644 --- a/gcc/config/arm/arm_mve_builtins.def +++ b/gcc/config/arm/arm_mve_builtins.def @@ -785,3 +785,33 @@ VAR1 (STRU, vstrwq_u, v4si) VAR1 (STRS_P, vstrwq_p_f, v4sf) VAR1 (STRS_P, vstrwq_p_s, v4si) VAR1 (STRU_P, vstrwq_p_u, v4si) +VAR1 (STRSBS, vstrdq_scatter_base_s, v2di) +VAR1 (STRSBS, vstrwq_scatter_base_f, v4sf) +VAR1 (STRSBS_P, vstrdq_scatter_base_p_s, v2di) +VAR1 (STRSBS_P, vstrwq_scatter_base_p_f, v4sf) +VAR1 (STRSBU, vstrdq_scatter_base_u, v2di) +VAR1 (STRSBU_P, vstrdq_scatter_base_p_u, v2di) +VAR1 (STRSS, vstrdq_scatter_offset_s, v2di) +VAR1 (STRSS, vstrdq_scatter_shifted_offset_s, v2di) +VAR1 (STRSS, vstrhq_scatter_offset_f, v8hf) +VAR1 (STRSS, vstrhq_scatter_shifted_offset_f, v8hf) +VAR1 (STRSS, vstrwq_scatter_offset_f, v4sf) +VAR1 (STRSS, vstrwq_scatter_offset_s, v4si) +VAR1 (STRSS, vstrwq_scatter_shifted_offset_f, v4sf) +VAR1 (STRSS, vstrwq_scatter_shifted_offset_s, v4si) +VAR1 (STRSS_P, vstrdq_scatter_offset_p_s, v2di) +VAR1 (STRSS_P, vstrdq_scatter_shifted_offset_p_s, v2di) +VAR1 (STRSS_P, vstrhq_scatter_offset_p_f, v8hf) +VAR1 (STRSS_P, vstrhq_scatter_shifted_offset_p_f, v8hf) +VAR1 (STRSS_P, vstrwq_scatter_offset_p_f, v4sf) +VAR1 (STRSS_P, vstrwq_scatter_offset_p_s, v4si) +VAR1 (STRSS_P, vstrwq_scatter_shifted_offset_p_f, v4sf) +VAR1 (STRSS_P, vstrwq_scatter_shifted_offset_p_s, v4si) +VAR1 (STRSU, vstrdq_scatter_offset_u, v2di) +VAR1 (STRSU, vstrdq_scatter_shifted_offset_u, v2di) +VAR1 (STRSU, vstrwq_scatter_offset_u, v4si) +VAR1 (STRSU, vstrwq_scatter_shifted_offset_u, v4si) +VAR1 (STRSU_P, vstrdq_scatter_offset_p_u, v2di) +VAR1 (STRSU_P, vstrdq_scatter_shifted_offset_p_u, v2di) +VAR1 (STRSU_P, vstrwq_scatter_offset_p_u, v4si) +VAR1 (STRSU_P, vstrwq_scatter_shifted_offset_p_u, v4si) diff --git a/gcc/config/arm/constraints.md b/gcc/config/arm/constraints.md index 2641669d8e0..41a85e2713b 100644 --- a/gcc/config/arm/constraints.md +++ b/gcc/config/arm/constraints.md @@ -35,7 +35,7 @@ ;; Dt, Dp, Dz, Tu ;; in Thumb-1 state: Pa, Pb, Pc, Pd, Pe ;; in Thumb-2 state: Ha, Pj, PJ, Ps, Pt, Pu, Pv, Pw, Px, Py, Pz, Rd, Rf, Rb, Ra, -;; Rg +;; Rg, Ri ;; in all states: Pf, Pg ;; The following memory constraints have been used: @@ -90,6 +90,10 @@ (match_test "TARGET_HAVE_MVE && ((ival == 1) || (ival == 2) || (ival == 4) || (ival == 8))"))) +;; True if the immediate is multiple of 8 and in range of -/+ 1016 for MVE. +(define_predicate "mve_vldrd_immediate" + (match_test "satisfies_constraint_Ri (op)")) + (define_register_constraint "t" "TARGET_32BIT ? 
VFP_LO_REGS : NO_REGS" "The VFP registers @code{s0}-@code{s31}.") diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md index 68cf57f6381..5667882e941 100644 --- a/gcc/config/arm/mve.md +++ b/gcc/config/arm/mve.md @@ -203,7 +203,11 @@ VLDRWQGO_S VLDRWQGO_U VLDRWQGSO_F VLDRWQGSO_S VLDRWQGSO_U VSTRHQ_F VST1Q_S VST1Q_U VSTRHQSO_S VSTRHQSO_U VSTRHQSSO_S VSTRHQSSO_U VSTRHQ_S - VSTRHQ_U VSTRWQ_S VSTRWQ_U VSTRWQ_F VST1Q_F]) + VSTRHQ_U VSTRWQ_S VSTRWQ_U VSTRWQ_F VST1Q_F VSTRDQSB_S + VSTRDQSB_U VSTRDQSO_S VSTRDQSO_U VSTRDQSSO_S + VSTRDQSSO_U VSTRWQSO_S VSTRWQSO_U VSTRWQSSO_S + VSTRWQSSO_U VSTRHQSO_F VSTRHQSSO_F VSTRWQSB_F + VSTRWQSO_F VSTRWQSSO_F]) (define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF") (V8HF "V8HI") (V4SF "V4SI")]) @@ -368,7 +372,11 @@ (VLDRWQGSO_S "s") (VLDRWQGSO_U "u") (VST1Q_S "s") (VST1Q_U "u") (VSTRHQSO_S "s") (VSTRHQSO_U "u") (VSTRHQSSO_S "s") (VSTRHQSSO_U "u") (VSTRHQ_S "s") - (VSTRHQ_U "u") (VSTRWQ_S "s") (VSTRWQ_U "u")]) + (VSTRHQ_U "u") (VSTRWQ_S "s") (VSTRWQ_U "u") + (VSTRDQSB_S "s") (VSTRDQSB_U "u") (VSTRDQSO_S "s") + (VSTRDQSO_U "u") (VSTRDQSSO_S "s") (VSTRDQSSO_U "u") + (VSTRWQSO_U "u") (VSTRWQSO_S "s") (VSTRWQSSO_U "u") + (VSTRWQSSO_S "s")]) (define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32") (VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16") @@ -612,6 +620,11 @@ (define_int_iterator VSTRHSSOQ [VSTRHQSSO_S VSTRHQSSO_U]) (define_int_iterator VSTRHQ [VSTRHQ_S VSTRHQ_U]) (define_int_iterator VSTRWQ [VSTRWQ_S VSTRWQ_U]) +(define_int_iterator VSTRDSBQ [VSTRDQSB_S VSTRDQSB_U]) +(define_int_iterator VSTRDSOQ [VSTRDQSO_S VSTRDQSO_U]) +(define_int_iterator VSTRDSSOQ [VSTRDQSSO_S VSTRDQSSO_U]) +(define_int_iterator VSTRWSOQ [VSTRWQSO_S VSTRWQSO_U]) +(define_int_iterator VSTRWSSOQ [VSTRWQSSO_S VSTRWQSSO_U]) (define_insn "*mve_mov<mode>" [(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us") @@ -9196,3 +9209,437 @@ emit_insn (gen_mve_vstr<V_sz_elem1>q_<supf><mode>(operands[0],operands[1])); DONE; }) + +;; +;; [vstrdq_scatter_base_p_s vstrdq_scatter_base_p_u] +;; +(define_insn "mve_vstrdq_scatter_base_p_<supf>v2di" + [(set (mem:BLK (scratch)) + (unspec:BLK + [(match_operand:V2DI 0 "s_register_operand" "w") + (match_operand:SI 1 "mve_vldrd_immediate" "Ri") + (match_operand:V2DI 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRDSBQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\;\tvstrdt.u64\t%q2, [%q0, %1]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrdq_scatter_base_s vstrdq_scatter_base_u] +;; +(define_insn "mve_vstrdq_scatter_base_<supf>v2di" + [(set (mem:BLK (scratch)) + (unspec:BLK + [(match_operand:V2DI 0 "s_register_operand" "=w") + (match_operand:SI 1 "mve_vldrd_immediate" "Ri") + (match_operand:V2DI 2 "s_register_operand" "w")] + VSTRDSBQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrd.u64\t%q2, [%q0, %1]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrdq_scatter_offset_p_s vstrdq_scatter_offset_p_u] +;; +(define_insn "mve_vstrdq_scatter_offset_p_<supf>v2di" + [(set (match_operand:V2DI 0 "memory_operand" "=Us") + (unspec:V2DI + [(match_operand:V2DI 1 "s_register_operand" "w") + (match_operand:V2DI 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRDSOQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; 
+ ops[2] = operands[2]; + output_asm_insn ("vpst\;\tvstrdt.64\t%q2, [%m0, %q1]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrdq_scatter_offset_s vstrdq_scatter_offset_u] +;; +(define_insn "mve_vstrdq_scatter_offset_<supf>v2di" + [(set (match_operand:V2DI 0 "memory_operand" "=Us") + (unspec:V2DI + [(match_operand:V2DI 1 "s_register_operand" "w") + (match_operand:V2DI 2 "s_register_operand" "w")] + VSTRDSOQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrd.64\t%q2, [%m0, %q1]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrdq_scatter_shifted_offset_p_s vstrdq_scatter_shifted_offset_p_u] +;; +(define_insn "mve_vstrdq_scatter_shifted_offset_p_<supf>v2di" + [(set (match_operand:V2DI 0 "memory_operand" "=Us") + (unspec:V2DI + [(match_operand:V2DI 1 "s_register_operand" "w") + (match_operand:V2DI 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRDSSOQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\;\tvstrdt.64\t%q2, [%m0, %q1, UXTW #3]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrdq_scatter_shifted_offset_s vstrdq_scatter_shifted_offset_u] +;; +(define_insn "mve_vstrdq_scatter_shifted_offset_<supf>v2di" + [(set (match_operand:V2DI 0 "memory_operand" "=Us") + (unspec:V2DI + [(match_operand:V2DI 1 "s_register_operand" "w") + (match_operand:V2DI 2 "s_register_operand" "w")] + VSTRDSSOQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrd.64\t%q2, [%m0, %q1, UXTW #3]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrhq_scatter_offset_f] +;; +(define_insn "mve_vstrhq_scatter_offset_fv8hf" + [(set (match_operand:V8HI 0 "memory_operand" "=Us") + (unspec:V8HI + [(match_operand:V8HI 1 "s_register_operand" "w") + (match_operand:V8HF 2 "s_register_operand" "w")] + VSTRHQSO_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrh.16\t%q2, [%m0, %q1]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrhq_scatter_offset_p_f] +;; +(define_insn "mve_vstrhq_scatter_offset_p_fv8hf" + [(set (match_operand:V8HI 0 "memory_operand" "=Us") + (unspec:V8HI + [(match_operand:V8HI 1 "s_register_operand" "w") + (match_operand:V8HF 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRHQSO_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\n\tvstrht.16\t%q2, [%m0, %q1]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrhq_scatter_shifted_offset_f] +;; +(define_insn "mve_vstrhq_scatter_shifted_offset_fv8hf" + [(set (match_operand:V8HI 0 "memory_operand" "=Us") + (unspec:V8HI + [(match_operand:V8HI 1 "s_register_operand" "w") + (match_operand:V8HF 2 "s_register_operand" "w")] + VSTRHQSSO_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrh.16\t%q2, [%m0, %q1, uxtw #1]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrhq_scatter_shifted_offset_p_f] +;; +(define_insn "mve_vstrhq_scatter_shifted_offset_p_fv8hf" + [(set 
(match_operand:V8HI 0 "memory_operand" "=Us") + (unspec:V8HI + [(match_operand:V8HI 1 "s_register_operand" "w") + (match_operand:V8HF 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRHQSSO_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\n\tvstrht.16\t%q2, [%m0, %q1, uxtw #1]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrwq_scatter_base_f] +;; +(define_insn "mve_vstrwq_scatter_base_fv4sf" + [(set (mem:BLK (scratch)) + (unspec:BLK + [(match_operand:V4SI 0 "s_register_operand" "w") + (match_operand:SI 1 "immediate_operand" "i") + (match_operand:V4SF 2 "s_register_operand" "w")] + VSTRWQSB_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrw.u32\t%q2, [%q0, %1]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrwq_scatter_base_p_f] +;; +(define_insn "mve_vstrwq_scatter_base_p_fv4sf" + [(set (mem:BLK (scratch)) + (unspec:BLK + [(match_operand:V4SI 0 "s_register_operand" "w") + (match_operand:SI 1 "immediate_operand" "i") + (match_operand:V4SF 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRWQSB_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\n\tvstrwt.u32\t%q2, [%q0, %1]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrwq_scatter_offset_f] +;; +(define_insn "mve_vstrwq_scatter_offset_fv4sf" + [(set (match_operand:V4SI 0 "memory_operand" "=Us") + (unspec:V4SI + [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SF 2 "s_register_operand" "w")] + VSTRWQSO_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrw.32\t%q2, [%m0, %q1]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrwq_scatter_offset_p_f] +;; +(define_insn "mve_vstrwq_scatter_offset_p_fv4sf" + [(set (match_operand:V4SI 0 "memory_operand" "=Us") + (unspec:V4SI + [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SF 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRWQSO_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\n\tvstrwt.32\t%q2, [%m0, %q1]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrwq_scatter_offset_p_s vstrwq_scatter_offset_p_u] +;; +(define_insn "mve_vstrwq_scatter_offset_p_<supf>v4si" + [(set (match_operand:V4SI 0 "memory_operand" "=Us") + (unspec:V4SI + [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRWSOQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\n\tvstrwt.32\t%q2, [%m0, %q1]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrwq_scatter_offset_s vstrwq_scatter_offset_u] +;; +(define_insn "mve_vstrwq_scatter_offset_<supf>v4si" + [(set (match_operand:V4SI 0 "memory_operand" "=Us") + (unspec:V4SI + [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + 
VSTRWSOQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrw.32\t%q2, [%m0, %q1]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrwq_scatter_shifted_offset_f] +;; +(define_insn "mve_vstrwq_scatter_shifted_offset_fv4sf" + [(set (match_operand:V4SI 0 "memory_operand" "=Us") + (unspec:V4SI + [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SF 2 "s_register_operand" "w")] + VSTRWQSSO_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrw.32\t%q2, [%m0, %q1, uxtw #2]",ops); + return ""; +} + [(set_attr "length" "4")]) + +;; +;; [vstrwq_scatter_shifted_offset_p_f] +;; +(define_insn "mve_vstrwq_scatter_shifted_offset_p_fv4sf" + [(set (match_operand:V4SI 0 "memory_operand" "=Us") + (unspec:V4SI + [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SF 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRWQSSO_F)) + ] + "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\;\tvstrwt.32\t%q2, [%m0, %q1, uxtw #2]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrwq_scatter_shifted_offset_p_s vstrwq_scatter_shifted_offset_p_u] +;; +(define_insn "mve_vstrwq_scatter_shifted_offset_p_<supf>v4si" + [(set (match_operand:V4SI 0 "memory_operand" "=Us") + (unspec:V4SI + [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w") + (match_operand:HI 3 "vpr_register_operand" "Up")] + VSTRWSSOQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vpst\;\tvstrwt.32\t%q2, [%m0, %q1, uxtw #2]",ops); + return ""; +} + [(set_attr "length" "8")]) + +;; +;; [vstrwq_scatter_shifted_offset_s vstrwq_scatter_shifted_offset_u] +;; +(define_insn "mve_vstrwq_scatter_shifted_offset_<supf>v4si" + [(set (match_operand:V4SI 0 "memory_operand" "=Us") + (unspec:V4SI + [(match_operand:V4SI 1 "s_register_operand" "w") + (match_operand:V4SI 2 "s_register_operand" "w")] + VSTRWSSOQ)) + ] + "TARGET_HAVE_MVE" +{ + rtx ops[3]; + ops[0] = operands[0]; + ops[1] = operands[1]; + ops[2] = operands[2]; + output_asm_insn ("vstrw.32\t%q2, [%m0, %q1, uxtw #2]",ops); + return ""; +} + [(set_attr "length" "4")]) diff --git a/gcc/config/arm/predicates.md b/gcc/config/arm/predicates.md index 2b65e646b05..bb302ed5b42 100644 --- a/gcc/config/arm/predicates.md +++ b/gcc/config/arm/predicates.md @@ -59,6 +59,14 @@ (define_predicate "mve_imm_selective_upto_8" (match_test "satisfies_constraint_Rg (op)")) +;; True if the immediate is the range +/- 1016 and multiple of 8 for MVE. 
+(define_constraint "Ri" + "@internal In Thumb-2 state a constant is multiple of 8 and in range + of -/+ 1016 for MVE" + (and (match_code "const_int") + (match_test "TARGET_HAVE_MVE && (-1016 <= ival) && (ival <= 1016) + && ((ival % 8) == 0)"))) + ; Predicate for stack protector guard's address in ; stack_protect_combined_set_insn and stack_protect_combined_test_insn patterns (define_predicate "guard_addr_operand" diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 13a4d4f6a57..8fee29fad0d 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -2,6 +2,53 @@ Mihail Ionescu <mihail.ionescu@arm.com> Srinath Parvathaneni <srinath.parvathaneni@arm.com> + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_s64.c: New test. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_u64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_s64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_u64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_s64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_u64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_s64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_u64.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_s64.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_u64.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_s64.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_u64.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_f16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_f16.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_f16.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_f16.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_f32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_f32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_f32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_f32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_s32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_u32.c: Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_f32.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_f32.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_s32.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_u32.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_s32.c: + Likewise. + * gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_u32.c: + Likewise. + +2020-03-18 Andre Vieira <andre.simoesdiasvieira@arm.com> + Mihail Ionescu <mihail.ionescu@arm.com> + Srinath Parvathaneni <srinath.parvathaneni@arm.com> + * gcc.target/arm/mve/intrinsics/vst1q_f16.c: New test. * gcc.target/arm/mve/intrinsics/vst1q_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vst1q_s16.c: Likewise. 
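The Ri constraint added above accepts exactly the immediates that the vstrdq_scatter_base patterns can take: a multiple of 8 in the range -1016 to +1016. A minimal standalone sketch of the same validity check, written as a hypothetical C helper purely for illustration (in GCC itself the test is the match_test inside the constraint; the helper name below is not part of the patch):

#include <stdbool.h>

/* Hypothetical helper mirroring the Ri constraint: a valid
   vstrdq_scatter_base immediate is a multiple of 8 in [-1016, +1016].  */
static bool
mve_scatter_base_offset_ok (long long ival)
{
  return ival >= -1016 && ival <= 1016 && (ival % 8) == 0;
}

The vstrdq_scatter_base tests that follow use the immediates 8 and 1016, both of which satisfy this check.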
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_s64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_s64.c new file mode 100644 index 00000000000..08996054ab5 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_s64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint64x2_t addr, const int offset, int64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_base_p_s64 (addr, 8, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.u64" } } */ + +void +foo1 (uint64x2_t addr, const int offset, int64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_base_p (addr, 8, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.u64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_u64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_u64.c new file mode 100644 index 00000000000..65c9ddd6c5b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_p_u64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint64x2_t addr, const int offset, uint64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_base_p_u64 (addr, 8, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.u64" } } */ + +void +foo1 (uint64x2_t addr, const int offset, uint64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_base_p (addr, 8, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.u64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_s64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_s64.c new file mode 100644 index 00000000000..8ae6a96794c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_s64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint64x2_t addr, const int offset, int64x2_t value) +{ + vstrdq_scatter_base_s64 (addr, 1016, value); +} + +/* { dg-final { scan-assembler "vstrd.u64" } } */ + +void +foo1 (uint64x2_t addr, const int offset, int64x2_t value) +{ + vstrdq_scatter_base (addr, 1016, value); +} + +/* { dg-final { scan-assembler "vstrd.u64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_u64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_u64.c new file mode 100644 index 00000000000..da15b609798 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_base_u64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint64x2_t addr, const int offset, uint64x2_t value) +{ + vstrdq_scatter_base_u64 (addr, 8, value); +} + +/* { dg-final { scan-assembler "vstrd.u64" } } */ + +void +foo1 (uint64x2_t addr, const int offset, uint64x2_t value) +{ + vstrdq_scatter_base (addr, 8, value); +} + +/* { dg-final { scan-assembler "vstrd.u64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_s64.c 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_s64.c new file mode 100644 index 00000000000..01d2c682925 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_s64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (int64_t * base, uint64x2_t offset, int64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_offset_p_s64 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.64" } } */ + +void +foo1 (int64_t * base, uint64x2_t offset, int64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_u64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_u64.c new file mode 100644 index 00000000000..2458c78a02c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_p_u64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint64_t * base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_offset_p_u64 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.64" } } */ + +void +foo1 (uint64_t * base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_s64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_s64.c new file mode 100644 index 00000000000..1e14a38a67f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_s64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (int64_t * base, uint64x2_t offset, int64x2_t value) +{ + vstrdq_scatter_offset_s64 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrd.64" } } */ + +void +foo1 (int64_t * base, uint64x2_t offset, int64x2_t value) +{ + vstrdq_scatter_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrd.64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_u64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_u64.c new file mode 100644 index 00000000000..fed19edaca9 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_offset_u64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint64_t * base, uint64x2_t offset, uint64x2_t value) +{ + vstrdq_scatter_offset_u64 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrd.64" } } */ + +void +foo1 (uint64_t * base, uint64x2_t offset, uint64x2_t value) +{ + vstrdq_scatter_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrd.64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_s64.c 
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_s64.c new file mode 100644 index 00000000000..b93bdd7244c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_s64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (int64_t * base, uint64x2_t offset, int64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_shifted_offset_p_s64 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.64" } } */ + +void +foo1 (int64_t * base, uint64x2_t offset, int64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_shifted_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_u64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_u64.c new file mode 100644 index 00000000000..9993028efde --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_p_u64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint64_t * base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_shifted_offset_p_u64 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.64" } } */ + +void +foo1 (uint64_t * base, uint64x2_t offset, uint64x2_t value, mve_pred16_t p) +{ + vstrdq_scatter_shifted_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrdt.64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_s64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_s64.c new file mode 100644 index 00000000000..5cb7aeda96f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_s64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (int64_t * base, uint64x2_t offset, int64x2_t value) +{ + vstrdq_scatter_shifted_offset_s64 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrd.64" } } */ + +void +foo1 (int64_t * base, uint64x2_t offset, int64x2_t value) +{ + vstrdq_scatter_shifted_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrd.64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_u64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_u64.c new file mode 100644 index 00000000000..7053953187a --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrdq_scatter_shifted_offset_u64.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint64_t * base, uint64x2_t offset, uint64x2_t value) +{ + vstrdq_scatter_shifted_offset_u64 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrd.64" } } */ + +void +foo1 (uint64_t * base, uint64x2_t offset, uint64x2_t value) +{ + vstrdq_scatter_shifted_offset (base, offset, value); +} + +/* { dg-final { 
scan-assembler "vstrd.64" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_f16.c new file mode 100644 index 00000000000..aea8adbb23a --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_f16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (float16_t * base, uint16x8_t offset, float16x8_t value) +{ + vstrhq_scatter_offset_f16 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrh.16" } } */ + +void +foo1 (float16_t * base, uint16x8_t offset, float16x8_t value) +{ + vstrhq_scatter_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrh.16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_f16.c new file mode 100644 index 00000000000..dc4ce1db14b --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_offset_p_f16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (float16_t * base, uint16x8_t offset, float16x8_t value, mve_pred16_t p) +{ + vstrhq_scatter_offset_p_f16 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrht.16" } } */ + +void +foo1 (float16_t * base, uint16x8_t offset, float16x8_t value, mve_pred16_t p) +{ + vstrhq_scatter_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrht.16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_f16.c new file mode 100644 index 00000000000..1c90cc01bb1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_f16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (float16_t * base, uint16x8_t offset, float16x8_t value) +{ + vstrhq_scatter_shifted_offset_f16 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrh.16" } } */ + +void +foo1 (float16_t * base, uint16x8_t offset, float16x8_t value) +{ + vstrhq_scatter_shifted_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrh.16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_f16.c new file mode 100644 index 00000000000..e45d1d8a631 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrhq_scatter_shifted_offset_p_f16.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (float16_t * base, uint16x8_t offset, float16x8_t value, mve_pred16_t p) +{ + vstrhq_scatter_shifted_offset_p_f16 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrht.16" } } */ + +void +foo1 (float16_t * base, uint16x8_t offset, float16x8_t value, mve_pred16_t p) 
+{ + vstrhq_scatter_shifted_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrht.16" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_f32.c new file mode 100644 index 00000000000..7895a761a6e --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_f32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint32x4_t addr, float32x4_t value) +{ + vstrwq_scatter_base_f32 (addr, 8, value); +} + +/* { dg-final { scan-assembler "vstrw.u32" } } */ + +void +foo1 (uint32x4_t addr, float32x4_t value) +{ + vstrwq_scatter_base (addr, 8, value); +} + +/* { dg-final { scan-assembler "vstrw.u32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_f32.c new file mode 100644 index 00000000000..c0069c3bd05 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_base_p_f32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint32x4_t addr, float32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_base_p_f32 (addr, 8, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.u32" } } */ + +void +foo1 (uint32x4_t addr, float32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_base_p (addr, 8, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.u32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_f32.c new file mode 100644 index 00000000000..a70fb858a1c --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_f32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (float32_t * base, uint32x4_t offset, float32x4_t value) +{ + vstrwq_scatter_offset_f32 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ + +void +foo1 (float32_t * base, uint32x4_t offset, float32x4_t value) +{ + vstrwq_scatter_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_f32.c new file mode 100644 index 00000000000..e8cc78278c1 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_f32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (float32_t * base, uint32x4_t offset, float32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_offset_p_f32 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ + +void +foo1 (float32_t * base, uint32x4_t offset, float32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler 
"vstrwt.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_s32.c new file mode 100644 index 00000000000..7802ad51352 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (int32_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_offset_p_s32 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ + +void +foo1 (int32_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_u32.c new file mode 100644 index 00000000000..a01fb14ae40 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_p_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint32_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_offset_p_u32 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ + +void +foo1 (uint32_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_s32.c new file mode 100644 index 00000000000..252381966c3 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (int32_t * base, uint32x4_t offset, int32x4_t value) +{ + vstrwq_scatter_offset_s32 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ + +void +foo1 (int32_t * base, uint32x4_t offset, int32x4_t value) +{ + vstrwq_scatter_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_u32.c new file mode 100644 index 00000000000..ebdd83bfc98 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_offset_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint32_t * base, uint32x4_t offset, uint32x4_t value) +{ + vstrwq_scatter_offset_u32 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ + +void +foo1 (uint32_t * base, uint32x4_t offset, uint32x4_t value) +{ + vstrwq_scatter_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ diff --git 
a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_f32.c new file mode 100644 index 00000000000..ce4e5888b47 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_f32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (float32_t * base, uint32x4_t offset, float32x4_t value) +{ + vstrwq_scatter_shifted_offset_f32 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ + +void +foo1 (float32_t * base, uint32x4_t offset, float32x4_t value) +{ + vstrwq_scatter_shifted_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_f32.c new file mode 100644 index 00000000000..452b540c7e8 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_f32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */ +/* { dg-add-options arm_v8_1m_mve_fp } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (float32_t * base, uint32x4_t offset, float32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_shifted_offset_p_f32 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ + +void +foo1 (float32_t * base, uint32x4_t offset, float32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_shifted_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_s32.c new file mode 100644 index 00000000000..56ceae4e1be --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (int32_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_shifted_offset_p_s32 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ + +void +foo1 (int32_t * base, uint32x4_t offset, int32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_shifted_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_u32.c new file mode 100644 index 00000000000..02c59704053 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_p_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint32_t * base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_shifted_offset_p_u32 (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ + +void +foo1 (uint32_t 
* base, uint32x4_t offset, uint32x4_t value, mve_pred16_t p) +{ + vstrwq_scatter_shifted_offset_p (base, offset, value, p); +} + +/* { dg-final { scan-assembler "vstrwt.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_s32.c new file mode 100644 index 00000000000..4b087277e0f --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_s32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (int32_t * base, uint32x4_t offset, int32x4_t value) +{ + vstrwq_scatter_shifted_offset_s32 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ + +void +foo1 (int32_t * base, uint32x4_t offset, int32x4_t value) +{ + vstrwq_scatter_shifted_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_u32.c new file mode 100644 index 00000000000..6a9156c1505 --- /dev/null +++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vstrwq_scatter_shifted_offset_u32.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-require-effective-target arm_v8_1m_mve_ok } */ +/* { dg-add-options arm_v8_1m_mve } */ +/* { dg-additional-options "-O2" } */ + +#include "arm_mve.h" + +void +foo (uint32_t * base, uint32x4_t offset, uint32x4_t value) +{ + vstrwq_scatter_shifted_offset_u32 (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */ + +void +foo1 (uint32_t * base, uint32x4_t offset, uint32x4_t value) +{ + vstrwq_scatter_shifted_offset (base, offset, value); +} + +/* { dg-final { scan-assembler "vstrw.32" } } */
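The three addressing forms exercised by these tests differ only in how each lane's address is formed: the scatter_base intrinsics take a vector of addresses plus a constant byte offset, the scatter_offset intrinsics add an unscaled (byte) offset vector to a scalar base pointer, and the scatter_shifted_offset intrinsics first scale the offset vector by the element size (hence the uxtw #1/#2/#3 in the output templates above). A small usage sketch, illustrative only and mirroring the tests above, showing the explicitly typed and the polymorphic spellings side by side (the function name is hypothetical):

#include "arm_mve.h"

/* Store val[i] to base[idx[i]] for each lane; idx holds element
   indices because the shifted-offset form scales them by 4 (uxtw #2).  */
void
scatter_words (uint32_t *base, uint32x4_t idx, uint32x4_t val, mve_pred16_t p)
{
  vstrwq_scatter_shifted_offset_u32 (base, idx, val);

  /* Same store through the polymorphic name; only the lanes enabled
     by the predicate p are written.  */
  vstrwq_scatter_shifted_offset_p (base, idx, val, p);
}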