From d5ecead9406647c58e178bccc1f9d85259b087e5 Mon Sep 17 00:00:00 2001
From: James Greenhalgh
Date: Thu, 23 Jan 2014 14:59:20 +0000
Subject: [PATCH] [AArch64_BE 4/4] Big-Endian lane numbering fix

2014-01-23  James Greenhalgh
	    Alex Velenko

	* config/aarch64/arm_neon.h (vaddv_s8): __LANE0 cleanup.
	(vaddv_s16): Likewise.
	(vaddv_s32): Likewise.
	(vaddv_u8): Likewise.
	(vaddv_u16): Likewise.
	(vaddv_u32): Likewise.
	(vaddvq_s8): Likewise.
	(vaddvq_s16): Likewise.
	(vaddvq_s32): Likewise.
	(vaddvq_s64): Likewise.
	(vaddvq_u8): Likewise.
	(vaddvq_u16): Likewise.
	(vaddvq_u32): Likewise.
	(vaddvq_u64): Likewise.
	(vaddv_f32): Likewise.
	(vaddvq_f32): Likewise.
	(vaddvq_f64): Likewise.
	(vmaxv_f32): Likewise.
	(vmaxv_s8): Likewise.
	(vmaxv_s16): Likewise.
	(vmaxv_s32): Likewise.
	(vmaxv_u8): Likewise.
	(vmaxv_u16): Likewise.
	(vmaxv_u32): Likewise.
	(vmaxvq_f32): Likewise.
	(vmaxvq_f64): Likewise.
	(vmaxvq_s8): Likewise.
	(vmaxvq_s16): Likewise.
	(vmaxvq_s32): Likewise.
	(vmaxvq_u8): Likewise.
	(vmaxvq_u16): Likewise.
	(vmaxvq_u32): Likewise.
	(vmaxnmv_f32): Likewise.
	(vmaxnmvq_f32): Likewise.
	(vmaxnmvq_f64): Likewise.
	(vminv_f32): Likewise.
	(vminv_s8): Likewise.
	(vminv_s16): Likewise.
	(vminv_s32): Likewise.
	(vminv_u8): Likewise.
	(vminv_u16): Likewise.
	(vminv_u32): Likewise.
	(vminvq_f32): Likewise.
	(vminvq_f64): Likewise.
	(vminvq_s8): Likewise.
	(vminvq_s16): Likewise.
	(vminvq_s32): Likewise.
	(vminvq_u8): Likewise.
	(vminvq_u16): Likewise.
	(vminvq_u32): Likewise.
	(vminnmv_f32): Likewise.
	(vminnmvq_f32): Likewise.
	(vminnmvq_f64): Likewise.

Co-Authored-By: Alex Velenko

From-SVN: r206973
---
 gcc/ChangeLog                 |  57 +++++++++++++++++
 gcc/config/aarch64/arm_neon.h | 114 ++++++++++++++++------------------
 2 files changed, 110 insertions(+), 61 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c00a9553d0b..753df289ad1 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,60 @@
+2014-01-23  James Greenhalgh
+	    Alex Velenko
+
+	* config/aarch64/arm_neon.h (vaddv_s8): __LANE0 cleanup.
+	(vaddv_s16): Likewise.
+	(vaddv_s32): Likewise.
+	(vaddv_u8): Likewise.
+	(vaddv_u16): Likewise.
+	(vaddv_u32): Likewise.
+	(vaddvq_s8): Likewise.
+	(vaddvq_s16): Likewise.
+	(vaddvq_s32): Likewise.
+	(vaddvq_s64): Likewise.
+	(vaddvq_u8): Likewise.
+	(vaddvq_u16): Likewise.
+	(vaddvq_u32): Likewise.
+	(vaddvq_u64): Likewise.
+	(vaddv_f32): Likewise.
+	(vaddvq_f32): Likewise.
+	(vaddvq_f64): Likewise.
+	(vmaxv_f32): Likewise.
+	(vmaxv_s8): Likewise.
+	(vmaxv_s16): Likewise.
+	(vmaxv_s32): Likewise.
+	(vmaxv_u8): Likewise.
+	(vmaxv_u16): Likewise.
+	(vmaxv_u32): Likewise.
+	(vmaxvq_f32): Likewise.
+	(vmaxvq_f64): Likewise.
+	(vmaxvq_s8): Likewise.
+	(vmaxvq_s16): Likewise.
+	(vmaxvq_s32): Likewise.
+	(vmaxvq_u8): Likewise.
+	(vmaxvq_u16): Likewise.
+	(vmaxvq_u32): Likewise.
+	(vmaxnmv_f32): Likewise.
+	(vmaxnmvq_f32): Likewise.
+	(vmaxnmvq_f64): Likewise.
+	(vminv_f32): Likewise.
+	(vminv_s8): Likewise.
+	(vminv_s16): Likewise.
+	(vminv_s32): Likewise.
+	(vminv_u8): Likewise.
+	(vminv_u16): Likewise.
+	(vminv_u32): Likewise.
+	(vminvq_f32): Likewise.
+	(vminvq_f64): Likewise.
+	(vminvq_s8): Likewise.
+	(vminvq_s16): Likewise.
+	(vminvq_s32): Likewise.
+	(vminvq_u8): Likewise.
+	(vminvq_u16): Likewise.
+	(vminvq_u32): Likewise.
+	(vminnmv_f32): Likewise.
+	(vminnmvq_f32): Likewise.
+	(vminnmvq_f64): Likewise.
+
 2014-01-23  James Greenhalgh
 
 	* config/aarch64/aarch64-simd.md
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 1dcff675f03..b6aee7a7426 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -15311,30 +15311,24 @@ vaddd_u64 (uint64x1_t __a, uint64x1_t __b)
   return __a + __b;
 }
 
-#if __AARCH64EB__
-#define __LANE0(__t) ((__t) - 1)
-#else
-#define __LANE0(__t) 0
-#endif
-
 /* vaddv */
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vaddv_s8 (int8x8_t __a)
 {
-  return vget_lane_s8 (__builtin_aarch64_reduc_splus_v8qi (__a), __LANE0 (8));
+  return vget_lane_s8 (__builtin_aarch64_reduc_splus_v8qi (__a), 0);
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vaddv_s16 (int16x4_t __a)
 {
-  return vget_lane_s16 (__builtin_aarch64_reduc_splus_v4hi (__a), __LANE0 (4));
+  return vget_lane_s16 (__builtin_aarch64_reduc_splus_v4hi (__a), 0);
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vaddv_s32 (int32x2_t __a)
 {
-  return vget_lane_s32 (__builtin_aarch64_reduc_splus_v2si (__a), __LANE0 (2));
+  return vget_lane_s32 (__builtin_aarch64_reduc_splus_v2si (__a), 0);
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
@@ -15342,7 +15336,7 @@ vaddv_u8 (uint8x8_t __a)
 {
   return vget_lane_u8 ((uint8x8_t)
	  __builtin_aarch64_reduc_uplus_v8qi ((int8x8_t) __a),
-	  __LANE0 (8));
+	  0);
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
@@ -15350,7 +15344,7 @@ vaddv_u16 (uint16x4_t __a)
 {
   return vget_lane_u16 ((uint16x4_t)
	  __builtin_aarch64_reduc_uplus_v4hi ((int16x4_t) __a),
-	  __LANE0 (4));
+	  0);
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
@@ -15358,32 +15352,32 @@ vaddv_u32 (uint32x2_t __a)
 {
   return vget_lane_u32 ((uint32x2_t)
	  __builtin_aarch64_reduc_uplus_v2si ((int32x2_t) __a),
-	  __LANE0 (2));
+	  0);
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vaddvq_s8 (int8x16_t __a)
 {
   return vgetq_lane_s8 (__builtin_aarch64_reduc_splus_v16qi (__a),
-			__LANE0 (16));
+			0);
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vaddvq_s16 (int16x8_t __a)
 {
-  return vgetq_lane_s16 (__builtin_aarch64_reduc_splus_v8hi (__a), __LANE0 (8));
+  return vgetq_lane_s16 (__builtin_aarch64_reduc_splus_v8hi (__a), 0);
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vaddvq_s32 (int32x4_t __a)
 {
-  return vgetq_lane_s32 (__builtin_aarch64_reduc_splus_v4si (__a), __LANE0 (4));
+  return vgetq_lane_s32 (__builtin_aarch64_reduc_splus_v4si (__a), 0);
 }
 
 __extension__ static __inline int64_t __attribute__ ((__always_inline__))
 vaddvq_s64 (int64x2_t __a)
 {
-  return vgetq_lane_s64 (__builtin_aarch64_reduc_splus_v2di (__a), __LANE0 (2));
+  return vgetq_lane_s64 (__builtin_aarch64_reduc_splus_v2di (__a), 0);
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
@@ -15391,7 +15385,7 @@ vaddvq_u8 (uint8x16_t __a)
 {
   return vgetq_lane_u8 ((uint8x16_t)
	  __builtin_aarch64_reduc_uplus_v16qi ((int8x16_t) __a),
-	  __LANE0 (16));
+	  0);
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
@@ -15399,7 +15393,7 @@ vaddvq_u16 (uint16x8_t __a)
 {
   return vgetq_lane_u16 ((uint16x8_t)
	  __builtin_aarch64_reduc_uplus_v8hi ((int16x8_t) __a),
-	  __LANE0 (8));
+	  0);
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
@@ -15407,7 +15401,7 @@ vaddvq_u32 (uint32x4_t __a)
 {
   return vgetq_lane_u32 ((uint32x4_t)
	  __builtin_aarch64_reduc_uplus_v4si ((int32x4_t) __a),
-	  __LANE0 (4));
+	  0);
 }
 
 __extension__ static __inline uint64_t __attribute__ ((__always_inline__))
@@ -15415,28 +15409,28 @@ vaddvq_u64 (uint64x2_t __a)
 {
   return vgetq_lane_u64 ((uint64x2_t)
	  __builtin_aarch64_reduc_uplus_v2di ((int64x2_t) __a),
-	  __LANE0 (2));
+	  0);
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vaddv_f32 (float32x2_t __a)
 {
   float32x2_t __t = __builtin_aarch64_reduc_splus_v2sf (__a);
-  return vget_lane_f32 (__t, __LANE0 (2));
+  return vget_lane_f32 (__t, 0);
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vaddvq_f32 (float32x4_t __a)
 {
   float32x4_t __t = __builtin_aarch64_reduc_splus_v4sf (__a);
-  return vgetq_lane_f32 (__t, __LANE0 (4));
+  return vgetq_lane_f32 (__t, 0);
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vaddvq_f64 (float64x2_t __a)
 {
   float64x2_t __t = __builtin_aarch64_reduc_splus_v2df (__a);
-  return vgetq_lane_f64 (__t, __LANE0 (2));
+  return vgetq_lane_f64 (__t, 0);
 }
 
 /* vbsl */
@@ -19848,25 +19842,25 @@ __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vmaxv_f32 (float32x2_t __a)
 {
   return vget_lane_f32 (__builtin_aarch64_reduc_smax_nan_v2sf (__a),
-			__LANE0 (2));
+			0);
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vmaxv_s8 (int8x8_t __a)
 {
-  return vget_lane_s8 (__builtin_aarch64_reduc_smax_v8qi (__a), __LANE0 (8));
+  return vget_lane_s8 (__builtin_aarch64_reduc_smax_v8qi (__a), 0);
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vmaxv_s16 (int16x4_t __a)
 {
-  return vget_lane_s16 (__builtin_aarch64_reduc_smax_v4hi (__a), __LANE0 (4));
+  return vget_lane_s16 (__builtin_aarch64_reduc_smax_v4hi (__a), 0);
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vmaxv_s32 (int32x2_t __a)
 {
-  return vget_lane_s32 (__builtin_aarch64_reduc_smax_v2si (__a), __LANE0 (2));
+  return vget_lane_s32 (__builtin_aarch64_reduc_smax_v2si (__a), 0);
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
@@ -19874,7 +19868,7 @@ vmaxv_u8 (uint8x8_t __a)
 {
   return vget_lane_u8 ((uint8x8_t)
	  __builtin_aarch64_reduc_umax_v8qi ((int8x8_t) __a),
-	  __LANE0 (8));
+	  0);
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
@@ -19882,7 +19876,7 @@ vmaxv_u16 (uint16x4_t __a)
 {
   return vget_lane_u16 ((uint16x4_t)
	  __builtin_aarch64_reduc_umax_v4hi ((int16x4_t) __a),
-	  __LANE0 (4));
+	  0);
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
@@ -19890,39 +19884,39 @@ vmaxv_u32 (uint32x2_t __a)
 {
   return vget_lane_u32 ((uint32x2_t)
	  __builtin_aarch64_reduc_umax_v2si ((int32x2_t) __a),
-	  __LANE0 (2));
+	  0);
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vmaxvq_f32 (float32x4_t __a)
 {
   return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_nan_v4sf (__a),
-			 __LANE0 (4));
+			 0);
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vmaxvq_f64 (float64x2_t __a)
 {
   return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_nan_v2df (__a),
-			 __LANE0 (2));
+			 0);
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vmaxvq_s8 (int8x16_t __a)
 {
-  return vgetq_lane_s8 (__builtin_aarch64_reduc_smax_v16qi (__a), __LANE0 (16));
+  return vgetq_lane_s8 (__builtin_aarch64_reduc_smax_v16qi (__a), 0);
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vmaxvq_s16 (int16x8_t __a)
 {
-  return vgetq_lane_s16 (__builtin_aarch64_reduc_smax_v8hi (__a), __LANE0 (8));
+  return vgetq_lane_s16 (__builtin_aarch64_reduc_smax_v8hi (__a), 0);
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vmaxvq_s32 (int32x4_t __a)
 {
-  return vgetq_lane_s32 (__builtin_aarch64_reduc_smax_v4si (__a), __LANE0 (4));
+  return vgetq_lane_s32 (__builtin_aarch64_reduc_smax_v4si (__a), 0);
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
@@ -19930,7 +19924,7 @@ vmaxvq_u8 (uint8x16_t __a)
 {
   return vgetq_lane_u8 ((uint8x16_t)
	  __builtin_aarch64_reduc_umax_v16qi ((int8x16_t) __a),
-	  __LANE0 (16));
+	  0);
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
@@ -19938,7 +19932,7 @@ vmaxvq_u16 (uint16x8_t __a)
 {
   return vgetq_lane_u16 ((uint16x8_t)
	  __builtin_aarch64_reduc_umax_v8hi ((int16x8_t) __a),
-	  __LANE0 (8));
+	  0);
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
@@ -19946,7 +19940,7 @@ vmaxvq_u32 (uint32x4_t __a)
 {
   return vgetq_lane_u32 ((uint32x4_t)
	  __builtin_aarch64_reduc_umax_v4si ((int32x4_t) __a),
-	  __LANE0 (4));
+	  0);
 }
 
 /* vmaxnmv */
@@ -19955,19 +19949,19 @@ __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vmaxnmv_f32 (float32x2_t __a)
 {
   return vget_lane_f32 (__builtin_aarch64_reduc_smax_v2sf (__a),
-			__LANE0 (2));
+			0);
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vmaxnmvq_f32 (float32x4_t __a)
 {
-  return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_v4sf (__a), __LANE0 (4));
+  return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_v4sf (__a), 0);
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vmaxnmvq_f64 (float64x2_t __a)
 {
-  return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_v2df (__a), __LANE0 (2));
+  return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_v2df (__a), 0);
 }
 
 /* vmin */
@@ -20094,26 +20088,26 @@ __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vminv_f32 (float32x2_t __a)
 {
   return vget_lane_f32 (__builtin_aarch64_reduc_smin_nan_v2sf (__a),
-			__LANE0 (2));
+			0);
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vminv_s8 (int8x8_t __a)
 {
   return vget_lane_s8 (__builtin_aarch64_reduc_smin_v8qi (__a),
-		       __LANE0 (8));
+		       0);
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vminv_s16 (int16x4_t __a)
 {
-  return vget_lane_s16 (__builtin_aarch64_reduc_smin_v4hi (__a), __LANE0 (4));
+  return vget_lane_s16 (__builtin_aarch64_reduc_smin_v4hi (__a), 0);
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vminv_s32 (int32x2_t __a)
 {
-  return vget_lane_s32 (__builtin_aarch64_reduc_smin_v2si (__a), __LANE0 (2));
+  return vget_lane_s32 (__builtin_aarch64_reduc_smin_v2si (__a), 0);
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
@@ -20121,7 +20115,7 @@ vminv_u8 (uint8x8_t __a)
 {
   return vget_lane_u8 ((uint8x8_t)
	  __builtin_aarch64_reduc_umin_v8qi ((int8x8_t) __a),
-	  __LANE0 (8));
+	  0);
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
@@ -20129,7 +20123,7 @@ vminv_u16 (uint16x4_t __a)
 {
   return vget_lane_u16 ((uint16x4_t)
	  __builtin_aarch64_reduc_umin_v4hi ((int16x4_t) __a),
-	  __LANE0 (4));
+	  0);
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
@@ -20137,39 +20131,39 @@ vminv_u32 (uint32x2_t __a)
 {
   return vget_lane_u32 ((uint32x2_t)
	  __builtin_aarch64_reduc_umin_v2si ((int32x2_t) __a),
-	  __LANE0 (2));
+	  0);
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vminvq_f32 (float32x4_t __a)
 {
   return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_nan_v4sf (__a),
-			 __LANE0 (4));
+			 0);
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vminvq_f64 (float64x2_t __a)
 {
   return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_nan_v2df (__a),
-			 __LANE0 (2));
+			 0);
 }
 
 __extension__ static __inline int8_t __attribute__ ((__always_inline__))
 vminvq_s8 (int8x16_t __a)
 {
-  return vgetq_lane_s8 (__builtin_aarch64_reduc_smin_v16qi (__a), __LANE0 (16));
+  return vgetq_lane_s8 (__builtin_aarch64_reduc_smin_v16qi (__a), 0);
 }
 
 __extension__ static __inline int16_t __attribute__ ((__always_inline__))
 vminvq_s16 (int16x8_t __a)
 {
-  return vgetq_lane_s16 (__builtin_aarch64_reduc_smin_v8hi (__a), __LANE0 (8));
+  return vgetq_lane_s16 (__builtin_aarch64_reduc_smin_v8hi (__a), 0);
 }
 
 __extension__ static __inline int32_t __attribute__ ((__always_inline__))
 vminvq_s32 (int32x4_t __a)
 {
-  return vgetq_lane_s32 (__builtin_aarch64_reduc_smin_v4si (__a), __LANE0 (4));
+  return vgetq_lane_s32 (__builtin_aarch64_reduc_smin_v4si (__a), 0);
 }
 
 __extension__ static __inline uint8_t __attribute__ ((__always_inline__))
@@ -20177,7 +20171,7 @@ vminvq_u8 (uint8x16_t __a)
 {
   return vgetq_lane_u8 ((uint8x16_t)
	  __builtin_aarch64_reduc_umin_v16qi ((int8x16_t) __a),
-	  __LANE0 (16));
+	  0);
 }
 
 __extension__ static __inline uint16_t __attribute__ ((__always_inline__))
@@ -20185,7 +20179,7 @@ vminvq_u16 (uint16x8_t __a)
 {
   return vgetq_lane_u16 ((uint16x8_t)
	  __builtin_aarch64_reduc_umin_v8hi ((int16x8_t) __a),
-	  __LANE0 (8));
+	  0);
 }
 
 __extension__ static __inline uint32_t __attribute__ ((__always_inline__))
@@ -20193,7 +20187,7 @@ vminvq_u32 (uint32x4_t __a)
 {
   return vgetq_lane_u32 ((uint32x4_t)
	  __builtin_aarch64_reduc_umin_v4si ((int32x4_t) __a),
-	  __LANE0 (4));
+	  0);
 }
 
 /* vminnmv */
@@ -20201,19 +20195,19 @@ vminvq_u32 (uint32x4_t __a)
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vminnmv_f32 (float32x2_t __a)
 {
-  return vget_lane_f32 (__builtin_aarch64_reduc_smin_v2sf (__a), __LANE0 (2));
+  return vget_lane_f32 (__builtin_aarch64_reduc_smin_v2sf (__a), 0);
 }
 
 __extension__ static __inline float32_t __attribute__ ((__always_inline__))
 vminnmvq_f32 (float32x4_t __a)
 {
-  return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_v4sf (__a), __LANE0 (4));
+  return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_v4sf (__a), 0);
 }
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vminnmvq_f64 (float64x2_t __a)
 {
-  return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_v2df (__a), __LANE0 (2));
+  return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_v2df (__a), 0);
 }
 
 /* vmla */
@@ -25329,8 +25323,6 @@ __INTERLEAVE_LIST (zip)
 
 /* End of optimal implementations in approved order.  */
 
-#undef __LANE0
-
 #undef __aarch64_vget_lane_any
 #undef __aarch64_vget_lane_f32
 #undef __aarch64_vget_lane_f64
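Background note (editorial, not part of the patch itself): the deleted `__LANE0' macro expanded to `((__t) - 1)' on big-endian targets because, before this series, GCC's internal lane numbering on aarch64_be ran opposite to the architectural numbering, so "architectural lane 0" of an N-lane vector was GCC lane N-1. After the lane-numbering fixes in the earlier patches of the series (see the preceding aarch64-simd.md ChangeLog entry), the reduction builtins leave their scalar result in lane 0 on both endiannesses, so the intrinsics can hard-code lane 0 and the macro can go. A minimal, self-contained sketch of the user-visible behaviour follows; main, the data array, and the expected value 36 are illustrative and not from the patch:

    /* Compile with a GCC targeting aarch64 or aarch64_be.  */
    #include <arm_neon.h>
    #include <stdio.h>

    int
    main (void)
    {
      int8_t data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
      int8x8_t v = vld1_s8 (data);
      /* vaddv_s8 sums all eight lanes; with this series applied the
	 result is read from lane 0 regardless of target endianness,
	 so this prints 36 on both little- and big-endian AArch64.  */
      printf ("%d\n", (int) vaddv_s8 (v));
      return 0;
    }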