Rewrite AArch64 UZP Intrinsics using __builtin_shuffle.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/vuzps32_1.c: Expect zip1/2 insn rather than uzp1/2.
	* gcc.target/aarch64/vuzpu32_1.c: Likewise.
	* gcc.target/aarch64/vuzpf32_1.c: Likewise.

gcc/ChangeLog:

	* config/aarch64/arm_neon.h (vuzp1_f32, vuzp1_p8, vuzp1_p16, vuzp1_s8,
	vuzp1_s16, vuzp1_s32, vuzp1_u8, vuzp1_u16, vuzp1_u32, vuzp1q_f32,
	vuzp1q_f64, vuzp1q_p8, vuzp1q_p16, vuzp1q_s8, vuzp1q_s16, vuzp1q_s32,
	vuzp1q_s64, vuzp1q_u8, vuzp1q_u16, vuzp1q_u32, vuzp1q_u64, vuzp2_f32,
	vuzp2_p8, vuzp2_p16, vuzp2_s8, vuzp2_s16, vuzp2_s32, vuzp2_u8,
	vuzp2_u16, vuzp2_u32, vuzp2q_f32, vuzp2q_f64, vuzp2q_p8, vuzp2q_p16,
	vuzp2q_s8, vuzp2q_s16, vuzp2q_s32, vuzp2q_s64, vuzp2q_u8, vuzp2q_u16,
	vuzp2q_u32, vuzp2q_u64): Replace temporary asm with __builtin_shuffle.

From-SVN: r209943
Author: Alan Lawrence <alan.lawrence@arm.com>
Date:   2014-04-30 17:04:53 +00:00
Commit: 7211512a5f (parent e3fe9b5b5a)
6 changed files with 455 additions and 467 deletions
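The change replaces each inline-asm UZP wrapper with GCC's generic __builtin_shuffle, which exposes the permutation to the optimizers and lets the backend's vec_perm support select the instruction. A minimal before/after sketch of the pattern (the names old_vuzp1_s32 and new_vuzp1_s32 are illustrative only, not part of the patch):

    #include <arm_neon.h>

    /* Before: an opaque inline-asm wrapper the optimizers cannot see through.  */
    __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
    old_vuzp1_s32 (int32x2_t a, int32x2_t b)
    {
      int32x2_t result;
      __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
               : "=w"(result)
               : "w"(a), "w"(b)
               : /* No clobbers */);
      return result;
    }

    /* After: a generic shuffle.  The mask {0, 2} selects the even elements
       of the concatenation {a, b} (little-endian element numbering).  */
    __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
    new_vuzp1_s32 (int32x2_t a, int32x2_t b)
    {
      return __builtin_shuffle (a, b, (uint32x2_t) {0, 2});
    }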

gcc/ChangeLog

@@ -1,3 +1,14 @@
2014-04-30  Alan Lawrence  <alan.lawrence@arm.com>

	* config/aarch64/arm_neon.h (vuzp1_f32, vuzp1_p8, vuzp1_p16, vuzp1_s8,
	vuzp1_s16, vuzp1_s32, vuzp1_u8, vuzp1_u16, vuzp1_u32, vuzp1q_f32,
	vuzp1q_f64, vuzp1q_p8, vuzp1q_p16, vuzp1q_s8, vuzp1q_s16, vuzp1q_s32,
	vuzp1q_s64, vuzp1q_u8, vuzp1q_u16, vuzp1q_u32, vuzp1q_u64, vuzp2_f32,
	vuzp2_p8, vuzp2_p16, vuzp2_s8, vuzp2_s16, vuzp2_s32, vuzp2_u8,
	vuzp2_u16, vuzp2_u32, vuzp2q_f32, vuzp2q_f64, vuzp2q_p8, vuzp2q_p16,
	vuzp2q_s8, vuzp2q_s16, vuzp2q_s32, vuzp2q_s64, vuzp2q_u8, vuzp2q_u16,
	vuzp2q_u32, vuzp2q_u64): Replace temporary asm with __builtin_shuffle.

2014-04-30  Joern Rennecke  <joern.rennecke@embecosm.com>

	* config/arc/arc.opt (mlra): Move comment above option name

gcc/config/aarch64/arm_neon.h

@@ -13199,467 +13199,6 @@ vtstq_p16 (poly16x8_t a, poly16x8_t b)
: /* No clobbers */);
return result;
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vuzp1_f32 (float32x2_t a, float32x2_t b)
{
float32x2_t result;
__asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vuzp1_p8 (poly8x8_t a, poly8x8_t b)
{
poly8x8_t result;
__asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vuzp1_p16 (poly16x4_t a, poly16x4_t b)
{
poly16x4_t result;
__asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vuzp1_s8 (int8x8_t a, int8x8_t b)
{
int8x8_t result;
__asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vuzp1_s16 (int16x4_t a, int16x4_t b)
{
int16x4_t result;
__asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vuzp1_s32 (int32x2_t a, int32x2_t b)
{
int32x2_t result;
__asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vuzp1_u8 (uint8x8_t a, uint8x8_t b)
{
uint8x8_t result;
__asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vuzp1_u16 (uint16x4_t a, uint16x4_t b)
{
uint16x4_t result;
__asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vuzp1_u32 (uint32x2_t a, uint32x2_t b)
{
uint32x2_t result;
__asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vuzp1q_f32 (float32x4_t a, float32x4_t b)
{
float32x4_t result;
__asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vuzp1q_f64 (float64x2_t a, float64x2_t b)
{
float64x2_t result;
__asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vuzp1q_p8 (poly8x16_t a, poly8x16_t b)
{
poly8x16_t result;
__asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vuzp1q_p16 (poly16x8_t a, poly16x8_t b)
{
poly16x8_t result;
__asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vuzp1q_s8 (int8x16_t a, int8x16_t b)
{
int8x16_t result;
__asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vuzp1q_s16 (int16x8_t a, int16x8_t b)
{
int16x8_t result;
__asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vuzp1q_s32 (int32x4_t a, int32x4_t b)
{
int32x4_t result;
__asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vuzp1q_s64 (int64x2_t a, int64x2_t b)
{
int64x2_t result;
__asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vuzp1q_u8 (uint8x16_t a, uint8x16_t b)
{
uint8x16_t result;
__asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vuzp1q_u16 (uint16x8_t a, uint16x8_t b)
{
uint16x8_t result;
__asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vuzp1q_u32 (uint32x4_t a, uint32x4_t b)
{
uint32x4_t result;
__asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vuzp1q_u64 (uint64x2_t a, uint64x2_t b)
{
uint64x2_t result;
__asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vuzp2_f32 (float32x2_t a, float32x2_t b)
{
float32x2_t result;
__asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vuzp2_p8 (poly8x8_t a, poly8x8_t b)
{
poly8x8_t result;
__asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vuzp2_p16 (poly16x4_t a, poly16x4_t b)
{
poly16x4_t result;
__asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vuzp2_s8 (int8x8_t a, int8x8_t b)
{
int8x8_t result;
__asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vuzp2_s16 (int16x4_t a, int16x4_t b)
{
int16x4_t result;
__asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vuzp2_s32 (int32x2_t a, int32x2_t b)
{
int32x2_t result;
__asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vuzp2_u8 (uint8x8_t a, uint8x8_t b)
{
uint8x8_t result;
__asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vuzp2_u16 (uint16x4_t a, uint16x4_t b)
{
uint16x4_t result;
__asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vuzp2_u32 (uint32x2_t a, uint32x2_t b)
{
uint32x2_t result;
__asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vuzp2q_f32 (float32x4_t a, float32x4_t b)
{
float32x4_t result;
__asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vuzp2q_f64 (float64x2_t a, float64x2_t b)
{
float64x2_t result;
__asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vuzp2q_p8 (poly8x16_t a, poly8x16_t b)
{
poly8x16_t result;
__asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vuzp2q_p16 (poly16x8_t a, poly16x8_t b)
{
poly16x8_t result;
__asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vuzp2q_s8 (int8x16_t a, int8x16_t b)
{
int8x16_t result;
__asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vuzp2q_s16 (int16x8_t a, int16x8_t b)
{
int16x8_t result;
__asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vuzp2q_s32 (int32x4_t a, int32x4_t b)
{
int32x4_t result;
__asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vuzp2q_s64 (int64x2_t a, int64x2_t b)
{
int64x2_t result;
__asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vuzp2q_u8 (uint8x16_t a, uint8x16_t b)
{
uint8x16_t result;
__asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vuzp2q_u16 (uint16x8_t a, uint16x8_t b)
{
uint16x8_t result;
__asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vuzp2q_u32 (uint32x4_t a, uint32x4_t b)
{
uint32x4_t result;
__asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vuzp2q_u64 (uint64x2_t a, uint64x2_t b)
{
uint64x2_t result;
__asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
/* End of temporary inline asm implementations. */
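Because the replacement masks in the hunk below are ordinary vector constants, the middle end can fold and combine these permutes like any other operation. For example (a hedged illustration, not part of the patch), a UZP of compile-time constants can now be folded away entirely:

    #include <arm_neon.h>

    int32x4_t
    folded (void)
    {
      int32x4_t a = {0, 1, 2, 3};
      int32x4_t b = {4, 5, 6, 7};
      return vuzp1q_s32 (a, b);   /* May fold to the constant {0, 2, 4, 6}.  */
    }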
@@ -25245,6 +24784,438 @@ vuqaddd_s64 (int64x1_t __a, uint64x1_t __b)
/* vuzp */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vuzp1_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
#else
return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
#endif
}
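/* A note on the __AARCH64EB__ masks: on big-endian targets GCC numbers
   vector elements in the reverse of the architectural lane order, so GCC
   element j of an n-element vector is architectural lane n-1-j.  Remapping
   the little-endian uzp1 mask through that correspondence, for the result
   and for both halves of the concatenated input, turns {0, 2} into {3, 1};
   the remaining big-endian masks below follow from the same calculation.  */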
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vuzp1_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vuzp1_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
#else
return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
#endif
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vuzp1_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vuzp1_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
#else
return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
#endif
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vuzp1_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
#else
return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
#endif
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vuzp1_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vuzp1_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
#else
return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
#endif
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vuzp1_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
#else
return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
#endif
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vuzp1q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
#else
return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
#endif
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vuzp1q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
#else
return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
#endif
}
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vuzp1q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint8x16_t)
{17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
#else
return __builtin_shuffle (__a, __b, (uint8x16_t)
{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
#endif
}
__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vuzp1q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vuzp1q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b,
(uint8x16_t) {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
#else
return __builtin_shuffle (__a, __b,
(uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
#endif
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vuzp1q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vuzp1q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
#else
return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
#endif
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vuzp1q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
#else
return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
#endif
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vuzp1q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b,
(uint8x16_t) {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
#else
return __builtin_shuffle (__a, __b,
(uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
#endif
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vuzp1q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vuzp1q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
#else
return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
#endif
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vuzp1q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
#else
return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
#endif
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vuzp2_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
#else
return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
#endif
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vuzp2_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vuzp2_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
#else
return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
#endif
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vuzp2_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vuzp2_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
#else
return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
#endif
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vuzp2_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
#else
return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
#endif
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vuzp2_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vuzp2_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
#else
return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
#endif
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vuzp2_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
#else
return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
#endif
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vuzp2q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
#else
return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
#endif
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vuzp2q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
#else
return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
#endif
}
__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vuzp2q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b,
(uint8x16_t) {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
#else
return __builtin_shuffle (__a, __b,
(uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
#endif
}
__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vuzp2q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vuzp2q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b,
(uint8x16_t) {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
#else
return __builtin_shuffle (__a, __b,
(uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
#endif
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vuzp2q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vuzp2q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
#else
return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
#endif
}
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vuzp2q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
#else
return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
#endif
}
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vuzp2q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint8x16_t)
{16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
#else
return __builtin_shuffle (__a, __b, (uint8x16_t)
{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
#endif
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vuzp2q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vuzp2q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
#else
return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
#endif
}
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vuzp2q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
#else
return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
#endif
}
__INTERLEAVE_LIST (uzp)
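/* __INTERLEAVE_LIST (uzp) expands, via the __DEFINTERLEAVE helper defined
   earlier in this header, to the two-result vuzp_* and vuzpq_* forms whose
   val[0] and val[1] fields are the vuzp1 and vuzp2 halves defined above.  */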
/* vzip */

gcc/testsuite/ChangeLog

@@ -1,3 +1,9 @@
2014-04-30  Alan Lawrence  <alan.lawrence@arm.com>

	* gcc.target/aarch64/vuzps32_1.c: Expect zip1/2 insn rather than uzp1/2.
	* gcc.target/aarch64/vuzpu32_1.c: Likewise.
	* gcc.target/aarch64/vuzpf32_1.c: Likewise.

2014-04-30  Alan Lawrence  <alan.lawrence@arm.com>

	* gcc.target/aarch64/simd/vuzpf32_1.c: New file.

gcc/testsuite/gcc.target/aarch64/simd/vuzpf32_1.c

@@ -6,6 +6,6 @@
#include <arm_neon.h>
#include "vuzpf32.x"
/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { cleanup-saved-temps } } */

gcc/testsuite/gcc.target/aarch64/simd/vuzps32_1.c

@@ -6,6 +6,6 @@
#include <arm_neon.h>
#include "vuzps32.x"
/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { cleanup-saved-temps } } */

gcc/testsuite/gcc.target/aarch64/simd/vuzpu32_1.c

@@ -6,6 +6,6 @@
#include <arm_neon.h>
#include "vuzpu32.x"
/* { dg-final { scan-assembler-times "uzp1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "uzp2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "zip1\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { scan-assembler-times "zip2\[ \t\]+v\[0-9\]+\.2s, ?v\[0-9\]+\.2s, ?v\[0-9\]+\.2s!?\(?:\[ \t\]+@\[a-zA-Z0-9 \]+\)?\n" 1 } } */
/* { dg-final { cleanup-saved-temps } } */
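The expectation change in these three tests is not a regression: for two-element vectors the de-interleave and interleave permutations coincide, so the mask {0, 2} is simultaneously a uzp1 and a zip1 of the inputs, and {1, 3} is both a uzp2 and a zip2. With the intrinsic now expressed as a generic shuffle, GCC's permute expander chooses the ZIP form for these masks, which is what the updated scans check for. A small illustration (hypothetical function name):

    #include <arm_neon.h>

    int32x2_t
    first_of_each (int32x2_t a, int32x2_t b)
    {
      /* {a[0], b[0]}: equally a uzp1 or a zip1 of the two inputs.  */
      return __builtin_shuffle (a, b, (uint32x2_t) {0, 2});
    }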