|
|
|
@ -13199,467 +13199,6 @@ vtstq_p16 (poly16x8_t a, poly16x8_t b)
|
|
|
|
|
: /* No clobbers */);
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
/* Temporary inline-asm implementations of the vuzp1 (unzip, keep the
   even-indexed elements) intrinsics.  Each wrapper emits a single
   AArch64 UZP1 instruction on the lane arrangement named in the asm
   template; the "=w"/"w" constraints place the operands in FP/SIMD
   registers.  These definitions are superseded later in this file by
   __builtin_shuffle implementations of the same names.  */

__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vuzp1_f32 (float32x2_t a, float32x2_t b)
{
  float32x2_t result;
  __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vuzp1_p8 (poly8x8_t a, poly8x8_t b)
{
  poly8x8_t result;
  __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vuzp1_p16 (poly16x4_t a, poly16x4_t b)
{
  poly16x4_t result;
  __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vuzp1_s8 (int8x8_t a, int8x8_t b)
{
  int8x8_t result;
  __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vuzp1_s16 (int16x4_t a, int16x4_t b)
{
  int16x4_t result;
  __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vuzp1_s32 (int32x2_t a, int32x2_t b)
{
  int32x2_t result;
  __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vuzp1_u8 (uint8x8_t a, uint8x8_t b)
{
  uint8x8_t result;
  __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vuzp1_u16 (uint16x4_t a, uint16x4_t b)
{
  uint16x4_t result;
  __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vuzp1_u32 (uint32x2_t a, uint32x2_t b)
{
  uint32x2_t result;
  __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

/* 128-bit (Q-register) variants of the same pattern.  */

__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vuzp1q_f32 (float32x4_t a, float32x4_t b)
{
  float32x4_t result;
  __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vuzp1q_f64 (float64x2_t a, float64x2_t b)
{
  float64x2_t result;
  __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vuzp1q_p8 (poly8x16_t a, poly8x16_t b)
{
  poly8x16_t result;
  __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vuzp1q_p16 (poly16x8_t a, poly16x8_t b)
{
  poly16x8_t result;
  __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vuzp1q_s8 (int8x16_t a, int8x16_t b)
{
  int8x16_t result;
  __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vuzp1q_s16 (int16x8_t a, int16x8_t b)
{
  int16x8_t result;
  __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vuzp1q_s32 (int32x4_t a, int32x4_t b)
{
  int32x4_t result;
  __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vuzp1q_s64 (int64x2_t a, int64x2_t b)
{
  int64x2_t result;
  __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vuzp1q_u8 (uint8x16_t a, uint8x16_t b)
{
  uint8x16_t result;
  __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vuzp1q_u16 (uint16x8_t a, uint16x8_t b)
{
  uint16x8_t result;
  __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vuzp1q_u32 (uint32x4_t a, uint32x4_t b)
{
  uint32x4_t result;
  __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vuzp1q_u64 (uint64x2_t a, uint64x2_t b)
{
  uint64x2_t result;
  __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}
|
|
|
|
|
|
|
|
|
|
/* Temporary inline-asm implementations of the vuzp2 (unzip, keep the
   odd-indexed elements) intrinsics.  Identical in structure to the
   vuzp1 group above, but emitting the UZP2 instruction.  Superseded
   later in this file by __builtin_shuffle implementations.  */

__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vuzp2_f32 (float32x2_t a, float32x2_t b)
{
  float32x2_t result;
  __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vuzp2_p8 (poly8x8_t a, poly8x8_t b)
{
  poly8x8_t result;
  __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vuzp2_p16 (poly16x4_t a, poly16x4_t b)
{
  poly16x4_t result;
  __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vuzp2_s8 (int8x8_t a, int8x8_t b)
{
  int8x8_t result;
  __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vuzp2_s16 (int16x4_t a, int16x4_t b)
{
  int16x4_t result;
  __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vuzp2_s32 (int32x2_t a, int32x2_t b)
{
  int32x2_t result;
  __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vuzp2_u8 (uint8x8_t a, uint8x8_t b)
{
  uint8x8_t result;
  __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vuzp2_u16 (uint16x4_t a, uint16x4_t b)
{
  uint16x4_t result;
  __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vuzp2_u32 (uint32x2_t a, uint32x2_t b)
{
  uint32x2_t result;
  __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

/* 128-bit (Q-register) variants of the same pattern.  */

__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vuzp2q_f32 (float32x4_t a, float32x4_t b)
{
  float32x4_t result;
  __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vuzp2q_f64 (float64x2_t a, float64x2_t b)
{
  float64x2_t result;
  __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vuzp2q_p8 (poly8x16_t a, poly8x16_t b)
{
  poly8x16_t result;
  __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vuzp2q_p16 (poly16x8_t a, poly16x8_t b)
{
  poly16x8_t result;
  __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vuzp2q_s8 (int8x16_t a, int8x16_t b)
{
  int8x16_t result;
  __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vuzp2q_s16 (int16x8_t a, int16x8_t b)
{
  int16x8_t result;
  __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vuzp2q_s32 (int32x4_t a, int32x4_t b)
{
  int32x4_t result;
  __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vuzp2q_s64 (int64x2_t a, int64x2_t b)
{
  int64x2_t result;
  __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vuzp2q_u8 (uint8x16_t a, uint8x16_t b)
{
  uint8x16_t result;
  __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vuzp2q_u16 (uint16x8_t a, uint16x8_t b)
{
  uint16x8_t result;
  __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vuzp2q_u32 (uint32x4_t a, uint32x4_t b)
{
  uint32x4_t result;
  __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vuzp2q_u64 (uint64x2_t a, uint64x2_t b)
{
  uint64x2_t result;
  __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
           : "=w"(result)
           : "w"(a), "w"(b)
           : /* No clobbers */);
  return result;
}
|
|
|
|
|
|
|
|
|
|
/* End of temporary inline asm implementations. */
|
|
|
|
|
|
|
|
|
@ -25245,6 +24784,438 @@ vuqaddd_s64 (int64x1_t __a, uint64x1_t __b)
|
|
|
|
|
|
|
|
|
|
/* vuzp */
|
|
|
|
|
|
|
|
|
|
/* vuzp1: concatenate __a and __b (2N lanes total) and keep the
   even-indexed elements.  Implemented with __builtin_shuffle so the
   compiler can reason about the permutation; the selector vector
   indexes lanes of the concatenation {__a, __b}.  On big-endian
   AArch64 (__AARCH64EB__) GCC numbers vector lanes from the opposite
   end to the architectural lane order, so a mirrored mask is needed
   to select the same architectural elements.  */

__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vuzp1_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
#endif
}

__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vuzp1_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
  return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}

__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vuzp1_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
#endif
}

__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vuzp1_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
  return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vuzp1_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
#endif
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vuzp1_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
#endif
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vuzp1_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
  return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vuzp1_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
#endif
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vuzp1_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
#endif
}

/* 128-bit (Q-register) variants of the same pattern.  */

__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vuzp1q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
#else
  return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
#endif
}

__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vuzp1q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
#else
  return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
#endif
}

__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vuzp1q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint8x16_t)
      {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
#else
  return __builtin_shuffle (__a, __b, (uint8x16_t)
      {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
#endif
}

__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vuzp1q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
  return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}

__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vuzp1q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b,
      (uint8x16_t) {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
#else
  return __builtin_shuffle (__a, __b,
      (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
#endif
}

__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vuzp1q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
  return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vuzp1q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
#else
  return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
#endif
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vuzp1q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
#else
  return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
#endif
}

__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vuzp1q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b,
      (uint8x16_t) {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
#else
  return __builtin_shuffle (__a, __b,
      (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
#endif
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vuzp1q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
#else
  return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
#endif
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vuzp1q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
#else
  return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
#endif
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vuzp1q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
#else
  return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
#endif
}
|
|
|
|
|
|
|
|
|
|
/* vuzp2: concatenate __a and __b (2N lanes total) and keep the
   odd-indexed elements.  Mirror image of the vuzp1 group above; the
   big-endian (__AARCH64EB__) masks are reordered because GCC numbers
   vector lanes from the opposite end on big-endian AArch64.  */

__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vuzp2_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
#endif
}

__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vuzp2_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
  return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}

__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vuzp2_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
#endif
}

__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vuzp2_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
  return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}

__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vuzp2_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
#endif
}

__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vuzp2_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
#endif
}

__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vuzp2_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
  return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}

__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vuzp2_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
#endif
}

__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vuzp2_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
#endif
}

/* 128-bit (Q-register) variants of the same pattern.  */

__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vuzp2q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
#else
  return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
#endif
}

__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vuzp2q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
#else
  return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
#endif
}

__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
vuzp2q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b,
      (uint8x16_t) {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
#else
  return __builtin_shuffle (__a, __b,
      (uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
#endif
}

__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
vuzp2q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
  return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}

__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vuzp2q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b,
      (uint8x16_t) {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
#else
  return __builtin_shuffle (__a, __b,
      (uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
#endif
}

__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vuzp2q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
  return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}

__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vuzp2q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
#else
  return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
#endif
}

__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vuzp2q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
#else
  return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
#endif
}

__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vuzp2q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint8x16_t)
      {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
#else
  return __builtin_shuffle (__a, __b, (uint8x16_t)
      {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
#endif
}

__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vuzp2q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
#else
  return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
#endif
}

__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vuzp2q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
#else
  return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
#endif
}

__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vuzp2q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
  return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
#else
  return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
#endif
}

/* NOTE(review): macro defined elsewhere in this file — presumably
   expands the "uzp" entry of the interleave intrinsic list into the
   combined vuzp*/vuzpq* wrappers built from the vuzp1*/vuzp2* halves
   above; confirm against the __INTERLEAVE_LIST definition.  */
__INTERLEAVE_LIST (uzp)
|
|
|
|
|
|
|
|
|
|
/* vzip */
|
|
|
|
|