[ARM][GCC][1/3x]: MVE intrinsics with ternary operands.
This patch supports the following MVE ACLE intrinsics with ternary
operands:

vabavq_s8, vabavq_s16, vabavq_s32, vabavq_u8, vabavq_u16, vabavq_u32,
vbicq_m_n_s16, vbicq_m_n_s32, vbicq_m_n_u16, vbicq_m_n_u32,
vcmpeqq_m_f16, vcmpeqq_m_f32, vcvtaq_m_s16_f16, vcvtaq_m_u16_f16,
vcvtaq_m_s32_f32, vcvtaq_m_u32_f32, vcvtq_m_f16_s16, vcvtq_m_f16_u16,
vcvtq_m_f32_s32, vcvtq_m_f32_u32, vqrshrnbq_n_s16, vqrshrnbq_n_u16,
vqrshrnbq_n_s32, vqrshrnbq_n_u32, vqrshrunbq_n_s16, vqrshrunbq_n_s32,
vrmlaldavhaq_s32, vrmlaldavhaq_u32, vshlcq_s8, vshlcq_u8, vshlcq_s16,
vshlcq_u16, vshlcq_s32, vshlcq_u32.

Please refer to the M-profile Vector Extension (MVE) intrinsics
documentation [1] for more details.

[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics

gcc/ChangeLog:

2020-03-17  Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>
	    Srinath Parvathaneni  <srinath.parvathaneni@arm.com>

	* config/arm/arm-builtins.c
	(TERNOP_UNONE_UNONE_UNONE_IMM_QUALIFIERS)
	(TERNOP_UNONE_UNONE_NONE_NONE_QUALIFIERS)
	(TERNOP_UNONE_NONE_UNONE_IMM_QUALIFIERS)
	(TERNOP_NONE_NONE_UNONE_IMM_QUALIFIERS)
	(TERNOP_UNONE_UNONE_NONE_IMM_QUALIFIERS)
	(TERNOP_UNONE_UNONE_NONE_UNONE_QUALIFIERS)
	(TERNOP_UNONE_UNONE_IMM_UNONE_QUALIFIERS)
	(TERNOP_UNONE_NONE_NONE_UNONE_QUALIFIERS)
	(TERNOP_NONE_NONE_NONE_IMM_QUALIFIERS)
	(TERNOP_NONE_NONE_NONE_UNONE_QUALIFIERS)
	(TERNOP_NONE_NONE_IMM_UNONE_QUALIFIERS)
	(TERNOP_NONE_NONE_UNONE_UNONE_QUALIFIERS)
	(TERNOP_UNONE_UNONE_UNONE_UNONE_QUALIFIERS)
	(TERNOP_NONE_NONE_NONE_NONE_QUALIFIERS): Define qualifiers for
	ternary operands.
	* config/arm/arm_mve.h (vabavq_s8, vabavq_s16, vabavq_s32)
	(vabavq_u8, vabavq_u16, vabavq_u32, vbicq_m_n_s16, vbicq_m_n_s32)
	(vbicq_m_n_u16, vbicq_m_n_u32, vcmpeqq_m_f16, vcmpeqq_m_f32)
	(vcvtaq_m_s16_f16, vcvtaq_m_u16_f16, vcvtaq_m_s32_f32)
	(vcvtaq_m_u32_f32, vcvtq_m_f16_s16, vcvtq_m_f16_u16)
	(vcvtq_m_f32_s32, vcvtq_m_f32_u32, vqrshrnbq_n_s16)
	(vqrshrnbq_n_u16, vqrshrnbq_n_s32, vqrshrnbq_n_u32)
	(vqrshrunbq_n_s16, vqrshrunbq_n_s32, vrmlaldavhaq_s32)
	(vrmlaldavhaq_u32, vshlcq_s8, vshlcq_u8, vshlcq_s16, vshlcq_u16)
	(vshlcq_s32, vshlcq_u32): Define macros.
	(__arm_vabavq_s8, __arm_vabavq_s16, __arm_vabavq_s32)
	(__arm_vabavq_u8, __arm_vabavq_u16, __arm_vabavq_u32)
	(__arm_vbicq_m_n_s16, __arm_vbicq_m_n_s32, __arm_vbicq_m_n_u16)
	(__arm_vbicq_m_n_u32, __arm_vqrshrnbq_n_s16, __arm_vqrshrnbq_n_u16)
	(__arm_vqrshrnbq_n_s32, __arm_vqrshrnbq_n_u32)
	(__arm_vqrshrunbq_n_s16, __arm_vqrshrunbq_n_s32)
	(__arm_vrmlaldavhaq_s32, __arm_vrmlaldavhaq_u32, __arm_vshlcq_s8)
	(__arm_vshlcq_u8, __arm_vshlcq_s16, __arm_vshlcq_u16)
	(__arm_vshlcq_s32, __arm_vshlcq_u32, __arm_vcmpeqq_m_f16)
	(__arm_vcmpeqq_m_f32, __arm_vcvtaq_m_s16_f16)
	(__arm_vcvtaq_m_u16_f16, __arm_vcvtaq_m_s32_f32)
	(__arm_vcvtaq_m_u32_f32, __arm_vcvtq_m_f16_s16)
	(__arm_vcvtq_m_f16_u16, __arm_vcvtq_m_f32_s32)
	(__arm_vcvtq_m_f32_u32): Define intrinsics.
	(vcvtaq_m, vcvtq_m, vabavq, vshlcq, vbicq_m_n, vqrshrnbq_n)
	(vqrshrunbq_n): Define polymorphic variants.
	* config/arm/arm_mve_builtins.def: Use the new TERNOP_* builtin
	qualifiers for the builtins listed above.
	* config/arm/mve.md (VBICQ_M_N, VCVTAQ_M, VCVTQ_M_TO_F)
	(VQRSHRNBQ_N, VABAVQ, VSHLCQ, VRMLALDAVHAQ): Define iterators.
	(mve_vbicq_m_n_<supf><mode>, mve_vcmpeqq_m_f<mode>)
	(mve_vcvtaq_m_<supf><mode>, mve_vcvtq_m_to_f_<supf><mode>)
	(mve_vqrshrnbq_n_<supf><mode>, mve_vqrshrunbq_n_s<mode>)
	(mve_vrmlaldavhaq_<supf>v4si, mve_vabavq_<supf><mode>)
	(mve_vshlcq_<supf><mode>): Define RTL patterns.
	(mve_vshlcq_vec_<supf><mode>, mve_vshlcq_carry_<supf><mode>):
	Define RTL expanders.

gcc/testsuite/ChangeLog:

2020-03-17  Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>
	    Srinath Parvathaneni  <srinath.parvathaneni@arm.com>

	* gcc.target/arm/mve/intrinsics/vabavq_s16.c: New test.
	* gcc.target/arm/mve/intrinsics/vabavq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vabavq_s8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vabavq_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vabavq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vabavq_u8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vbicq_m_n_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vbicq_m_n_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vbicq_m_n_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vbicq_m_n_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcmpeqq_m_f16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcmpeqq_m_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtaq_m_s16_f16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtaq_m_s32_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtaq_m_u16_f16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtaq_m_u32_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtq_m_f16_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtq_m_f16_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtq_m_f32_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtq_m_f32_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vrmlaldavhaq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vrmlaldavhaq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_s8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_u8.c: Likewise.
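A quick usage sketch (not part of the patch; function names and build
options are illustrative). It exercises two of the new ternary
intrinsics and should build with something like
arm-none-eabi-gcc -march=armv8.1-m.main+mve -mfloat-abi=hard -O2:

#include "arm_mve.h"

uint32_t
sum_abs_diff (uint32_t acc, int8x16_t a, int8x16_t b)
{
  /* VABAV: accumulate the absolute differences of the lanes of A and B
     on top of the scalar accumulator ACC.  */
  return vabavq_s8 (acc, a, b);
}

int32x4_t
shift_in_carry (int32x4_t v, uint32_t *carry)
{
  /* VSHLC: shift the vector left by 8 bits, filling the vacated low
     bits from *CARRY and returning the shifted-out bits in *CARRY.  */
  return vshlcq_s32 (v, carry, 8);
}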
commit 0dad5b3368
parent f9355dee93
@@ -433,6 +433,96 @@ arm_binop_unone_unone_none_qualifiers[SIMD_MAX_BUILTIN_ARGS]
#define BINOP_UNONE_UNONE_NONE_QUALIFIERS \
  (arm_binop_unone_unone_none_qualifiers)

static enum arm_type_qualifiers
arm_ternop_unone_unone_unone_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned,
      qualifier_immediate };
#define TERNOP_UNONE_UNONE_UNONE_IMM_QUALIFIERS \
  (arm_ternop_unone_unone_unone_imm_qualifiers)

static enum arm_type_qualifiers
arm_ternop_unone_unone_none_none_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned, qualifier_none, qualifier_none };
#define TERNOP_UNONE_UNONE_NONE_NONE_QUALIFIERS \
  (arm_ternop_unone_unone_none_none_qualifiers)

static enum arm_type_qualifiers
arm_ternop_unone_none_unone_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_none, qualifier_unsigned,
      qualifier_immediate };
#define TERNOP_UNONE_NONE_UNONE_IMM_QUALIFIERS \
  (arm_ternop_unone_none_unone_imm_qualifiers)

static enum arm_type_qualifiers
arm_ternop_none_none_unone_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_none, qualifier_none, qualifier_unsigned, qualifier_immediate };
#define TERNOP_NONE_NONE_UNONE_IMM_QUALIFIERS \
  (arm_ternop_none_none_unone_imm_qualifiers)

static enum arm_type_qualifiers
arm_ternop_unone_unone_none_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned, qualifier_none,
      qualifier_immediate };
#define TERNOP_UNONE_UNONE_NONE_IMM_QUALIFIERS \
  (arm_ternop_unone_unone_none_imm_qualifiers)

static enum arm_type_qualifiers
arm_ternop_unone_unone_none_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned, qualifier_none,
      qualifier_unsigned };
#define TERNOP_UNONE_UNONE_NONE_UNONE_QUALIFIERS \
  (arm_ternop_unone_unone_none_unone_qualifiers)

static enum arm_type_qualifiers
arm_ternop_unone_unone_imm_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned, qualifier_immediate,
      qualifier_unsigned };
#define TERNOP_UNONE_UNONE_IMM_UNONE_QUALIFIERS \
  (arm_ternop_unone_unone_imm_unone_qualifiers)

static enum arm_type_qualifiers
arm_ternop_unone_none_none_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_none, qualifier_none, qualifier_unsigned };
#define TERNOP_UNONE_NONE_NONE_UNONE_QUALIFIERS \
  (arm_ternop_unone_none_none_unone_qualifiers)

static enum arm_type_qualifiers
arm_ternop_none_none_none_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate };
#define TERNOP_NONE_NONE_NONE_IMM_QUALIFIERS \
  (arm_ternop_none_none_none_imm_qualifiers)

static enum arm_type_qualifiers
arm_ternop_none_none_none_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_none, qualifier_none, qualifier_none, qualifier_unsigned };
#define TERNOP_NONE_NONE_NONE_UNONE_QUALIFIERS \
  (arm_ternop_none_none_none_unone_qualifiers)

static enum arm_type_qualifiers
arm_ternop_none_none_imm_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_none, qualifier_none, qualifier_immediate, qualifier_unsigned };
#define TERNOP_NONE_NONE_IMM_UNONE_QUALIFIERS \
  (arm_ternop_none_none_imm_unone_qualifiers)

static enum arm_type_qualifiers
arm_ternop_none_none_unone_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_none, qualifier_none, qualifier_unsigned, qualifier_unsigned };
#define TERNOP_NONE_NONE_UNONE_UNONE_QUALIFIERS \
  (arm_ternop_none_none_unone_unone_qualifiers)

static enum arm_type_qualifiers
arm_ternop_unone_unone_unone_unone_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned,
      qualifier_unsigned };
#define TERNOP_UNONE_UNONE_UNONE_UNONE_QUALIFIERS \
  (arm_ternop_unone_unone_unone_unone_qualifiers)

static enum arm_type_qualifiers
arm_ternop_none_none_none_none_qualifiers[SIMD_MAX_BUILTIN_ARGS]
  = { qualifier_none, qualifier_none, qualifier_none, qualifier_none };
#define TERNOP_NONE_NONE_NONE_NONE_QUALIFIERS \
  (arm_ternop_none_none_none_none_qualifiers)

/* End of Qualifier for MVE builtins. */

/* void ([T element type] *, T, immediate). */
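Read each qualifier array as "return type first, then the argument
types"; the macro name encodes the same order. For example (a mapping
inferred from the header changes below, shown only as an illustration):

/* TERNOP_UNONE_UNONE_NONE_IMM: unsigned result, an unsigned vector, a
   signed vector, and an immediate -- the shape of the saturating
   rounding narrowing shifts, e.g.:  */
uint8x16_t __arm_vqrshrunbq_n_s16 (uint8x16_t, int16x8_t, const int);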
@@ -742,6 +742,40 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
#define vcvttq_f16_f32(__a, __b) __arm_vcvttq_f16_f32(__a, __b)
#define vcvtbq_f16_f32(__a, __b) __arm_vcvtbq_f16_f32(__a, __b)
#define vaddlvaq_s32(__a, __b) __arm_vaddlvaq_s32(__a, __b)
#define vabavq_s8(__a, __b, __c) __arm_vabavq_s8(__a, __b, __c)
#define vabavq_s16(__a, __b, __c) __arm_vabavq_s16(__a, __b, __c)
#define vabavq_s32(__a, __b, __c) __arm_vabavq_s32(__a, __b, __c)
#define vbicq_m_n_s16(__a, __imm, __p) __arm_vbicq_m_n_s16(__a, __imm, __p)
#define vbicq_m_n_s32(__a, __imm, __p) __arm_vbicq_m_n_s32(__a, __imm, __p)
#define vbicq_m_n_u16(__a, __imm, __p) __arm_vbicq_m_n_u16(__a, __imm, __p)
#define vbicq_m_n_u32(__a, __imm, __p) __arm_vbicq_m_n_u32(__a, __imm, __p)
#define vcmpeqq_m_f16(__a, __b, __p) __arm_vcmpeqq_m_f16(__a, __b, __p)
#define vcmpeqq_m_f32(__a, __b, __p) __arm_vcmpeqq_m_f32(__a, __b, __p)
#define vcvtaq_m_s16_f16(__inactive, __a, __p) __arm_vcvtaq_m_s16_f16(__inactive, __a, __p)
#define vcvtaq_m_u16_f16(__inactive, __a, __p) __arm_vcvtaq_m_u16_f16(__inactive, __a, __p)
#define vcvtaq_m_s32_f32(__inactive, __a, __p) __arm_vcvtaq_m_s32_f32(__inactive, __a, __p)
#define vcvtaq_m_u32_f32(__inactive, __a, __p) __arm_vcvtaq_m_u32_f32(__inactive, __a, __p)
#define vcvtq_m_f16_s16(__inactive, __a, __p) __arm_vcvtq_m_f16_s16(__inactive, __a, __p)
#define vcvtq_m_f16_u16(__inactive, __a, __p) __arm_vcvtq_m_f16_u16(__inactive, __a, __p)
#define vcvtq_m_f32_s32(__inactive, __a, __p) __arm_vcvtq_m_f32_s32(__inactive, __a, __p)
#define vcvtq_m_f32_u32(__inactive, __a, __p) __arm_vcvtq_m_f32_u32(__inactive, __a, __p)
#define vqrshrnbq_n_s16(__a, __b, __imm) __arm_vqrshrnbq_n_s16(__a, __b, __imm)
#define vqrshrnbq_n_u16(__a, __b, __imm) __arm_vqrshrnbq_n_u16(__a, __b, __imm)
#define vqrshrnbq_n_s32(__a, __b, __imm) __arm_vqrshrnbq_n_s32(__a, __b, __imm)
#define vqrshrnbq_n_u32(__a, __b, __imm) __arm_vqrshrnbq_n_u32(__a, __b, __imm)
#define vqrshrunbq_n_s16(__a, __b, __imm) __arm_vqrshrunbq_n_s16(__a, __b, __imm)
#define vqrshrunbq_n_s32(__a, __b, __imm) __arm_vqrshrunbq_n_s32(__a, __b, __imm)
#define vrmlaldavhaq_s32(__a, __b, __c) __arm_vrmlaldavhaq_s32(__a, __b, __c)
#define vrmlaldavhaq_u32(__a, __b, __c) __arm_vrmlaldavhaq_u32(__a, __b, __c)
#define vshlcq_s8(__a, __b, __imm) __arm_vshlcq_s8(__a, __b, __imm)
#define vshlcq_u8(__a, __b, __imm) __arm_vshlcq_u8(__a, __b, __imm)
#define vshlcq_s16(__a, __b, __imm) __arm_vshlcq_s16(__a, __b, __imm)
#define vshlcq_u16(__a, __b, __imm) __arm_vshlcq_u16(__a, __b, __imm)
#define vshlcq_s32(__a, __b, __imm) __arm_vshlcq_s32(__a, __b, __imm)
#define vshlcq_u32(__a, __b, __imm) __arm_vshlcq_u32(__a, __b, __imm)
#define vabavq_u8(__a, __b, __c) __arm_vabavq_u8(__a, __b, __c)
#define vabavq_u16(__a, __b, __c) __arm_vabavq_u16(__a, __b, __c)
#define vabavq_u32(__a, __b, __c) __arm_vabavq_u32(__a, __b, __c)
#endif

__extension__ extern __inline void
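Note the naming scheme above: each user-facing name is a thin macro over
a __arm_-prefixed inline function. My reading of the header (not text
from the patch): keeping the implementation under __arm_* leaves the
unsuffixed names free for the _Generic-based polymorphic variants added
further down, so both spellings resolve to the same inline:

uint32_t
use_both (uint32_t acc, int8x16_t a, int8x16_t b)
{
  uint32_t r1 = vabavq_s8 (acc, a, b); /* expands to __arm_vabavq_s8  */
  uint32_t r2 = vabavq (acc, a, b);    /* _Generic picks the same one */
  return r1 + r2;
}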
@@ -4485,6 +4519,186 @@ __arm_vaddlvaq_s32 (int64_t __a, int32x4_t __b)
  return __builtin_mve_vaddlvaq_sv4si (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_mve_vabavq_sv16qi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_mve_vabavq_sv8hi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vabavq_sv4si (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_mve_vabavq_uv16qi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_mve_vabavq_uv8hi (__a, __b, __c);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vabavq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vabavq_uv4si (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_sv8hi (__a, __imm, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_sv4si (__a, __imm, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_uv8hi (__a, __imm, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vbicq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
{
  return __builtin_mve_vbicq_m_n_uv4si (__a, __imm, __p);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_sv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_uv8hi (__a, __b, __imm);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrnbq_n_uv4si (__a, __b, __imm);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
{
  return __builtin_mve_vqrshrunbq_n_sv8hi (__a, __b, __imm);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vqrshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
{
  return __builtin_mve_vqrshrunbq_n_sv4si (__a, __b, __imm);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_mve_vrmlaldavhaq_sv4si (__a, __b, __c);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vrmlaldavhaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_mve_vrmlaldavhaq_uv4si (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_s8 (int8x16_t __a, uint32_t * __b, const int __imm)
{
  int8x16_t __res = __builtin_mve_vshlcq_vec_sv16qi (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_sv16qi (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_u8 (uint8x16_t __a, uint32_t * __b, const int __imm)
{
  uint8x16_t __res = __builtin_mve_vshlcq_vec_uv16qi (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_uv16qi (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_s16 (int16x8_t __a, uint32_t * __b, const int __imm)
{
  int16x8_t __res = __builtin_mve_vshlcq_vec_sv8hi (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_sv8hi (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_u16 (uint16x8_t __a, uint32_t * __b, const int __imm)
{
  uint16x8_t __res = __builtin_mve_vshlcq_vec_uv8hi (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_uv8hi (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_s32 (int32x4_t __a, uint32_t * __b, const int __imm)
{
  int32x4_t __res = __builtin_mve_vshlcq_vec_sv4si (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_sv4si (__a, *__b, __imm);
  return __res;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vshlcq_u32 (uint32x4_t __a, uint32_t * __b, const int __imm)
{
  uint32x4_t __res = __builtin_mve_vshlcq_vec_uv4si (__a, *__b, __imm);
  *__b = __builtin_mve_vshlcq_carry_uv4si (__a, *__b, __imm);
  return __res;
}
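Because a GCC builtin returns a single value while vshlcq produces two
(the shifted vector and the updated carry), the header calls a _vec and
a _carry builtin back to back; the backend can then combine the pair
into one VSHLC. A sketch of the intended calling pattern (illustrative
code, not from the patch):

int32x4_t
shift_two_vectors (int32x4_t lo, int32x4_t *hi)
{
  uint32_t carry = 0;
  lo = vshlcq_s32 (lo, &carry, 4);    /* bits shifted out of LO ...    */
  *hi = vshlcq_s32 (*hi, &carry, 4);  /* ... are shifted into HI.      */
  return lo;
}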
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */

__extension__ extern __inline void
@@ -5443,6 +5657,76 @@ __arm_vcvtbq_f16_f32 (float16x8_t __a, float32x4_t __b)
  return __builtin_mve_vcvtbq_f16_f32v8hf (__a, __b);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_fv8hf (__a, __b, __p);
}

__extension__ extern __inline mve_pred16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcmpeqq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
{
  return __builtin_mve_vcmpeqq_m_fv4sf (__a, __b, __p);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv8hi (__inactive, __a, __p);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv8hi (__inactive, __a, __p);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_sv4si (__inactive, __a, __p);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtaq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtaq_m_uv4si (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_s16 (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f16_u16 (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv8hf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_s32 (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_sv4sf (__inactive, __a, __p);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__arm_vcvtq_m_f32_u32 (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
{
  return __builtin_mve_vcvtq_m_to_f_uv4sf (__inactive, __a, __p);
}

#endif
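The predicated conversions take an extra __inactive vector and a
predicate: lanes whose predicate bits are set are converted, the rest
are copied from __inactive. A hedged sketch (vctp32q already exists in
this header; the function name is illustrative):

float32x4_t
convert_first_two (float32x4_t fallback, int32x4_t v)
{
  mve_pred16_t p = vctp32q (2);           /* predicate covers lanes 0-1 */
  return vcvtq_m_f32_s32 (fallback, v, p);
}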

enum {
@@ -6033,25 +6317,26 @@ extern void *__ARM_undef;
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})

#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1)
#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})

#define vcmpgtq_n(p0,p1) __arm_vcmpgtq_n(p0,p1)
#define __arm_vcmpgtq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})

#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2)
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})

#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
@@ -6676,7 +6961,60 @@ extern void *__ARM_undef;
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})

#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2)
#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2)
#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2)
#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2)
#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

#define vcvtaq_m(p0,p1,p2) __arm_vcvtaq_m(p0,p1,p2)
#define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})

#define vcvtq_m(p0,p1,p2) __arm_vcvtq_m(p0,p1,p2)
#define __arm_vcvtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

#else /* MVE Integer. */

#define vst4q(p0,p1) __arm_vst4q(p0,p1)
#define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
@@ -7653,6 +7991,77 @@ extern void *__ARM_undef;
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})

#define vabavq(p0,p1,p2) __arm_vabavq(p0,p1,p2)
#define __arm_vabavq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

#define vshlcq(p0,p1,p2) __arm_vshlcq(p0,p1,p2)
#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

#define vrmlaldavhaq(p0,p1,p2) __arm_vrmlaldavhaq(p0,p1,p2)
#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  __typeof(p2) __p2 = (p2); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
  int (*)[__ARM_mve_type_int64_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce(__p0, int64_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
  int (*)[__ARM_mve_type_uint64_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce(__p0, uint64_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})

#define vcmpeqq_m(p0,p1,p2) __arm_vcmpeqq_m(p0,p1,p2)
#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t), p2), \
  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t), p2), \
  int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t), p2));})

#define vbicq_m_n(p0,p1,p2) __arm_vbicq_m_n(p0,p1,p2)
#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
  int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})

#define vqrshrnbq(p0,p1,p2) __arm_vqrshrnbq(p0,p1,p2)
#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})

#define vqrshrunbq(p0,p1,p2) __arm_vqrshrunbq(p0,p1,p2)
#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
  __typeof(p1) __p1 = (p1); \
  _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
  int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
  int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})

#endif /* MVE Floating point. */

#ifdef __cplusplus
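The polymorphic names above use C11 _Generic keyed on the argument
types, so one spelling covers every element type. A small illustration
(hypothetical function name, assuming arm_mve.h is included):

uint32_t
abs_diff_acc (uint32_t acc, uint16x8_t a, uint16x8_t b)
{
  return vabavq (acc, a, b);   /* resolves to __arm_vabavq_u16 */
}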
@@ -18,276 +18,294 @@
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

VAR5(STORE1, vst4q, v16qi, v8hi, v4si, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vrndxq_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vrndq_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vrndpq_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vrndnq_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vrndmq_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vrndaq_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vrev64q_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vnegq_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vdupq_n_f, v8hf, v4sf)
VAR2(UNOP_NONE_NONE, vabsq_f, v8hf, v4sf)
VAR1(UNOP_NONE_NONE, vrev32q_f, v8hf)
VAR1(UNOP_NONE_NONE, vcvttq_f32_f16, v4sf)
VAR1(UNOP_NONE_NONE, vcvtbq_f32_f16, v4sf)
VAR2(UNOP_NONE_SNONE, vcvtq_to_f_s, v8hf, v4sf)
VAR2(UNOP_NONE_UNONE, vcvtq_to_f_u, v8hf, v4sf)
VAR3(UNOP_SNONE_SNONE, vrev64q_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vqnegq_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vqabsq_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vnegq_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vmvnq_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vdupq_n_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vclzq_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vclsq_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vaddvq_s, v16qi, v8hi, v4si)
VAR3(UNOP_SNONE_SNONE, vabsq_s, v16qi, v8hi, v4si)
VAR2(UNOP_SNONE_SNONE, vrev32q_s, v16qi, v8hi)
VAR2(UNOP_SNONE_SNONE, vmovltq_s, v16qi, v8hi)
VAR2(UNOP_SNONE_SNONE, vmovlbq_s, v16qi, v8hi)
VAR2(UNOP_SNONE_NONE, vcvtq_from_f_s, v8hi, v4si)
VAR2(UNOP_SNONE_NONE, vcvtpq_s, v8hi, v4si)
VAR2(UNOP_SNONE_NONE, vcvtnq_s, v8hi, v4si)
VAR2(UNOP_SNONE_NONE, vcvtmq_s, v8hi, v4si)
VAR2(UNOP_SNONE_NONE, vcvtaq_s, v8hi, v4si)
VAR2(UNOP_SNONE_IMM, vmvnq_n_s, v8hi, v4si)
VAR1(UNOP_SNONE_SNONE, vrev16q_s, v16qi)
VAR1(UNOP_SNONE_SNONE, vaddlvq_s, v4si)
VAR3(UNOP_UNONE_UNONE, vrev64q_u, v16qi, v8hi, v4si)
VAR3(UNOP_UNONE_UNONE, vmvnq_u, v16qi, v8hi, v4si)
VAR3(UNOP_UNONE_UNONE, vdupq_n_u, v16qi, v8hi, v4si)
VAR3(UNOP_UNONE_UNONE, vclzq_u, v16qi, v8hi, v4si)
VAR3(UNOP_UNONE_UNONE, vaddvq_u, v16qi, v8hi, v4si)
VAR2(UNOP_UNONE_UNONE, vrev32q_u, v16qi, v8hi)
VAR2(UNOP_UNONE_UNONE, vmovltq_u, v16qi, v8hi)
VAR2(UNOP_UNONE_UNONE, vmovlbq_u, v16qi, v8hi)
VAR2(UNOP_UNONE_NONE, vcvtq_from_f_u, v8hi, v4si)
VAR2(UNOP_UNONE_NONE, vcvtpq_u, v8hi, v4si)
VAR2(UNOP_UNONE_NONE, vcvtnq_u, v8hi, v4si)
VAR2(UNOP_UNONE_NONE, vcvtmq_u, v8hi, v4si)
VAR2(UNOP_UNONE_NONE, vcvtaq_u, v8hi, v4si)
VAR2(UNOP_UNONE_IMM, vmvnq_n_u, v8hi, v4si)
VAR1(UNOP_UNONE_UNONE, vrev16q_u, v16qi)
VAR1(UNOP_UNONE_UNONE, vaddlvq_u, v4si)
VAR1(UNOP_UNONE_UNONE, vctp16q, hi)
VAR1(UNOP_UNONE_UNONE, vctp32q, hi)
VAR1(UNOP_UNONE_UNONE, vctp64q, hi)
VAR1(UNOP_UNONE_UNONE, vctp8q, hi)
VAR1(UNOP_UNONE_UNONE, vpnot, hi)
VAR2(BINOP_NONE_NONE_NONE, vsubq_n_f, v8hf, v4sf)
VAR2(BINOP_NONE_NONE_NONE, vbrsrq_n_f, v8hf, v4sf)
VAR2(BINOP_NONE_NONE_IMM, vcvtq_n_to_f_s, v8hf, v4sf)
VAR2(BINOP_NONE_UNONE_IMM, vcvtq_n_to_f_u, v8hf, v4sf)
VAR2(BINOP_NONE_UNONE_UNONE, vcreateq_f, v8hf, v4sf)
VAR2(BINOP_UNONE_NONE_IMM, vcvtq_n_from_f_u, v8hi, v4si)
VAR2(BINOP_NONE_NONE_IMM, vcvtq_n_from_f_s, v8hi, v4si)
VAR4(BINOP_UNONE_UNONE_UNONE, vcreateq_u, v16qi, v8hi, v4si, v2di)
VAR4(BINOP_NONE_UNONE_UNONE, vcreateq_s, v16qi, v8hi, v4si, v2di)
VAR3(BINOP_UNONE_UNONE_IMM, vshrq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_IMM, vshrq_n_s, v16qi, v8hi, v4si)
VAR1(BINOP_NONE_NONE_UNONE, vaddlvq_p_s, v4si)
VAR1(BINOP_UNONE_UNONE_UNONE, vaddlvq_p_u, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpneq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcmpneq_u, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vshlq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vshlq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vsubq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vsubq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vrmulhq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vrhaddq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vqsubq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vqsubq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vqaddq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vqaddq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vorrq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vornq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vmulq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vmulq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vmulltq_int_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vmullbq_int_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vmulhq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vmladavq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vminvq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vminq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vmaxvq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vmaxq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vhsubq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vhsubq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vhaddq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vhaddq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, veorq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcmpneq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcmphiq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcmphiq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcmpeqq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcmpeqq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcmpcsq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcmpcsq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcaddq_rot90_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vcaddq_rot270_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vbicq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vandq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vaddvq_p_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vaddvaq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vaddq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_UNONE, vabdq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vshlq_r_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vrshlq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vrshlq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vqshlq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vqshlq_r_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vqrshlq_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vqrshlq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vminavq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vminaq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vmaxavq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vmaxaq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_NONE, vbrsrq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_IMM, vshlq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_IMM, vrshrq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_UNONE_IMM, vqshlq_n_u, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpneq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpltq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpltq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpleq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpleq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpgtq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpgtq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpgeq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpgeq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpeqq_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_NONE, vcmpeqq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_UNONE_NONE_IMM, vqshluq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_UNONE, vaddvq_p_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vsubq_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vsubq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vshlq_r_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vrshlq_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vrshlq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vrmulhq_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vrhaddq_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vqsubq_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vqsubq_n_s, v16qi, v8hi, v4si)
VAR3(BINOP_NONE_NONE_NONE, vqshlq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqshlq_r_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqrshlq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqrshlq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqrdmulhq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqrdmulhq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqdmulhq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqdmulhq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqaddq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vqaddq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vorrq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vornq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmulq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmulq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmulltq_int_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmullbq_int_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmulhq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmlsdavxq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmlsdavq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmladavxq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmladavq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vminvq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vminq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmaxvq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vmaxq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vhsubq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vhsubq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vhcaddq_rot90_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vhcaddq_rot270_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vhaddq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vhaddq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, veorq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vcaddq_rot90_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vcaddq_rot270_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vbrsrq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vbicq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vandq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vaddvaq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vaddq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_NONE, vabdq_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_IMM, vshlq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_IMM, vrshrq_n_s, v16qi, v8hi, v4si)
|
||||
VAR3(BINOP_NONE_NONE_IMM, vqshlq_n_s, v16qi, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_UNONE, vqmovntq_u, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_UNONE, vqmovnbq_u, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_UNONE, vmulltq_poly_p, v16qi, v8hi)
|
||||
VAR2(BINOP_UNONE_UNONE_UNONE, vmullbq_poly_p, v16qi, v8hi)
|
||||
VAR2(BINOP_UNONE_UNONE_UNONE, vmovntq_u, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_UNONE, vmovnbq_u, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_UNONE, vmlaldavq_u, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_NONE, vqmovuntq_s, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_NONE, vqmovunbq_s, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_IMM, vshlltq_n_u, v16qi, v8hi)
|
||||
VAR2(BINOP_UNONE_UNONE_IMM, vshllbq_n_u, v16qi, v8hi)
|
||||
VAR2(BINOP_UNONE_UNONE_IMM, vorrq_n_u, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_UNONE_IMM, vbicq_n_u, v8hi, v4si)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpneq_n_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpneq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpltq_n_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpltq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpleq_n_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpleq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpgtq_n_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpgtq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpgeq_n_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpgeq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpeqq_n_f, v8hf, v4sf)
|
||||
VAR2(BINOP_UNONE_NONE_NONE, vcmpeqq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vsubq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vqmovntq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vqmovnbq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vqdmulltq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vqdmulltq_n_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vqdmullbq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vqdmullbq_n_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vorrq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vornq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmulq_n_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmulq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmovntq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmovnbq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmlsldavxq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmlsldavq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmlaldavxq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmlaldavq_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vminnmvq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vminnmq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vminnmavq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vminnmaq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmaxnmvq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmaxnmq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmaxnmavq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vmaxnmaq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, veorq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vcmulq_rot90_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vcmulq_rot270_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vcmulq_rot180_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vcmulq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vcaddq_rot90_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vcaddq_rot270_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vbicq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vandq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vaddq_n_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_NONE, vabdq_f, v8hf, v4sf)
|
||||
VAR2(BINOP_NONE_NONE_IMM, vshlltq_n_s, v16qi, v8hi)
|
||||
VAR2(BINOP_NONE_NONE_IMM, vshllbq_n_s, v16qi, v8hi)
|
||||
VAR2(BINOP_NONE_NONE_IMM, vorrq_n_s, v8hi, v4si)
|
||||
VAR2(BINOP_NONE_NONE_IMM, vbicq_n_s, v8hi, v4si)
|
||||
VAR1(BINOP_UNONE_UNONE_UNONE, vrmlaldavhq_u, v4si)
|
||||
VAR1(BINOP_UNONE_UNONE_UNONE, vctp8q_m, hi)
|
||||
VAR1(BINOP_UNONE_UNONE_UNONE, vctp64q_m, hi)
|
||||
VAR1(BINOP_UNONE_UNONE_UNONE, vctp32q_m, hi)
|
||||
VAR1(BINOP_UNONE_UNONE_UNONE, vctp16q_m, hi)
|
||||
VAR1(BINOP_UNONE_UNONE_UNONE, vaddlvaq_u, v4si)
|
||||
VAR1(BINOP_NONE_NONE_NONE, vrmlsldavhxq_s, v4si)
|
||||
VAR1(BINOP_NONE_NONE_NONE, vrmlsldavhq_s, v4si)
|
||||
VAR1(BINOP_NONE_NONE_NONE, vrmlaldavhxq_s, v4si)
|
||||
VAR1(BINOP_NONE_NONE_NONE, vrmlaldavhq_s, v4si)
|
||||
VAR1(BINOP_NONE_NONE_NONE, vcvttq_f16_f32, v8hf)
|
||||
VAR1(BINOP_NONE_NONE_NONE, vcvtbq_f16_f32, v8hf)
|
||||
VAR1(BINOP_NONE_NONE_NONE, vaddlvaq_s, v4si)
|
||||
VAR2 (TERNOP_NONE_NONE_IMM_UNONE, vbicq_m_n_s, v8hi, v4si)
VAR2 (TERNOP_UNONE_UNONE_IMM_UNONE, vbicq_m_n_u, v8hi, v4si)
VAR2 (TERNOP_NONE_NONE_NONE_IMM, vqrshrnbq_n_s, v8hi, v4si)
VAR2 (TERNOP_UNONE_UNONE_UNONE_IMM, vqrshrnbq_n_u, v8hi, v4si)
VAR1 (TERNOP_NONE_NONE_NONE_NONE, vrmlaldavhaq_s, v4si)
VAR1 (TERNOP_UNONE_UNONE_UNONE_UNONE, vrmlaldavhaq_u, v4si)
VAR2 (TERNOP_NONE_NONE_UNONE_UNONE, vcvtq_m_to_f_u, v8hf, v4sf)
VAR2 (TERNOP_NONE_NONE_NONE_UNONE, vcvtq_m_to_f_s, v8hf, v4sf)
VAR2 (TERNOP_UNONE_NONE_NONE_UNONE, vcmpeqq_m_f, v8hf, v4sf)
VAR3 (TERNOP_UNONE_NONE_UNONE_IMM, vshlcq_carry_s, v16qi, v8hi, v4si)
VAR3 (TERNOP_UNONE_UNONE_UNONE_IMM, vshlcq_carry_u, v16qi, v8hi, v4si)
VAR2 (TERNOP_UNONE_UNONE_NONE_IMM, vqrshrunbq_n_s, v8hi, v4si)
VAR3 (TERNOP_UNONE_UNONE_NONE_NONE, vabavq_s, v16qi, v8hi, v4si)
VAR3 (TERNOP_UNONE_UNONE_UNONE_UNONE, vabavq_u, v16qi, v8hi, v4si)
VAR2 (TERNOP_UNONE_UNONE_NONE_UNONE, vcvtaq_m_u, v8hi, v4si)
VAR2 (TERNOP_NONE_NONE_NONE_UNONE, vcvtaq_m_s, v8hi, v4si)
VAR3 (TERNOP_UNONE_UNONE_UNONE_IMM, vshlcq_vec_u, v16qi, v8hi, v4si)
VAR3 (TERNOP_NONE_NONE_UNONE_IMM, vshlcq_vec_s, v16qi, v8hi, v4si)
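For orientation (an explanatory aside, not part of the patch): each TERNOP
qualifier spells out the signedness of the return value and of the three
operands in order, so a single builtin entry pins down the whole intrinsic
signature. A minimal sketch in C, using only vqrshrunbq_n_s16 as defined by
this patch; it maps to TERNOP_UNONE_UNONE_NONE_IMM because the result and
first operand are unsigned, the second operand is signed, and the third is
an immediate:

#include "arm_mve.h"

uint8x16_t
narrow_with_unsigned_saturation (uint8x16_t a, int16x8_t b)
{
  /* UNONE result, UNONE arg, NONE (signed) arg, IMM shift count.  */
  return vqrshrunbq_n_s16 (a, b, 1);
}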
gcc/config/arm/mve.md:
@@ -85,7 +85,11 @@
			 VSHLLBQ_U VSHLLTQ_U VSHLLTQ_S VQMOVNTQ_U VQMOVNTQ_S
			 VSHLLBQ_N_S VSHLLBQ_N_U VSHLLTQ_N_U VSHLLTQ_N_S
			 VRMLALDAVHQ_U VRMLALDAVHQ_S VMULLTQ_POLY_P
			 VMULLBQ_POLY_P VBICQ_M_N_S VBICQ_M_N_U VCMPEQQ_M_F
			 VCVTAQ_M_S VCVTAQ_M_U VCVTQ_M_TO_F_S VCVTQ_M_TO_F_U
			 VQRSHRNBQ_N_U VQRSHRNBQ_N_S VQRSHRUNBQ_N_S
			 VRMLALDAVHAQ_S VABAVQ_S VABAVQ_U VSHLCQ_S VSHLCQ_U
			 VRMLALDAVHAQ_U])

(define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF")
			    (V8HF "V8HI") (V4SF "V4SI")])
@@ -146,7 +150,12 @@
		       (VQMOVNBQ_U "u") (VQMOVNBQ_S "s") (VQMOVNTQ_S "s")
		       (VQMOVNTQ_U "u") (VSHLLBQ_N_U "u") (VSHLLBQ_N_S "s")
		       (VSHLLTQ_N_U "u") (VSHLLTQ_N_S "s") (VRMLALDAVHQ_U "u")
		       (VRMLALDAVHQ_S "s") (VBICQ_M_N_S "s") (VBICQ_M_N_U "u")
		       (VCVTAQ_M_S "s") (VCVTAQ_M_U "u") (VCVTQ_M_TO_F_S "s")
		       (VCVTQ_M_TO_F_U "u") (VQRSHRNBQ_N_S "s")
		       (VQRSHRNBQ_N_U "u") (VABAVQ_S "s") (VABAVQ_U "u")
		       (VRMLALDAVHAQ_U "u") (VRMLALDAVHAQ_S "s") (VSHLCQ_S "s")
		       (VSHLCQ_U "u")])

(define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32")
			(VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16")
@@ -241,6 +250,13 @@
(define_int_iterator VSHLLBQ_N [VSHLLBQ_N_S VSHLLBQ_N_U])
(define_int_iterator VSHLLTQ_N [VSHLLTQ_N_U VSHLLTQ_N_S])
(define_int_iterator VRMLALDAVHQ [VRMLALDAVHQ_U VRMLALDAVHQ_S])
(define_int_iterator VBICQ_M_N [VBICQ_M_N_S VBICQ_M_N_U])
(define_int_iterator VCVTAQ_M [VCVTAQ_M_S VCVTAQ_M_U])
(define_int_iterator VCVTQ_M_TO_F [VCVTQ_M_TO_F_S VCVTQ_M_TO_F_U])
(define_int_iterator VQRSHRNBQ_N [VQRSHRNBQ_N_U VQRSHRNBQ_N_S])
(define_int_iterator VABAVQ [VABAVQ_S VABAVQ_U])
(define_int_iterator VSHLCQ [VSHLCQ_S VSHLCQ_U])
(define_int_iterator VRMLALDAVHAQ [VRMLALDAVHAQ_S VRMLALDAVHAQ_U])

(define_insn "*mve_mov<mode>"
  [(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us")
@@ -3057,3 +3073,170 @@
  "vrmlaldavh.<supf>32 %Q0, %R0, %q1, %q2"
  [(set_attr "type" "mve_move")
])

;;
;; [vbicq_m_n_s, vbicq_m_n_u])
;;
(define_insn "mve_vbicq_m_n_<supf><mode>"
  [
   (set (match_operand:MVE_5 0 "s_register_operand" "=w")
	(unspec:MVE_5 [(match_operand:MVE_5 1 "s_register_operand" "0")
		       (match_operand:SI 2 "immediate_operand" "i")
		       (match_operand:HI 3 "vpr_register_operand" "Up")]
	 VBICQ_M_N))
  ]
  "TARGET_HAVE_MVE"
  "vpst\;vbict.i%#<V_sz_elem> %q0, %2"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
;;
;; [vcmpeqq_m_f])
;;
(define_insn "mve_vcmpeqq_m_f<mode>"
  [
   (set (match_operand:HI 0 "vpr_register_operand" "=Up")
	(unspec:HI [(match_operand:MVE_0 1 "s_register_operand" "w")
		    (match_operand:MVE_0 2 "s_register_operand" "w")
		    (match_operand:HI 3 "vpr_register_operand" "Up")]
	 VCMPEQQ_M_F))
  ]
  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
  "vpst\;vcmpt.f%#<V_sz_elem> eq, %q1, %q2"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])
;;
;; [vcvtaq_m_u, vcvtaq_m_s])
;;
(define_insn "mve_vcvtaq_m_<supf><mode>"
  [
   (set (match_operand:MVE_5 0 "s_register_operand" "=w")
	(unspec:MVE_5 [(match_operand:MVE_5 1 "s_register_operand" "0")
		       (match_operand:<MVE_CNVT> 2 "s_register_operand" "w")
		       (match_operand:HI 3 "vpr_register_operand" "Up")]
	 VCVTAQ_M))
  ]
  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
  "vpst\;vcvtat.<supf>%#<V_sz_elem>.f%#<V_sz_elem>\t%q0, %q2"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])

;;
;; [vcvtq_m_to_f_s, vcvtq_m_to_f_u])
;;
(define_insn "mve_vcvtq_m_to_f_<supf><mode>"
  [
   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
		       (match_operand:<MVE_CNVT> 2 "s_register_operand" "w")
		       (match_operand:HI 3 "vpr_register_operand" "Up")]
	 VCVTQ_M_TO_F))
  ]
  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
  "vpst\;vcvtt.f%#<V_sz_elem>.<supf>%#<V_sz_elem> %q0, %q2"
  [(set_attr "type" "mve_move")
   (set_attr "length""8")])

;;
;; [vqrshrnbq_n_u, vqrshrnbq_n_s])
;;
(define_insn "mve_vqrshrnbq_n_<supf><mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "mve_imm_8" "Rb")]
	 VQRSHRNBQ_N))
  ]
  "TARGET_HAVE_MVE"
  "vqrshrnb.<supf>%#<V_sz_elem> %q0, %q2, %3"
  [(set_attr "type" "mve_move")
])
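For reference, a scalar sketch of what the instruction computes, written
from the Arm MVE documentation rather than taken from this patch: each wide
lane of operand 2 is rounded, shifted right and saturated, then written into
the bottom (even) narrow lanes of operand 1, whose odd lanes are preserved.

#include <stdint.h>

static int8_t
sat_rshr_round (int16_t x, unsigned n)    /* n is 1..8 */
{
  int32_t r = ((int32_t) x + (1 << (n - 1))) >> n;  /* rounding shift */
  if (r > INT8_MAX) r = INT8_MAX;                   /* saturate */
  if (r < INT8_MIN) r = INT8_MIN;
  return (int8_t) r;
}

static void
vqrshrnbq_n_s16_ref (int8_t a[16], const int16_t b[8], unsigned n)
{
  for (int i = 0; i < 8; i++)
    a[2 * i] = sat_rshr_round (b[i], n);  /* odd lanes of A unchanged */
}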
;;
;; [vqrshrunbq_n_s])
;;
(define_insn "mve_vqrshrunbq_n_s<mode>"
  [
   (set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
	(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
				 (match_operand:MVE_5 2 "s_register_operand" "w")
				 (match_operand:SI 3 "mve_imm_8" "Rb")]
	 VQRSHRUNBQ_N_S))
  ]
  "TARGET_HAVE_MVE"
  "vqrshrunb.s%#<V_sz_elem>\t%q0, %q2, %3"
  [(set_attr "type" "mve_move")
])

;;
;; [vrmlaldavhaq_s vrmlaldavhaq_u])
;;
(define_insn "mve_vrmlaldavhaq_<supf>v4si"
  [
   (set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec:DI [(match_operand:DI 1 "s_register_operand" "0")
		    (match_operand:V4SI 2 "s_register_operand" "w")
		    (match_operand:V4SI 3 "s_register_operand" "w")]
	 VRMLALDAVHAQ))
  ]
  "TARGET_HAVE_MVE"
  "vrmlaldavha.<supf>32 %Q0, %R0, %q2, %q3"
  [(set_attr "type" "mve_move")
])
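A usage sketch matching the new tests: the 64-bit accumulator lives in a
core register pair, which is why the template prints %Q0 and %R0 for the
low and high words.

#include "arm_mve.h"

int64_t
accumulate_high_products (int64_t acc, int32x4_t b, int32x4_t c)
{
  /* Emits "vrmlaldavha.s32 Rlo, Rhi, q0, q1".  */
  return vrmlaldavhaq_s32 (acc, b, c);
}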

;;
;; [vabavq_s, vabavq_u])
;;
(define_insn "mve_vabavq_<supf><mode>"
  [
   (set (match_operand:SI 0 "s_register_operand" "=r")
	(unspec:SI [(match_operand:SI 1 "s_register_operand" "0")
		    (match_operand:MVE_2 2 "s_register_operand" "w")
		    (match_operand:MVE_2 3 "s_register_operand" "w")]
	 VABAVQ))
  ]
  "TARGET_HAVE_MVE"
  "vabav.<supf>%#<V_sz_elem>\t%0, %q2, %q3"
  [(set_attr "type" "mve_move")
])
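For reference, a scalar sketch of the semantics (from the Arm MVE
documentation, not from this patch): the absolute difference of every lane
pair is accumulated into a 32-bit scalar, which is why the result above is
SImode in a core register.

#include <stdint.h>
#include <stdlib.h>

static uint32_t
vabavq_s8_ref (uint32_t a, const int8_t b[16], const int8_t c[16])
{
  for (int i = 0; i < 16; i++)
    a += (uint32_t) abs ((int) b[i] - (int) c[i]);  /* per-lane |b - c| */
  return a;
}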

;;
;; [vshlcq_u vshlcq_s]
;;
(define_expand "mve_vshlcq_vec_<supf><mode>"
 [(match_operand:MVE_2 0 "s_register_operand")
  (match_operand:MVE_2 1 "s_register_operand")
  (match_operand:SI 2 "s_register_operand")
  (match_operand:SI 3 "mve_imm_32")
  (unspec:MVE_2 [(const_int 0)] VSHLCQ)]
 "TARGET_HAVE_MVE"
{
  rtx ignore_wb = gen_reg_rtx (SImode);
  emit_insn(gen_mve_vshlcq_<supf><mode>(operands[0], ignore_wb, operands[1],
					operands[2], operands[3]));
  DONE;
})

(define_expand "mve_vshlcq_carry_<supf><mode>"
 [(match_operand:SI 0 "s_register_operand")
  (match_operand:MVE_2 1 "s_register_operand")
  (match_operand:SI 2 "s_register_operand")
  (match_operand:SI 3 "mve_imm_32")
  (unspec:MVE_2 [(const_int 0)] VSHLCQ)]
 "TARGET_HAVE_MVE"
{
  rtx ignore_vec = gen_reg_rtx (<MODE>mode);
  emit_insn(gen_mve_vshlcq_<supf><mode>(ignore_vec, operands[0], operands[1],
					operands[2], operands[3]));
  DONE;
})

(define_insn "mve_vshlcq_<supf><mode>"
 [(set (match_operand:MVE_2 0 "s_register_operand" "=w")
       (unspec:MVE_2 [(match_operand:MVE_2 2 "s_register_operand" "0")
		      (match_operand:SI 3 "s_register_operand" "1")
		      (match_operand:SI 4 "mve_imm_32" "Rf")]
	VSHLCQ))
  (set (match_operand:SI 1 "s_register_operand" "=r")
       (unspec:SI [(match_dup 2)
		   (match_dup 3)
		   (match_dup 4)]
	VSHLCQ))]
 "TARGET_HAVE_MVE"
 "vshlc %q0, %1, %4")
gcc/testsuite/ChangeLog:
@@ -1,3 +1,42 @@
2020-03-17  Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>
	    Srinath Parvathaneni  <srinath.parvathaneni@arm.com>

	* gcc.target/arm/mve/intrinsics/vabavq_s16.c: New test.
	* gcc.target/arm/mve/intrinsics/vabavq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vabavq_s8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vabavq_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vabavq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vabavq_u8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vbicq_m_n_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vbicq_m_n_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vbicq_m_n_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vbicq_m_n_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcmpeqq_m_f16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcmpeqq_m_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtaq_m_s16_f16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtaq_m_s32_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtaq_m_u16_f16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtaq_m_u32_f32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtq_m_f16_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtq_m_f16_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtq_m_f32_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vcvtq_m_f32_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vrmlaldavhaq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vrmlaldavhaq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_s16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_s32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_s8.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_u16.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_u32.c: Likewise.
	* gcc.target/arm/mve/intrinsics/vshlcq_u8.c: Likewise.

2020-03-17  Andre Vieira  <andre.simoesdiasvieira@arm.com>
	    Mihail Ionescu  <mihail.ionescu@arm.com>
	    Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s16.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32_t
foo (uint32_t a, int16x8_t b, int16x8_t c)
{
  return vabavq_s16 (a, b, c);
}

/* { dg-final { scan-assembler "vabav.s16" } } */

uint32_t
foo1 (uint32_t a, int16x8_t b, int16x8_t c)
{
  return vabavq (a, b, c);
}

/* { dg-final { scan-assembler "vabav.s16" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s32.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32_t
foo (uint32_t a, int32x4_t b, int32x4_t c)
{
  return vabavq_s32 (a, b, c);
}

/* { dg-final { scan-assembler "vabav.s32" } } */

uint32_t
foo1 (uint32_t a, int32x4_t b, int32x4_t c)
{
  return vabavq (a, b, c);
}

/* { dg-final { scan-assembler "vabav.s32" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_s8.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32_t
foo (uint32_t a, int8x16_t b, int8x16_t c)
{
  return vabavq_s8 (a, b, c);
}

/* { dg-final { scan-assembler "vabav.s8" } } */

uint32_t
foo1 (uint32_t a, int8x16_t b, int8x16_t c)
{
  return vabavq (a, b, c);
}

/* { dg-final { scan-assembler "vabav.s8" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u16.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32_t
foo (uint32_t a, uint16x8_t b, uint16x8_t c)
{
  return vabavq_u16 (a, b, c);
}

/* { dg-final { scan-assembler "vabav.u16" } } */

uint32_t
foo1 (uint32_t a, uint16x8_t b, uint16x8_t c)
{
  return vabavq (a, b, c);
}

/* { dg-final { scan-assembler "vabav.u16" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u32.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32_t
foo (uint32_t a, uint32x4_t b, uint32x4_t c)
{
  return vabavq_u32 (a, b, c);
}

/* { dg-final { scan-assembler "vabav.u32" } } */

uint32_t
foo1 (uint32_t a, uint32x4_t b, uint32x4_t c)
{
  return vabavq (a, b, c);
}

/* { dg-final { scan-assembler "vabav.u32" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vabavq_u8.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32_t
foo (uint32_t a, uint8x16_t b, uint8x16_t c)
{
  return vabavq_u8 (a, b, c);
}

/* { dg-final { scan-assembler "vabav.u8" } } */

uint32_t
foo1 (uint32_t a, uint8x16_t b, uint8x16_t c)
{
  return vabavq (a, b, c);
}

/* { dg-final { scan-assembler "vabav.u8" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_s16.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int16x8_t
foo (int16x8_t a, mve_pred16_t p)
{
  return vbicq_m_n_s16 (a, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vbict.i16" } } */

int16x8_t
foo1 (int16x8_t a, mve_pred16_t p)
{
  return vbicq_m_n (a, 16, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_s32.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int32x4_t
foo (int32x4_t a, mve_pred16_t p)
{
  return vbicq_m_n_s32 (a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vbict.i32" } } */

int32x4_t
foo1 (int32x4_t a, mve_pred16_t p)
{
  return vbicq_m_n (a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_u16.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t a, mve_pred16_t p)
{
  return vbicq_m_n_u16 (a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vbict.i16" } } */

uint16x8_t
foo1 (uint16x8_t a, mve_pred16_t p)
{
  return vbicq_m_n (a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_m_n_u32.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32x4_t
foo (uint32x4_t a, mve_pred16_t p)
{
  return vbicq_m_n_u32 (a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vbict.i32" } } */

uint32x4_t
foo1 (uint32x4_t a, mve_pred16_t p)
{
  return vbicq_m_n (a, 1, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_f16.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

mve_pred16_t
foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
{
  return vcmpeqq_m_f16 (a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcmpt.f16" } } */

mve_pred16_t
foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
{
  return vcmpeqq_m (a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_m_f32.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

mve_pred16_t
foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
{
  return vcmpeqq_m_f32 (a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcmpt.f32" } } */

mve_pred16_t
foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
{
  return vcmpeqq_m (a, b, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_s16_f16.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int16x8_t
foo (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
  return vcvtaq_m_s16_f16 (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcvtat.s16.f16" } } */

int16x8_t
foo1 (int16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
  return vcvtaq_m (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_s32_f32.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int32x4_t
foo (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
  return vcvtaq_m_s32_f32 (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcvtat.s32.f32" } } */

int32x4_t
foo1 (int32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
  return vcvtaq_m (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_u16_f16.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
  return vcvtaq_m_u16_f16 (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcvtat.u16.f16" } } */

uint16x8_t
foo1 (uint16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
  return vcvtaq_m (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_m_u32_f32.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32x4_t
foo (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
  return vcvtaq_m_u32_f32 (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcvtat.u32.f32" } } */

uint32x4_t
foo1 (uint32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
  return vcvtaq_m (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f16_s16.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

float16x8_t
foo (float16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
  return vcvtq_m_f16_s16 (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */

float16x8_t
foo1 (float16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
  return vcvtq_m (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f16_u16.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

float16x8_t
foo (float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
  return vcvtq_m_f16_u16 (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */

float16x8_t
foo1 (float16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
  return vcvtq_m (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f32_s32.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

float32x4_t
foo (float32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
  return vcvtq_m_f32_s32 (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */

float32x4_t
foo1 (float32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
  return vcvtq_m (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_m_f32_u32.c (new file):
@@ -0,0 +1,23 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
/* { dg-add-options arm_v8_1m_mve_fp } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

float32x4_t
foo (float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
  return vcvtq_m_f32_u32 (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */

float32x4_t
foo1 (float32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
  return vcvtq_m (inactive, a, p);
}

/* { dg-final { scan-assembler "vpst" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s16.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int8x16_t
foo (int8x16_t a, int16x8_t b)
{
  return vqrshrnbq_n_s16 (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrnb.s16" } } */

int8x16_t
foo1 (int8x16_t a, int16x8_t b)
{
  return vqrshrnbq (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrnb.s16" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_s32.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int16x8_t
foo (int16x8_t a, int32x4_t b)
{
  return vqrshrnbq_n_s32 (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrnb.s32" } } */

int16x8_t
foo1 (int16x8_t a, int32x4_t b)
{
  return vqrshrnbq (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrnb.s32" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u16.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint8x16_t
foo (uint8x16_t a, uint16x8_t b)
{
  return vqrshrnbq_n_u16 (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrnb.u16" } } */

uint8x16_t
foo1 (uint8x16_t a, uint16x8_t b)
{
  return vqrshrnbq (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrnb.u16" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrnbq_n_u32.c (new file):
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t a, uint32x4_t b)
{
  return vqrshrnbq_n_u32 (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrnb.u32" } } */

uint16x8_t
foo1 (uint16x8_t a, uint32x4_t b)
{
  return vqrshrnbq (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrnb.u32" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s16.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint8x16_t
foo (uint8x16_t a, int16x8_t b)
{
  return vqrshrunbq_n_s16 (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrunb.s16" } } */

uint8x16_t
foo1 (uint8x16_t a, int16x8_t b)
{
  return vqrshrunbq (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrunb.s16" } } */
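The vqrshrunbq pair (this file and the s32 variant below) differs from vqrshrnbq only in saturating a signed input to an unsigned range, which is why the destination is unsigned while the wide operand stays signed. A one-lane sketch under the same round-then-saturate assumption as above; qrshrunb_s16_lane is a hypothetical helper:

#include <stdint.h>

/* Hypothetical one-lane model of vqrshrunbq_n_s16: rounding shift right
   of a signed input, saturated to the uint8_t range.  */
static uint8_t
qrshrunb_s16_lane (int16_t x, int imm)
{
  int32_t r = ((int32_t) x + (1 << (imm - 1))) >> imm;
  if (r < 0) r = 0;                  /* negative inputs clamp to zero */
  if (r > UINT8_MAX) r = UINT8_MAX;  /* saturate high */
  return (uint8_t) r;
}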
gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshrunbq_n_s32.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t a, int32x4_t b)
{
  return vqrshrunbq_n_s32 (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrunb.s32" } } */

uint16x8_t
foo1 (uint16x8_t a, int32x4_t b)
{
  return vqrshrunbq (a, b, 1);
}

/* { dg-final { scan-assembler "vqrshrunb.s32" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_s32.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int64_t
foo (int64_t a, int32x4_t b, int32x4_t c)
{
  return vrmlaldavhaq_s32 (a, b, c);
}

/* { dg-final { scan-assembler "vrmlaldavha.s32" } } */

int64_t
foo1 (int64_t a, int32x4_t b, int32x4_t c)
{
  return vrmlaldavhaq (a, b, c);
}

/* { dg-final { scan-assembler "vrmlaldavha.s32" } } */
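vrmlaldavhaq (this file and the u32 variant below) reduces the four 32x32->64-bit lane products into a 64-bit accumulator, returning the rounded high 64 bits of the widened intermediate. A scalar sketch of my reading of the instruction, modelling the 72-bit intermediate with __int128; the exact placement of the rounding constant should be checked against the Armv8.1-M pseudocode, and rmlaldavha_s32_model is a hypothetical helper, not the GCC implementation:

#include <stdint.h>

/* Hypothetical scalar model of vrmlaldavhaq_s32: widen the accumulator
   by 8 bits, add the lane products, then return the rounded high
   64 bits of the intermediate.  */
static int64_t
rmlaldavha_s32_model (int64_t a, const int32_t b[4], const int32_t c[4])
{
  __int128 acc = ((__int128) a) << 8;
  for (int i = 0; i < 4; i++)
    acc += (__int128) b[i] * (__int128) c[i];
  acc += 1 << 7;                     /* rounding constant */
  return (int64_t) (acc >> 8);
}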
gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmlaldavhaq_u32.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint64_t
foo (uint64_t a, uint32x4_t b, uint32x4_t c)
{
  return vrmlaldavhaq_u32 (a, b, c);
}

/* { dg-final { scan-assembler "vrmlaldavha.u32" } } */

uint64_t
foo1 (uint64_t a, uint32x4_t b, uint32x4_t c)
{
  return vrmlaldavhaq (a, b, c);
}

/* { dg-final { scan-assembler "vrmlaldavha.u32" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s16.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int16x8_t
foo (int16x8_t a, uint32_t * b)
{
  return vshlcq_s16 (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */

int16x8_t
foo1 (int16x8_t a, uint32_t * b)
{
  return vshlcq (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */
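All six vshlcq tests (this one and the five that follow) scan for the bare vshlc mnemonic because the whole-vector shift with carry is, as far as the emitted instruction is concerned, element-size agnostic: the 128-bit register shifts left as one value, the low bits are filled from the carry register, and the bits shifted out of the top are written back through it. A sketch on a 128-bit integer, assuming the GCC/Clang unsigned __int128 extension; shlc_model is a hypothetical helper:

#include <stdint.h>

/* Hypothetical model of vshlcq: shift the whole 128-bit value left by
   imm (1..32), pulling in the low imm bits of *carry and returning the
   shifted-out top bits through *carry.  */
static unsigned __int128
shlc_model (unsigned __int128 v, uint32_t *carry, int imm)
{
  uint32_t mask = (uint32_t) (((uint64_t) 1 << imm) - 1);
  uint32_t out = (uint32_t) (v >> (128 - imm)) & mask;  /* bits leaving */
  v = (v << imm) | (*carry & mask);                     /* bits entering */
  *carry = out;
  return v;
}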
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s32.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int32x4_t
foo (int32x4_t a, uint32_t * b)
{
  return vshlcq_s32 (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */

int32x4_t
foo1 (int32x4_t a, uint32_t * b)
{
  return vshlcq (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_s8.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

int8x16_t
foo (int8x16_t a, uint32_t * b)
{
  return vshlcq_s8 (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */

int8x16_t
foo1 (int8x16_t a, uint32_t * b)
{
  return vshlcq (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u16.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint16x8_t
foo (uint16x8_t a, uint32_t * b)
{
  return vshlcq_u16 (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */

uint16x8_t
foo1 (uint16x8_t a, uint32_t * b)
{
  return vshlcq (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u32.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint32x4_t
foo (uint32x4_t a, uint32_t * b)
{
  return vshlcq_u32 (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */

uint32x4_t
foo1 (uint32x4_t a, uint32_t * b)
{
  return vshlcq (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_u8.c (new file)
@@ -0,0 +1,22 @@
/* { dg-do compile } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint8x16_t
foo (uint8x16_t a, uint32_t * b)
{
  return vshlcq_u8 (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */

uint8x16_t
foo1 (uint8x16_t a, uint32_t * b)
{
  return vshlcq (a, b, 1);
}

/* { dg-final { scan-assembler "vshlc" } } */