diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 0d611878c9d..6efc7706a41 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -315,6 +315,9 @@
   BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n, 0, ALL)
   BUILTIN_VSDQ_I (USHIFTIMM, uqshl_n, 0, ALL)
 
+  /* Implemented by aarch64_xtn2<mode>.  */
+  BUILTIN_VQN (UNOP, xtn2, 0, NONE)
+
   /* Implemented by aarch64_reduc_plus_<mode>.  */
   BUILTIN_VALL (UNOP, reduc_plus_scal_, 10, NONE)
 
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index b6629bfa93e..65209686b7e 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -7271,6 +7271,42 @@
   [(set_attr "type" "neon_shift_imm_narrow_q")]
 )
 
+(define_insn "aarch64_xtn2<mode>_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (match_operand:<VNARROWQ> 1 "register_operand" "0")
+	  (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "xtn2\t%0.<V2ntype>, %2.<Vtype>"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_xtn2<mode>_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+	(vec_concat:<VNARROWQ2>
+	  (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))
+	  (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "xtn2\t%0.<V2ntype>, %2.<Vtype>"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_xtn2<mode>"
+  [(match_operand:<VNARROWQ2> 0 "register_operand")
+   (match_operand:<VNARROWQ> 1 "register_operand")
+   (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand"))]
+  "TARGET_SIMD"
+  {
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_xtn2<mode>_be (operands[0], operands[1],
+					    operands[2]));
+    else
+      emit_insn (gen_aarch64_xtn2<mode>_le (operands[0], operands[1],
+					    operands[2]));
+    DONE;
+  }
+)
+
 (define_insn "aarch64_bfdot<mode>"
   [(set (match_operand:VDQSF 0 "register_operand" "=w")
	(plus:VDQSF
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d3a81dac475..b2a6b58f8c9 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8751,72 +8751,45 @@ __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmovn_high_s16 (int8x8_t __a, int16x8_t __b)
 {
-  int8x16_t __result = vcombine_s8 (__a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.16b,%1.8h"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_xtn2v8hi (__a, __b);
 }
 
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmovn_high_s32 (int16x4_t __a, int32x4_t __b)
 {
-  int16x8_t __result = vcombine_s16 (__a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.8h,%1.4s"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_xtn2v4si (__a, __b);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmovn_high_s64 (int32x2_t __a, int64x2_t __b)
 {
-  int32x4_t __result = vcombine_s32 (__a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.4s,%1.2d"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_xtn2v2di (__a, __b);
 }
 
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmovn_high_u16 (uint8x8_t __a, uint16x8_t __b)
 {
-  uint8x16_t __result = vcombine_u8 (__a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.16b,%1.8h"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return (uint8x16_t)
+    __builtin_aarch64_xtn2v8hi ((int8x8_t) __a, (int16x8_t) __b);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmovn_high_u32 (uint16x4_t __a, uint32x4_t __b)
 {
-  uint16x8_t __result = vcombine_u16 (__a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.8h,%1.4s"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return (uint16x8_t)
+    __builtin_aarch64_xtn2v4si ((int16x4_t) __a, (int32x4_t) __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmovn_high_u64 (uint32x2_t __a, uint64x2_t __b)
 {
-  uint32x4_t __result = vcombine_u32 (__a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
-  __asm__ ("xtn2 %0.4s,%1.2d"
-           : "+w"(__result)
-           : "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return (uint32x4_t)
+    __builtin_aarch64_xtn2v2di ((int32x2_t) __a, (int64x2_t) __b);
 }
 
 __extension__ extern __inline int8x8_t
diff --git a/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
index 07d78030058..a2e0cb9b100 100644
--- a/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
+++ b/gcc/testsuite/gcc.target/aarch64/narrow_high-intrinsics.c
@@ -122,4 +122,4 @@ ONE (vmovn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
 /* { dg-final { scan-assembler-times "uqxtn2 v" 3} } */
 /* { dg-final { scan-assembler-times "sqxtn2 v" 3} } */
 /* { dg-final { scan-assembler-times "sqxtun2 v" 3} } */
-/* { dg-final { scan-assembler-times "\\txtn2 v" 6} } */
+/* { dg-final { scan-assembler-times "\\txtn2\\tv" 6} } */