aarch64: Use RTL builtins for [su]mull_n intrinsics

Rewrite [su]mull_n Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and
optimization.
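
For illustration only (not part of the patch), a caller such as

  #include <arm_neon.h>

  int32x4_t
  scale_widen (int16x4_t a, int16_t k)   /* hypothetical example */
  {
    return vmull_n_s16 (a, k);           /* widening multiply by scalar */
  }

previously expanded to an opaque asm block that the compiler could not
reorder or combine; it now expands through a builtin into an RTL
pattern that participates in normal scheduling and optimization.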

gcc/ChangeLog:

2021-01-19  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add [su]mull_n
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_<su>mull_n<mode>):
	Define.
	* config/aarch64/arm_neon.h (vmull_n_s16): Use RTL builtin
	instead of inline asm.
	(vmull_n_s32): Likewise.
	(vmull_n_u16): Likewise.
	(vmull_n_u32): Likewise.

gcc/config/aarch64/aarch64-simd-builtins.def

@@ -271,6 +271,9 @@
   BUILTIN_VQW (BINOP, vec_widen_smult_hi_, 10, NONE)
   BUILTIN_VQW (BINOPU, vec_widen_umult_hi_, 10, NONE)
 
+  BUILTIN_VD_HSI (BINOP, smull_n, 0, NONE)
+  BUILTIN_VD_HSI (BINOPU, umull_n, 0, NONE)
+
   BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_lane_, 0, ALL)
   BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_lane_, 0, ALL)
   BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_laneq_, 0, ALL)
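
The VD_HSI iterator covers the 64-bit V4HI and V2SI modes, so these
generator macros produce __builtin_aarch64_smull_nv4hi and
__builtin_aarch64_smull_nv2si, plus the unsigned _uuu-suffixed
counterparts used in arm_neon.h below. As a reference model of the
semantics (an illustrative sketch in plain C, not the builtin's
implementation), smull_n on V4HI computes:

  /* Sign-extend each 16-bit lane of a, multiply by the scalar b,
     and produce one 32-bit result per lane.  */
  static void
  smull_n_ref (int32_t r[4], const int16_t a[4], int16_t b)
  {
    for (int i = 0; i < 4; i++)
      r[i] = (int32_t) a[i] * (int32_t) b;
  }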

gcc/config/aarch64/aarch64-simd.md

@@ -2074,6 +2074,19 @@
[(set_attr "type" "neon_mul_<Vetype>_scalar_long")]
)
(define_insn "aarch64_<su>mull_n<mode>"
[(set (match_operand:<VWIDE> 0 "register_operand" "=w")
(mult:<VWIDE>
(ANY_EXTEND:<VWIDE>
(vec_duplicate:<VCOND>
(match_operand:<VEL> 2 "register_operand" "<h_con>")))
(ANY_EXTEND:<VWIDE>
(match_operand:VD_HSI 1 "register_operand" "w"))))]
"TARGET_SIMD"
"<su>mull\t%0.<Vwtype>, %1.<Vtype>, %2.<Vetype>[0]"
[(set_attr "type" "neon_mul_<Vetype>_scalar_long")]
)
;; vmlal_lane_s16 intrinsics
(define_insn "aarch64_vec_<su>mlal_lane<Qlane>"
[(set (match_operand:<VWIDE> 0 "register_operand" "=w")
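
Instantiated for V4HI with sign extension, for example (substituting
the iterators by hand, as an illustration), <VWIDE> is V4SI, <VCOND>
is V4HI and <VEL> is HI, so the pattern matches

  (mult:V4SI
    (sign_extend:V4SI (vec_duplicate:V4HI (reg:HI)))   ;; operand 2: scalar
    (sign_extend:V4SI (reg:V4HI)))                     ;; operand 1: vector

and emits the lane-zero indexed form of the instruction, e.g.
smull v0.4s, v1.4h, v2.h[0].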

gcc/config/aarch64/arm_neon.h

@@ -8659,48 +8659,28 @@ __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_n_s16 (int16x4_t __a, int16_t __b)
 {
-  int32x4_t __result;
-  __asm__ ("smull %0.4s,%1.4h,%2.h[0]"
-           : "=w"(__result)
-           : "w"(__a), "x"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smull_nv4hi (__a, __b);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_n_s32 (int32x2_t __a, int32_t __b)
 {
-  int64x2_t __result;
-  __asm__ ("smull %0.2d,%1.2s,%2.s[0]"
-           : "=w"(__result)
-           : "w"(__a), "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smull_nv2si (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_n_u16 (uint16x4_t __a, uint16_t __b)
 {
-  uint32x4_t __result;
-  __asm__ ("umull %0.4s,%1.4h,%2.h[0]"
-           : "=w"(__result)
-           : "w"(__a), "x"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umull_nv4hi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_n_u32 (uint32x2_t __a, uint32_t __b)
 {
-  uint64x2_t __result;
-  __asm__ ("umull %0.2d,%1.2s,%2.s[0]"
-           : "=w"(__result)
-           : "w"(__a), "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umull_nv2si_uuu (__a, __b);
 }
 
 __extension__ extern __inline poly16x8_t
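
A quick compile check (an illustrative test, not part of the patch):
at -O2 each of the four rewritten intrinsics should now reduce to a
single [su]mull instruction.

  #include <arm_neon.h>

  int32x4_t  t1 (int16x4_t a, int16_t b)   { return vmull_n_s16 (a, b); }
  int64x2_t  t2 (int32x2_t a, int32_t b)   { return vmull_n_s32 (a, b); }
  uint32x4_t t3 (uint16x4_t a, uint16_t b) { return vmull_n_u16 (a, b); }
  uint64x2_t t4 (uint32x2_t a, uint32_t b) { return vmull_n_u32 (a, b); }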