aarch64: Use RTL builtins for [su]mlal_n intrinsics

Rewrite [su]mlal_n Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and
optimization.
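
For example, a use such as the one below still generates a single SMLAL
instruction, but the compiler is now free to schedule and optimize around
it (a minimal illustrative sketch; the function and variable names are
not part of this patch):

  #include <arm_neon.h>

  int32x4_t
  scale_accumulate (int32x4_t acc, int16x4_t v, int16_t scale)
  {
    /* Widening multiply-accumulate by scalar:
       acc[i] += (int32_t) v[i] * (int32_t) scale.  */
    return vmlal_n_s16 (acc, v, scale);
  }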

gcc/ChangeLog:

2021-01-26  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add [su]mlal_n
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_<su>mlal_n<mode>):
	Define.
	* config/aarch64/arm_neon.h (vmlal_n_s16): Use RTL builtin
	instead of inline asm.
	(vmlal_n_s32): Likewise.
	(vmlal_n_u16): Likewise.
	(vmlal_n_u32): Likewise.
commit 87301e3956
parent af66f4f1b0
Author: Jonathan Wright <jonathan.wright@arm.com>
Date:   2021-01-26 23:12:46 +0000

3 files changed, 23 insertions(+), 24 deletions(-)

--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def

@@ -202,6 +202,10 @@
   BUILTIN_VD_BHSI (TERNOP, smlal, 0, NONE)
   BUILTIN_VD_BHSI (TERNOPU, umlal, 0, NONE)
 
+  /* Implemented by aarch64_<su>mlal_n<mode>.  */
+  BUILTIN_VD_HSI (TERNOP, smlal_n, 0, NONE)
+  BUILTIN_VD_HSI (TERNOPU, umlal_n, 0, NONE)
+
   /* Implemented by aarch64_<su>mlsl_hi<mode>.  */
   BUILTIN_VQW (TERNOP, smlsl_hi, 0, NONE)
   BUILTIN_VQW (TERNOPU, umlsl_hi, 0, NONE)
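
The VD_HSI iterator covers the V4HI and V2SI modes, so these generator
macros expose the new patterns as __builtin_aarch64_smlal_nv4hi,
__builtin_aarch64_smlal_nv2si and their unsigned "_uuuu"-suffixed
counterparts, which the arm_neon.h hunk below calls directly.  A rough
sketch of the resulting calls, with hypothetical variable names and the
types implied by the intrinsic signatures:

  int32x4_t  r0 = __builtin_aarch64_smlal_nv4hi (acc_s32, v_s16, c_s16);
  int64x2_t  r1 = __builtin_aarch64_smlal_nv2si (acc_s64, v_s32, c_s32);
  uint32x4_t r2 = __builtin_aarch64_umlal_nv4hi_uuuu (acc_u32, v_u16, c_u16);
  uint64x2_t r3 = __builtin_aarch64_umlal_nv2si_uuuu (acc_u64, v_u32, c_u32);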

--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md

@@ -1932,6 +1932,21 @@
   [(set_attr "type" "neon_mla_<Vetype>_long")]
 )
 
+(define_insn "aarch64_<su>mlal_n<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (plus:<VWIDE>
+          (mult:<VWIDE>
+            (ANY_EXTEND:<VWIDE>
+              (vec_duplicate:VD_HSI
+                (match_operand:<VEL> 3 "register_operand" "<h_con>")))
+            (ANY_EXTEND:<VWIDE>
+              (match_operand:VD_HSI 2 "register_operand" "w")))
+          (match_operand:<VWIDE> 1 "register_operand" "0")))]
+  "TARGET_SIMD"
+  "<su>mlal\t%0.<Vwtype>, %2.<Vtype>, %3.<Vetype>[0]"
+  [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
 (define_insn "aarch64_<su>mlsl<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
         (minus:<VWIDE>
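
In C terms the new pattern is a lane-broadcast widening multiply-accumulate.
A scalar reference for the signed V4HI case (illustrative only, not part of
the patch):

  /* Operand 1 is the accumulator, operand 2 the vector, and operand 3 the
     scalar that vec_duplicate broadcasts to every lane before widening.  */
  for (int i = 0; i < 4; i++)
    acc[i] += (int32_t) b[i] * (int32_t) c;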

--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h

@@ -7614,48 +7614,28 @@ __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
 {
-  int32x4_t __result;
-  __asm__ ("smlal %0.4s,%2.4h,%3.h[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "x"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_nv4hi (__a, __b, __c);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
 {
-  int64x2_t __result;
-  __asm__ ("smlal %0.2d,%2.2s,%3.s[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_nv2si (__a, __b, __c);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
 {
-  uint32x4_t __result;
-  __asm__ ("umlal %0.4s,%2.4h,%3.h[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "x"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_nv4hi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
 {
-  uint64x2_t __result;
-  __asm__ ("umlal %0.2d,%2.2s,%3.s[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_nv2si_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x8_t
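
As a quick sanity check that the rewritten intrinsics behave as before, a
test along these lines (hypothetical, not part of the patch) should pass
unchanged:

  #include <arm_neon.h>
  #include <assert.h>

  int
  main (void)
  {
    int16x4_t b = {1, 2, 3, 4};
    int32x4_t acc = {10, 20, 30, 40};
    int32x4_t r = vmlal_n_s16 (acc, b, 5);
    /* Each lane should be acc[i] + b[i] * 5.  */
    assert (vgetq_lane_s32 (r, 0) == 15);
    assert (vgetq_lane_s32 (r, 3) == 60);
    return 0;
  }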