aarch64: Use RTL builtins for [su]mlal_high intrinsics

Rewrite [su]mlal_high Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and
optimization.

gcc/ChangeLog:

2021-01-27  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add RTL builtin
	generator macros.
	* config/aarch64/aarch64-simd.md (*aarch64_<su>mlal_hi<mode>):
	Rename to...
	(aarch64_<su>mlal_hi<mode>_insn): This.
	(aarch64_<su>mlal_hi<mode>): Define.
	* config/aarch64/arm_neon.h (vmlal_high_s8): Use RTL builtin
	instead of inline asm.
	(vmlal_high_s16): Likewise.
	(vmlal_high_s32): Likewise.
	(vmlal_high_u8): Likewise.
	(vmlal_high_u16): Likewise.
	(vmlal_high_u32): Likewise.
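
For context, a minimal usage sketch (not part of the patch; the function name is illustrative): vmlal_high_s8 widens the high eight lanes of its second and third operands, multiplies them element-wise, and accumulates the products into the first operand. With this change the intrinsic expands through __builtin_aarch64_smlal_hiv16qi rather than a hard-coded smlal2 asm string, so the compiler can schedule and combine it with surrounding code.

#include <arm_neon.h>

/* result[i] = a[i] + (int16_t) b[8 + i] * (int16_t) c[8 + i] for
   i in 0..7, i.e. a widening multiply-accumulate over the high
   halves of b and c.  */
int16x8_t
mlal_high_example (int16x8_t a, int8x16_t b, int8x16_t c)
{
  return vmlal_high_s8 (a, b, c);
}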

gcc/config/aarch64/aarch64-simd-builtins.def

@@ -240,6 +240,10 @@
BUILTIN_VQW (TERNOP, smlsl_hi, 0, NONE)
BUILTIN_VQW (TERNOPU, umlsl_hi, 0, NONE)
+/* Implemented by aarch64_<su>mlal_hi<mode>. */
+BUILTIN_VQW (TERNOP, smlal_hi, 0, NONE)
+BUILTIN_VQW (TERNOPU, umlal_hi, 0, NONE)
BUILTIN_VSQN_HSDI (UNOPUS, sqmovun, 0, NONE)
/* Implemented by aarch64_sqxtun2<mode>. */

gcc/config/aarch64/aarch64-simd.md

@@ -1976,7 +1976,7 @@
[(set_attr "type" "neon_mla_<Vetype>_long")]
)
(define_insn "*aarch64_<su>mlal_hi<mode>"
(define_insn "aarch64_<su>mlal_hi<mode>_insn"
[(set (match_operand:<VWIDE> 0 "register_operand" "=w")
(plus:<VWIDE>
(mult:<VWIDE>
@@ -1992,6 +1992,20 @@
[(set_attr "type" "neon_mla_<Vetype>_long")]
)
+(define_expand "aarch64_<su>mlal_hi<mode>"
+[(match_operand:<VWIDE> 0 "register_operand")
+(match_operand:<VWIDE> 1 "register_operand")
+(ANY_EXTEND:<VWIDE>(match_operand:VQW 2 "register_operand"))
+(match_operand:VQW 3 "register_operand")]
+"TARGET_SIMD"
+{
+rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
+emit_insn (gen_aarch64_<su>mlal_hi<mode>_insn (operands[0], operands[1],
+operands[2], p, operands[3]));
+DONE;
+}
+)
(define_insn "*aarch64_<su>mlsl_lo<mode>"
[(set (match_operand:<VWIDE> 0 "register_operand" "=w")
(minus:<VWIDE>

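The new expander makes the "high half" selection explicit: aarch64_simd_vect_par_cnst_half builds the vec_select parallel for the upper <nunits>/2 lanes, and the renamed _insn pattern performs the widening multiply-accumulate on those lanes. A rough equivalence in intrinsic terms (a sketch, not part of the patch; function names are illustrative) is that vmlal_high_s16 behaves like vmlal_s16 applied to vget_high of each 128-bit operand:

#include <arm_neon.h>

/* Both functions compute the same values; the first form is the one
   this patch routes through the new builtin and is expected to map
   onto a single smlal2 instruction.  */
int32x4_t
high_mla (int32x4_t acc, int16x8_t b, int16x8_t c)
{
  return vmlal_high_s16 (acc, b, c);
}

int32x4_t
high_mla_by_halves (int32x4_t acc, int16x8_t b, int16x8_t c)
{
  return vmlal_s16 (acc, vget_high_s16 (b), vget_high_s16 (c));
}
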
gcc/config/aarch64/arm_neon.h

@@ -7316,72 +7316,42 @@ __extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
{
-int16x8_t __result;
-__asm__ ("smlal2 %0.8h,%2.16b,%3.16b"
-: "=w"(__result)
-: "0"(__a), "w"(__b), "w"(__c)
-: /* No clobbers */);
-return __result;
+return __builtin_aarch64_smlal_hiv16qi (__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
{
-int32x4_t __result;
-__asm__ ("smlal2 %0.4s,%2.8h,%3.8h"
-: "=w"(__result)
-: "0"(__a), "w"(__b), "w"(__c)
-: /* No clobbers */);
-return __result;
+return __builtin_aarch64_smlal_hiv8hi (__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
{
-int64x2_t __result;
-__asm__ ("smlal2 %0.2d,%2.4s,%3.4s"
-: "=w"(__result)
-: "0"(__a), "w"(__b), "w"(__c)
-: /* No clobbers */);
-return __result;
+return __builtin_aarch64_smlal_hiv4si (__a, __b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
{
-uint16x8_t __result;
-__asm__ ("umlal2 %0.8h,%2.16b,%3.16b"
-: "=w"(__result)
-: "0"(__a), "w"(__b), "w"(__c)
-: /* No clobbers */);
-return __result;
+return __builtin_aarch64_umlal_hiv16qi_uuuu (__a, __b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
{
-uint32x4_t __result;
-__asm__ ("umlal2 %0.4s,%2.8h,%3.8h"
-: "=w"(__result)
-: "0"(__a), "w"(__b), "w"(__c)
-: /* No clobbers */);
-return __result;
+return __builtin_aarch64_umlal_hiv8hi_uuuu (__a, __b, __c);
}
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
{
-uint64x2_t __result;
-__asm__ ("umlal2 %0.2d,%2.4s,%3.4s"
-: "=w"(__result)
-: "0"(__a), "w"(__b), "w"(__c)
-: /* No clobbers */);
-return __result;
+return __builtin_aarch64_umlal_hiv4si_uuuu (__a, __b, __c);
}
__extension__ extern __inline int32x4_t