aarch64: Use RTL builtins for integer mla_n intrinsics

Rewrite integer mla_n Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and
optimization.
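
For reference, each of these intrinsics performs a lane-wise multiply-accumulate
of a vector by a scalar. A minimal scalar model of vmla_n_s16 (illustration
only, not the implementation):

  #include <stdint.h>

  /* Per-lane model of vmla_n_s16: a[i] += b[i] * c, wrapping mod 2^16.  */
  static void
  mla_n_s16_model (int16_t a[4], const int16_t b[4], int16_t c)
  {
    for (int i = 0; i < 4; i++)
      a[i] = (int16_t) (a[i] + b[i] * c);
  }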

gcc/ChangeLog:

2021-01-15  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add mla_n builtin
	generator macro.
	* config/aarch64/aarch64-simd.md (*aarch64_mla_elt_merge<mode>):
	Rename to...
	(aarch64_mla_n<mode>): This.
	* config/aarch64/arm_neon.h (vmla_n_s16): Use RTL builtin
	instead of asm.
	(vmla_n_s32): Likewise.
	(vmla_n_u16): Likewise.
	(vmla_n_u32): Likewise.
	(vmlaq_n_s16): Likewise.
	(vmlaq_n_s32): Likewise.
	(vmlaq_n_u16): Likewise.
	(vmlaq_n_u32): Likewise.

--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def

@@ -180,6 +180,8 @@
   /* Implemented by aarch64_mla<mode>. */
   BUILTIN_VDQ_BHSI (TERNOP, mla, 0, NONE)
+  /* Implemented by aarch64_mla_n<mode>. */
+  BUILTIN_VDQHS (TERNOP, mla_n, 0, NONE)
   /* Implemented by aarch64_<su>mlsl<mode>. */
   BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
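
The VDQHS iterator expands this entry for each 16-bit and 32-bit integer
vector mode, so the single macro line above should yield four builtins. The
names below follow the usual <name><mode> scheme and match the arm_neon.h
changes further down:

  __builtin_aarch64_mla_nv4hi  /* int16x4_t */
  __builtin_aarch64_mla_nv2si  /* int32x2_t */
  __builtin_aarch64_mla_nv8hi  /* int16x8_t */
  __builtin_aarch64_mla_nv4si  /* int32x4_t */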

--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md

@@ -1384,15 +1384,16 @@
   [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")]
 )
 
-(define_insn "*aarch64_mla_elt_merge<mode>"
+(define_insn "aarch64_mla_n<mode>"
   [(set (match_operand:VDQHS 0 "register_operand" "=w")
 	(plus:VDQHS
-	  (mult:VDQHS (vec_duplicate:VDQHS
-		  (match_operand:<VEL> 1 "register_operand" "<h_con>"))
-		(match_operand:VDQHS 2 "register_operand" "w"))
-	  (match_operand:VDQHS 3 "register_operand" "0")))]
+	  (mult:VDQHS
+	    (vec_duplicate:VDQHS
+	      (match_operand:<VEL> 3 "register_operand" "<h_con>"))
+	    (match_operand:VDQHS 2 "register_operand" "w"))
+	  (match_operand:VDQHS 1 "register_operand" "0")))]
   "TARGET_SIMD"
-  "mla\t%0.<Vtype>, %2.<Vtype>, %1.<Vetype>[0]"
+  "mla\t%0.<Vtype>, %2.<Vtype>, %3.<Vetype>[0]"
   [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")]
 )
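
The renamed pattern also renumbers its operands so that the accumulator is
operand 1 and the scalar is operand 3, matching the (accumulator, vector,
scalar) argument order of the new builtin. Assuming the pattern matches as
intended, a call like the following should compile to a single MLA-by-element
instruction (a codegen sketch, modulo register allocation):

  #include <arm_neon.h>

  /* Expected: mla v0.4h, v1.4h, v2.h[0]  */
  int16x4_t
  scale_accumulate (int16x4_t acc, int16x4_t v, int16_t s)
  {
    return vmla_n_s16 (acc, v, s);
  }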

--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h

@@ -7246,48 +7246,32 @@ __extension__ extern __inline int16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
 {
-  int16x4_t __result;
-  __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "x"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_mla_nv4hi (__a, __b, __c);
 }
 
 __extension__ extern __inline int32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
 {
-  int32x2_t __result;
-  __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_mla_nv2si (__a, __b, __c);
 }
 
 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
 {
-  uint16x4_t __result;
-  __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "x"(__c)
-           : /* No clobbers */);
-  return __result;
+  return (uint16x4_t) __builtin_aarch64_mla_nv4hi ((int16x4_t) __a,
+                                                   (int16x4_t) __b,
+                                                   (int16_t) __c);
 }
 
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
 {
-  uint32x2_t __result;
-  __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return (uint32x2_t) __builtin_aarch64_mla_nv2si ((int32x2_t) __a,
+                                                   (int32x2_t) __b,
+                                                   (int32_t) __c);
 }
 
 __extension__ extern __inline int8x8_t
@@ -7763,48 +7747,32 @@ __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
 {
-  int16x8_t __result;
-  __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "x"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_mla_nv8hi (__a, __b, __c);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
 {
-  int32x4_t __result;
-  __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_mla_nv4si (__a, __b, __c);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
 {
-  uint16x8_t __result;
-  __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "x"(__c)
-           : /* No clobbers */);
-  return __result;
+  return (uint16x8_t) __builtin_aarch64_mla_nv8hi ((int16x8_t) __a,
+                                                   (int16x8_t) __b,
+                                                   (int16_t) __c);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
 {
-  uint32x4_t __result;
-  __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return (uint32x4_t) __builtin_aarch64_mla_nv4si ((int32x4_t) __a,
+                                                   (int32x4_t) __b,
+                                                   (int32_t) __c);
 }
 
 __extension__ extern __inline int8x16_t
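
Note that the unsigned intrinsics reuse the signed builtins through casts;
this is safe because multiply-accumulate produces identical bit patterns for
signed and unsigned two's-complement inputs. A quick self-contained check of
that equivalence (test scaffolding written for this note, not part of the
patch):

  #include <arm_neon.h>
  #include <stdint.h>
  #include <assert.h>

  int
  main (void)
  {
    uint16_t a[4] = { 1, 2, 3, 60000 }, b[4] = { 4, 5, 6, 7 }, c = 9;
    uint16_t out[4];

    vst1_u16 (out, vmla_n_u16 (vld1_u16 (a), vld1_u16 (b), c));
    for (int i = 0; i < 4; i++)
      assert (out[i] == (uint16_t) (a[i] + b[i] * c));  /* wraps mod 2^16 */
    return 0;
  }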