[PATCH] aarch64: Implement vmlsl[_high]* intrinsics using builtins

This patch reimplements some more intrinsics using RTL builtins in the
straightforward way.
Thankfully most of the RTL infrastructure is already in place for it.
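For context: vmlsl is a widening multiply-subtract, where each lane of the
wide accumulator has the product of the corresponding narrow lanes
subtracted from it.  A minimal usage sketch (the function name is
illustrative, not part of the patch):

    #include <arm_neon.h>

    /* Each lane computes r[i] = a[i] - (int32_t) b[i] * (int32_t) c[i],
       which maps to a single SMLSL instruction.  */
    int32x4_t
    mlsl_example (int32x4_t a, int16x4_t b, int16x4_t c)
    {
      return vmlsl_s16 (a, b, c);
    }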

gcc/
	* config/aarch64/aarch64-simd.md (*aarch64_<su>mlsl_hi<mode>):
	Rename to...
	(aarch64_<su>mlsl_hi<mode>_insn): ... This.
	(aarch64_<su>mlsl_hi<mode>): Define.
	(*aarch64_<su>mlsl<mode>): Rename to...
	(aarch64_<su>mlsl<mode>): ... This.
	* config/aarch64/aarch64-simd-builtins.def (smlsl, umlsl,
	smlsl_hi, umlsl_hi): Define builtins.
	* config/aarch64/arm_neon.h (vmlsl_high_s8, vmlsl_high_s16,
	vmlsl_high_s32, vmlsl_high_u8, vmlsl_high_u16, vmlsl_high_u32,
	vmlsl_s8, vmlsl_s16, vmlsl_s32, vmlsl_u8,
	vmlsl_u16, vmlsl_u32): Reimplement with builtins.
commit d3959070aa
parent 7d0df0aeb6
Author: Kyrylo Tkachov
Date:   2021-01-15 17:55:57 +00:00

3 changed files with 36 additions and 74 deletions

--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -178,6 +178,14 @@
   /* Implemented by aarch64_xtn<mode>. */
   BUILTIN_VQN (UNOP, xtn, 0, NONE)
 
+  /* Implemented by aarch64_<su>mlsl<mode>. */
+  BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
+  BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
+
+  /* Implemented by aarch64_<su>mlsl_hi<mode>. */
+  BUILTIN_VQW (TERNOP, smlsl_hi, 0, NONE)
+  BUILTIN_VQW (TERNOPU, umlsl_hi, 0, NONE)
+
   BUILTIN_VSQN_HSDI (UNOPUS, sqmovun, 0, ALL)
   /* Implemented by aarch64_<sur>qmovn<mode>. */
   BUILTIN_VSQN_HSDI (UNOP, sqmovn, 0, ALL)
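Each of these .def entries generates one builtin per mode in its iterator,
and the arm_neon.h hunk below calls them directly.  A sketch of such a
call (illustrative only; user code should use the arm_neon.h intrinsics
rather than the internal builtins):

    #include <arm_neon.h>

    /* Direct call to the generated builtin for the v8qi mode; this is
       what vmlsl_s8 now expands to.  */
    int16x8_t
    smlsl_direct (int16x8_t a, int8x8_t b, int8x8_t c)
    {
      return __builtin_aarch64_smlslv8qi (a, b, c);
    }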

--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1755,7 +1755,7 @@
   [(set_attr "type" "neon_mla_<Vetype>_long")]
 )
 
-(define_insn "*aarch64_<su>mlsl_hi<mode>"
+(define_insn "aarch64_<su>mlsl_hi<mode>_insn"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
 	(minus:<VWIDE>
 	  (match_operand:<VWIDE> 1 "register_operand" "0")
@@ -1771,6 +1771,20 @@
   [(set_attr "type" "neon_mla_<Vetype>_long")]
 )
 
+(define_expand "aarch64_<su>mlsl_hi<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand")
+   (match_operand:<VWIDE> 1 "register_operand")
+   (ANY_EXTEND:<VWIDE>(match_operand:VQW 2 "register_operand"))
+   (match_operand:VQW 3 "register_operand")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
+  emit_insn (gen_aarch64_<su>mlsl_hi<mode>_insn (operands[0], operands[1],
+						 operands[2], p, operands[3]));
+  DONE;
+}
+)
+
 (define_insn "*aarch64_<su>mlal<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
 	(plus:<VWIDE>
@@ -1785,7 +1799,7 @@
   [(set_attr "type" "neon_mla_<Vetype>_long")]
 )
 
-(define_insn "*aarch64_<su>mlsl<mode>"
+(define_insn "aarch64_<su>mlsl<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
 	(minus:<VWIDE>
 	  (match_operand:<VWIDE> 1 "register_operand" "0")
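The new expander hands the parallel produced by
aarch64_simd_vect_par_cnst_half (with its last argument true, i.e. the
high half) down to the renamed *_insn pattern, so the instruction reads
the high half of each 128-bit input.  In intrinsics terms the equivalence
is as follows (a sketch, not part of the patch; the function name is
mine):

    #include <arm_neon.h>

    /* vmlsl_high_s8 (a, b, c) computes the same result as a widening
       multiply-subtract on the high halves of b and c.  */
    int16x8_t
    mlsl_high_equiv (int16x8_t a, int8x16_t b, int8x16_t c)
    {
      return vmlsl_s8 (a, vget_high_s8 (b), vget_high_s8 (c));
    }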

--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8200,72 +8200,42 @@ __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
 {
-  int16x8_t __result;
-  __asm__ ("smlsl2 %0.8h,%2.16b,%3.16b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlsl_hiv16qi (__a, __b, __c);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
 {
-  int32x4_t __result;
-  __asm__ ("smlsl2 %0.4s,%2.8h,%3.8h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlsl_hiv8hi (__a, __b, __c);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
 {
-  int64x2_t __result;
-  __asm__ ("smlsl2 %0.2d,%2.4s,%3.4s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlsl_hiv4si (__a, __b, __c);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
 {
-  uint16x8_t __result;
-  __asm__ ("umlsl2 %0.8h,%2.16b,%3.16b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlsl_hiv16qi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
 {
-  uint32x4_t __result;
-  __asm__ ("umlsl2 %0.4s,%2.8h,%3.8h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlsl_hiv8hi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
 {
-  uint64x2_t __result;
-  __asm__ ("umlsl2 %0.2d,%2.4s,%3.4s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlsl_hiv4si_uuuu (__a, __b, __c);
 }
 
 #define vmlsl_lane_s16(a, b, c, d) \
@@ -8432,72 +8402,42 @@ __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
 {
-  int16x8_t __result;
-  __asm__ ("smlsl %0.8h, %2.8b, %3.8b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlslv8qi (__a, __b, __c);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
 {
-  int32x4_t __result;
-  __asm__ ("smlsl %0.4s, %2.4h, %3.4h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlslv4hi (__a, __b, __c);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
 {
-  int64x2_t __result;
-  __asm__ ("smlsl %0.2d, %2.2s, %3.2s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlslv2si (__a, __b, __c);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
 {
-  uint16x8_t __result;
-  __asm__ ("umlsl %0.8h, %2.8b, %3.8b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlslv8qi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
 {
-  uint32x4_t __result;
-  __asm__ ("umlsl %0.4s, %2.4h, %3.4h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlslv4hi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
 {
-  uint64x2_t __result;
-  __asm__ ("umlsl %0.2d, %2.2s, %3.2s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlslv2si_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline float32x4_t
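Because the builtins expand to the RTL patterns above, each intrinsic
should still assemble to the single instruction the removed asm templates
hard-coded.  A quick smoke test (function name mine; assuming -O2):

    #include <arm_neon.h>

    /* Expected to compile to "umlsl v0.2d, v1.2s, v2.2s" plus ret,
       matching the removed inline-asm template.  */
    uint64x2_t
    check_umlsl (uint64x2_t a, uint32x2_t b, uint32x2_t c)
    {
      return vmlsl_u32 (a, b, c);
    }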