aarch64: Use RTL builtins for FP ml[as][q]_laneq intrinsics

Rewrite floating-point vml[as][q]_laneq Neon intrinsics to use RTL
builtins rather than relying on the GCC vector extensions. Using RTL
builtins allows control over the emission of fmla/fmls instructions
(which we don't want here).

With this commit, the code generated by these intrinsics changes from
a fused multiply-add/subtract instruction to an fmul followed by an
fadd/fsub instruction. If the programmer really wants fmla/fmls
instructions, they can use the vfm[as] intrinsics.
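
As a rough illustration (hypothetical example, not part of the commit;
exact register allocation may differ), a caller such as:

  #include <arm_neon.h>

  float32x2_t
  f (float32x2_t acc, float32x2_t x, float32x4_t y)
  {
    return vmla_laneq_f32 (acc, x, y, 3);
  }

previously compiled to a single "fmla v0.2s, v1.2s, v2.s[3]", and with
this change compiles to "fmul v1.2s, v1.2s, v2.s[3]" followed by
"fadd v0.2s, v0.2s, v1.2s".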

gcc/ChangeLog:

2021-02-17  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add
	float_ml[as][q]_laneq builtin generator macros.
	* config/aarch64/aarch64-simd.md (mul_laneq<mode>3): Define.
	(aarch64_float_mla_laneq<mode>): Define.
	(aarch64_float_mls_laneq<mode>): Define.
	* config/aarch64/arm_neon.h (vmla_laneq_f32): Use RTL builtin
	instead of GCC vector extensions.
	(vmlaq_laneq_f32): Likewise.
	(vmls_laneq_f32): Likewise.
	(vmlsq_laneq_f32): Likewise.

gcc/config/aarch64/aarch64-simd-builtins.def

@@ -674,6 +674,8 @@
   BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
   BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
   BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mla_laneq, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mls_laneq, 0, FP)
 
   /* Implemented by aarch64_simd_bsl<mode>.  */
   BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)

gcc/config/aarch64/aarch64-simd.md

@@ -734,6 +734,22 @@
   [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
 )
 
+(define_insn "mul_laneq<mode>3"
+  [(set (match_operand:VDQSF 0 "register_operand" "=w")
+	(mult:VDQSF
+	  (vec_duplicate:VDQSF
+	    (vec_select:<VEL>
+	      (match_operand:V4SF 2 "register_operand" "w")
+	      (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+	  (match_operand:VDQSF 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  {
+    operands[3] = aarch64_endian_lane_rtx (V4SFmode, INTVAL (operands[3]));
+    return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
+  }
+  [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
+)
+
 (define_insn "*aarch64_mul3_elt_<vswap_width_name><mode>"
   [(set (match_operand:VMUL_CHANGE_NLANES 0 "register_operand" "=w")
 	(mult:VMUL_CHANGE_NLANES
@@ -2742,6 +2758,46 @@
   }
 )
 
+(define_expand "aarch64_float_mla_laneq<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(plus:VDQSF
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V4SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))
+	  (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+				     operands[3], operands[4]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls_laneq<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(minus:VDQSF
+	  (match_operand:VDQSF 1 "register_operand")
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V4SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+				     operands[3], operands[4]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
 	(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")

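The expanders above decompose the operation into an explicit lane
multiply (mul_laneq<mode>3) followed by a separate vector add/sub, so
no fused fmla/fmls can be generated from them. In intrinsic terms the
mla expansion behaves like (a sketch for illustration, using the
existing arm_neon.h intrinsics, not code from this commit):

  /* vmla_laneq_f32 (__a, __b, __c, __lane) now behaves like:  */
  float32x2_t __tmp = vmul_laneq_f32 (__b, __c, __lane); /* mul_laneq<mode>3 */
  float32x2_t __res = vadd_f32 (__a, __tmp);             /* add<mode>3 */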
gcc/config/aarch64/arm_neon.h

@@ -20420,7 +20420,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
 		float32x4_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_laneqv2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20504,7 +20504,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
 		 float32x4_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_laneqv4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
@@ -20618,7 +20618,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
 		float32x4_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_laneqv2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20702,7 +20702,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
 		 float32x4_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_laneqv4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
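
For callers that do want the fused operation, the ACLE provides the
vfm[as] family mentioned in the commit message; a minimal sketch
(hypothetical usage, not part of this commit):

  #include <arm_neon.h>

  float32x2_t
  g (float32x2_t acc, float32x2_t x, float32x4_t y)
  {
    /* Explicitly request a fused multiply-add; emits fmla.  */
    return vfma_laneq_f32 (acc, x, y, 3);
  }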