[AArch64, 6/6] Reimplement vpadd intrinsics & extend rtl patterns to all modes

	* config/aarch64/aarch64-simd-builtins.def (faddp): New builtins for
	modes in VDQF.
	* config/aarch64/aarch64-simd.md (aarch64_faddp<mode>): New.
	(aarch64_addpv4sf): Delete.
	(reduc_plus_scal_v4sf): Use "gen_aarch64_faddpv4sf" instead of
	"gen_aarch64_addpv4sf".
	* config/aarch64/arm_neon.h (vpadd_f32): Remove inline assembly.  Use
	builtin.
	(vpadds_f32): Likewise.
	(vpaddq_f32): Likewise.
	(vpaddq_f64): Likewise.

From-SVN: r237205
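For context: the FADDP instruction that all of these intrinsics map to
concatenates its two input vectors and adds adjacent element pairs, with the
low half of the result coming from operand 1 and the high half from operand 2.
A rough C sketch of that semantics (illustrative only, not part of the patch;
the helper name and loop are hypothetical):

/* Reference pairwise add over n-element inputs a and b.  */
static void
faddp_ref (float *out, const float *a, const float *b, int n)
{
  for (int i = 0; i < n / 2; i++)
    {
      out[i]         = a[2 * i] + a[2 * i + 1];   /* low half from a */
      out[n / 2 + i] = b[2 * i] + b[2 * i + 1];   /* high half from b */
    }
}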

gcc/ChangeLog

@@ -1,3 +1,17 @@
2016-06-08 Jiong Wang <jiong.wang@arm.com>
* config/aarch64/aarch64-simd-builtins.def (faddp): New builtins for
modes in VDQF.
* config/aarch64/aarch64-simd.md (aarch64_faddp<mode>): New.
(aarch64_addpv4sf): Delete.
(reduc_plus_scal_v4sf): Use "gen_aarch64_faddpv4sf" instead of
"gen_aarch64_addpv4sf".
* config/aarch64/arm_neon.h (vpadd_f32): Remove inline assembly. Use
builtin.
(vpadds_f32): Likewise.
(vpaddq_f32): Likewise.
(vpaddq_f64): Likewise.
2016-06-08 Jiong Wang <jiong.wang@arm.com>
* config/aarch64/aarch64-simd-builtins.def (fabd): New builtins for modes

gcc/config/aarch64/aarch64-simd-builtins.def

@@ -460,3 +460,6 @@
/* Implemented by fabd<mode>3. */
BUILTIN_VALLF (BINOP, fabd, 3)
/* Implemented by aarch64_faddp<mode>. */
BUILTIN_VDQF (BINOP, faddp, 0)
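BUILTIN_VDQF registers one builtin per mode in the VDQF iterator (V2SF, V4SF
and V2DF).  The prototypes below are a sketch of what arm_neon.h can then rely
on; the actual declarations are generated by the builtin machinery rather than
written out like this:

float32x2_t __builtin_aarch64_faddpv2sf (float32x2_t, float32x2_t);
float32x4_t __builtin_aarch64_faddpv4sf (float32x4_t, float32x4_t);
float64x2_t __builtin_aarch64_faddpv2df (float64x2_t, float64x2_t);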

gcc/config/aarch64/aarch64-simd.md

@@ -1992,6 +1992,16 @@
}
)
(define_insn "aarch64_faddp<mode>"
[(set (match_operand:VDQF 0 "register_operand" "=w")
(unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
(match_operand:VDQF 2 "register_operand" "w")]
UNSPEC_FADDV))]
"TARGET_SIMD"
"faddp\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
[(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")]
)
(define_insn "aarch64_reduc_plus_internal<mode>"
[(set (match_operand:VDQV 0 "register_operand" "=w")
(unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
@@ -2019,15 +2029,6 @@
[(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")]
)
(define_insn "aarch64_addpv4sf"
[(set (match_operand:V4SF 0 "register_operand" "=w")
(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
UNSPEC_FADDV))]
"TARGET_SIMD"
"faddp\\t%0.4s, %1.4s, %1.4s"
[(set_attr "type" "neon_fp_reduc_add_s_q")]
)
(define_expand "reduc_plus_scal_v4sf"
[(set (match_operand:SF 0 "register_operand")
(unspec:V4SF [(match_operand:V4SF 1 "register_operand")]
@@ -2036,8 +2037,8 @@
{
rtx elt = GEN_INT (ENDIAN_LANE_N (V4SFmode, 0));
rtx scratch = gen_reg_rtx (V4SFmode);
emit_insn (gen_aarch64_addpv4sf (scratch, operands[1]));
emit_insn (gen_aarch64_addpv4sf (scratch, scratch));
emit_insn (gen_aarch64_faddpv4sf (scratch, operands[1], operands[1]));
emit_insn (gen_aarch64_faddpv4sf (scratch, scratch, scratch));
emit_insn (gen_aarch64_get_lanev4sf (operands[0], scratch, elt));
DONE;
})
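With the unary aarch64_addpv4sf pattern gone, the expander feeds the new
binary pattern with the same register twice.  In scalar terms the two pairwise
adds reduce a four-lane vector as follows (a hand-written C sketch, not part
of the patch):

/* Horizontal sum of a 4-lane vector x using two self pairwise adds.
   After the first step: t = { x0+x1, x2+x3, x0+x1, x2+x3 }.
   After the second step every lane holds x0+x1+x2+x3, so lane 0 is the
   reduction result that gen_aarch64_get_lanev4sf extracts.  */
float
reduc_plus_v4sf_ref (const float x[4])
{
  float t[4], s[4];
  for (int i = 0; i < 2; i++)
    t[i] = t[2 + i] = x[2 * i] + x[2 * i + 1];   /* faddp (x, x) */
  for (int i = 0; i < 2; i++)
    s[i] = s[2 + i] = t[2 * i] + t[2 * i + 1];   /* faddp (t, t) */
  return s[0];
}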

gcc/config/aarch64/arm_neon.h

@@ -8225,17 +8225,6 @@ vpadalq_u32 (uint64x2_t a, uint32x4_t b)
return result;
}
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vpadd_f32 (float32x2_t a, float32x2_t b)
{
float32x2_t result;
__asm__ ("faddp %0.2s,%1.2s,%2.2s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vpaddl_s8 (int8x8_t a)
{
@@ -8368,28 +8357,6 @@ vpaddlq_u32 (uint32x4_t a)
return result;
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vpaddq_f32 (float32x4_t a, float32x4_t b)
{
float32x4_t result;
__asm__ ("faddp %0.4s,%1.4s,%2.4s"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vpaddq_f64 (float64x2_t a, float64x2_t b)
{
float64x2_t result;
__asm__ ("faddp %0.2d,%1.2d,%2.2d"
: "=w"(result)
: "w"(a), "w"(b)
: /* No clobbers */);
return result;
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vpaddq_s8 (int8x16_t a, int8x16_t b)
{
@@ -8478,17 +8445,6 @@ vpaddq_u64 (uint64x2_t a, uint64x2_t b)
return result;
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vpadds_f32 (float32x2_t a)
{
float32_t result;
__asm__ ("faddp %s0,%1.2s"
: "=w"(result)
: "w"(a)
: /* No clobbers */);
return result;
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vqdmulh_n_s16 (int16x4_t a, int16_t b)
{
@@ -18625,6 +18581,24 @@ vnegq_s64 (int64x2_t __a)
/* vpadd */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vpadd_f32 (float32x2_t __a, float32x2_t __b)
{
return __builtin_aarch64_faddpv2sf (__a, __b);
}
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vpaddq_f32 (float32x4_t __a, float32x4_t __b)
{
return __builtin_aarch64_faddpv4sf (__a, __b);
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vpaddq_f64 (float64x2_t __a, float64x2_t __b)
{
return __builtin_aarch64_faddpv2df (__a, __b);
}
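A short usage sketch for the three reimplemented vector intrinsics (the values
in the comment are illustrative only):

#include <arm_neon.h>

/* With a = { 1, 2 } and b = { 10, 20 }:
   vpadd_f32 (a, b) == { 1 + 2, 10 + 20 } == { 3, 30 }.
   vpaddq_f32 and vpaddq_f64 behave the same way on 128-bit vectors.  */
float32x2_t
example_vpadd (float32x2_t a, float32x2_t b)
{
  return vpadd_f32 (a, b);
}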
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vpadd_s8 (int8x8_t __a, int8x8_t __b)
{
@@ -18664,6 +18638,12 @@ vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
(int32x2_t) __b);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vpadds_f32 (float32x2_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v2sf (__a);
}
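Unlike the vector forms above, vpadds_f32 reduces a single two-lane vector to
a scalar, so it maps onto the pre-existing reduc_plus_scal_v2sf builtin rather
than the new faddp one.  In effect (illustrative only):

#include <arm_neon.h>

/* vpadds_f32 ({ x0, x1 }) == x0 + x1.  */
float
example_vpadds (float32x2_t a)
{
  return vpadds_f32 (a);
}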
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vpaddd_f64 (float64x2_t __a)
{