rs6000.c (rs6000_expand_vector_init): For V4SF inits on power8 and above, use the VMRGEW instruction instead of a permute.

2016-09-20  Michael Meissner  <meissner@linux.vnet.ibm.com>

	* config/rs6000/rs6000.c (rs6000_expand_vector_init): For V4SF
	inits on power8 and above, use the VMRGEW instruction instead of a
	permute.

	* config/rs6000/altivec.md (UNSPEC_VMRGEW_DIRECT): New unspec.
	(p8_vmrgew_v4sf_direct): New VMRGEW insn for V4SF floating
	initialization.

From-SVN: r240272
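
For context, here is a small hypothetical C example (not part of the commit) of the construct this expansion handles: a V4SF vector built from four scalar floats. rs6000_expand_vector_init expands such initializers; with the patch, on power8 and later the converted halves are merged with a single vmrgew rather than the permute used previously.

#include <altivec.h>

/* Hypothetical example, not from the commit: initializing a V4SF
   vector from four scalar floats.  rs6000_expand_vector_init expands
   this init; with -mcpu=power8 the patched path combines the
   converted halves with one vmrgew instead of a permute.  */
vector float
build_v4sf (float a, float b, float c, float d)
{
  return (vector float) { a, b, c, d };
}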

gcc/ChangeLog

@@ -1,3 +1,13 @@
+2016-09-20  Michael Meissner  <meissner@linux.vnet.ibm.com>
+
+	* config/rs6000/rs6000.c (rs6000_expand_vector_init): For V4SF
+	inits on power8 and above, use the VMRGEW instruction instead of a
+	permute.
+
+	* config/rs6000/altivec.md (UNSPEC_VMRGEW_DIRECT): New unspec.
+	(p8_vmrgew_v4sf_direct): New VMRGEW insn for V4SF floating
+	initialization.
+
 2016-09-20  Tamar Christina  <tamar.christina@arm.com>
 
 	* config/aarch64/arm_neon.h

gcc/config/rs6000/altivec.md

@@ -141,6 +141,7 @@
    UNSPEC_VMRGH_DIRECT
    UNSPEC_VMRGL_DIRECT
    UNSPEC_VSPLT_DIRECT
+   UNSPEC_VMRGEW_DIRECT
    UNSPEC_VSUMSWS_DIRECT
    UNSPEC_VADDCUQ
    UNSPEC_VADDEUQM
@@ -1340,6 +1341,15 @@
 }
   [(set_attr "type" "vecperm")])
 
+(define_insn "p8_vmrgew_v4sf_direct"
+  [(set (match_operand:V4SF 0 "register_operand" "=v")
+	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+		      (match_operand:V4SF 2 "register_operand" "v")]
+		     UNSPEC_VMRGEW_DIRECT))]
+  "TARGET_P8_VECTOR"
+  "vmrgew %0,%1,%2"
+  [(set_attr "type" "vecperm")])
+
 (define_expand "vec_widen_umult_even_v16qi"
   [(use (match_operand:V8HI 0 "register_operand" ""))
    (use (match_operand:V16QI 1 "register_operand" ""))

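As a sketch of what the new insn does (my reading of the ISA, not text from the commit): vmrgew (Vector Merge Even Word, introduced in ISA 2.07 / power8) interleaves the even-numbered words of its two inputs, in big-endian word numbering. A scalar C model:

/* Scalar model of vmrgew on four 32-bit lanes, using big-endian
   word numbering.  An illustrative sketch, not compiler code.  */
static void
vmrgew_model (const float a[4], const float b[4], float out[4])
{
  out[0] = a[0];  /* even word 0 of the first source   */
  out[1] = b[0];  /* even word 0 of the second source  */
  out[2] = a[2];  /* even word 2 of the first source   */
  out[3] = b[2];  /* even word 2 of the second source  */
}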

gcc/config/rs6000/rs6000.c

@@ -6821,11 +6821,26 @@ rs6000_expand_vector_init (rtx target, rtx vals)
 	  rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
 	  rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
-	  emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
-	  emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
-	  emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
-	  emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
-	  rs6000_expand_extract_even (target, flt_even, flt_odd);
+	  /* Use VMRGEW if we can instead of doing a permute.  */
+	  if (TARGET_P8_VECTOR)
+	    {
+	      emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
+	      emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
+	      emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
+	      emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
+	      if (BYTES_BIG_ENDIAN)
+		emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
+	      else
+		emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
+	    }
+	  else
+	    {
+	      emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
+	      emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
+	      emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
+	      emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
+	      rs6000_expand_extract_even (target, flt_even, flt_odd);
+	    }
 	}
       return;
     }
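
Putting the pieces together (an illustrative sketch under my reading of the code above, not part of the commit): xvcvdpsp narrows each double of a V2DF into the even word of the corresponding doubleword, so after the two conversions the four values sit exactly where vmrgew expects them, and one merge finishes the vector. The old path needed rs6000_expand_extract_even, i.e. a full permute, to do the same interleave. On little endian the vmrgew operands are swapped because the hardware numbers words from the opposite end of the register.

/* Scalar walk-through of the TARGET_P8_VECTOR path above (a sketch,
   not compiler code).  After gen_vsx_concat_v2sf and
   gen_vsx_xvcvdpsp, each converted float occupies the even word of
   its doubleword:
     flt_even = { op0, junk, op2, junk }
     flt_odd  = { op1, junk, op3, junk }
   so a single vmrgew yields the finished vector.  */
static void
v4sf_init_model (float op0, float op1, float op2, float op3,
		 float target[4])
{
  float junk = 0.0f;                             /* undefined lanes  */
  float flt_even[4] = { op0, junk, op2, junk };  /* from { op0, op2 } */
  float flt_odd[4]  = { op1, junk, op3, junk };  /* from { op1, op3 } */

  /* vmrgew %target,%flt_even,%flt_odd (big-endian operand order).  */
  target[0] = flt_even[0];
  target[1] = flt_odd[0];
  target[2] = flt_even[2];
  target[3] = flt_odd[2];
}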