aarch64-simd.md (*aarch64_simd_mov<mode>): Call splitter.

gcc/
	* config/aarch64/aarch64-simd.md (*aarch64_simd_mov<mode>): Call splitter.
	(aarch64_simd_mov<mode>): New expander.
	(aarch64_simd_mov_to_<mode>low): New instruction pattern.
	(aarch64_simd_mov_to_<mode>high): Likewise.
	(aarch64_simd_mov_from_<mode>low): Likewise.
	(aarch64_simd_mov_from_<mode>high): Likewise.
	(aarch64_dup_lane<mode>): Update.
	(aarch64_dup_lanedi): New instruction pattern.
	* config/aarch64/aarch64-protos.h (aarch64_split_simd_move): New prototype.
	* config/aarch64/aarch64.c (aarch64_split_simd_move): New function.

	testsuite/
	* gcc.target/aarch64/scalar_intrinsics.c: Update.

From-SVN: r198680
Sofiane Naci, 2013-05-07 12:47:18 +0000, committed by Sofiane Naci
parent a7a7d10ed5
commit fd4842cd67
6 changed files with 176 additions and 4 deletions

gcc/ChangeLog

@@ -1,3 +1,16 @@
2013-05-07  Sofiane Naci  <sofiane.naci@arm.com>

	* config/aarch64/aarch64-simd.md (*aarch64_simd_mov<mode>): Call splitter.
	(aarch64_simd_mov<mode>): New expander.
	(aarch64_simd_mov_to_<mode>low): New instruction pattern.
	(aarch64_simd_mov_to_<mode>high): Likewise.
	(aarch64_simd_mov_from_<mode>low): Likewise.
	(aarch64_simd_mov_from_<mode>high): Likewise.
	(aarch64_dup_lane<mode>): Update.
	(aarch64_dup_lanedi): New instruction pattern.
	* config/aarch64/aarch64-protos.h (aarch64_split_simd_move): New prototype.
	* config/aarch64/aarch64.c (aarch64_split_simd_move): New function.

2013-05-07  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

	* gimple-ssa-strength-reduction.c (lazy_create_slsr_reg): Remove.
gcc/config/aarch64/aarch64-protos.h

@@ -219,6 +219,8 @@ void aarch64_split_128bit_move (rtx, rtx);
bool aarch64_split_128bit_move_p (rtx, rtx);
void aarch64_split_simd_move (rtx, rtx);
/* Check for a legitimate floating point constant for FMOV. */
bool aarch64_float_const_representable_p (rtx);

gcc/config/aarch64/aarch64-simd.md

@@ -358,7 +358,7 @@
)

(define_insn "aarch64_dup_lane<mode>"
-  [(set (match_operand:SDQ_I 0 "register_operand" "=w")
+  [(set (match_operand:ALLX 0 "register_operand" "=w")
        (vec_select:<VEL>
          (match_operand:<VCON> 1 "register_operand" "w")
          (parallel [(match_operand:SI 2 "immediate_operand" "i")])
@@ -369,6 +369,19 @@
   (set_attr "simd_mode" "<MODE>")]
)

(define_insn "aarch64_dup_lanedi"
  [(set (match_operand:DI 0 "register_operand" "=w,r")
        (vec_select:DI
          (match_operand:V2DI 1 "register_operand" "w,w")
          (parallel [(match_operand:SI 2 "immediate_operand" "i,i")])))]
  "TARGET_SIMD"
  "@
   dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]
   umov\t%0, %1.d[%2]"
  [(set_attr "simd_type" "simd_dup")
   (set_attr "simd_mode" "DI")]
)

(define_insn "aarch64_simd_dup<mode>"
  [(set (match_operand:VDQF 0 "register_operand" "=w")
        (vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))]
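
For illustration, two hypothetical user-level functions (an assumption, not taken from this commit or from scalar_intrinsics.c) showing the kind of lane selection the two alternatives of aarch64_dup_lanedi serve: the "w" alternative keeps the selected doubleword in the FP/SIMD file (dup), while the "r" alternative moves it to a general register (umov).

/* Hypothetical examples only; whether these map onto aarch64_dup_lanedi
   depends on the rest of the backend.  */
#include <arm_neon.h>
#include <stdint.h>

/* Result stays in the FP/SIMD register file ("w" alternative, dup).  */
int64x1_t
lane_to_simd_reg (int64x2_t a)
{
  return vget_high_s64 (a);
}

/* Result is wanted as a scalar in a general register ("r" alternative,
   umov).  */
int64_t
lane_to_general_reg (int64x2_t a)
{
  return vgetq_lane_s64 (a, 1);
}
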
@@ -419,8 +432,8 @@
     case 0: return "ld1\t{%0.<Vtype>}, %1";
     case 1: return "st1\t{%1.<Vtype>}, %0";
     case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
-    case 3: return "umov\t%0, %1.d[0]\;umov\t%H0, %1.d[1]";
-    case 4: return "ins\t%0.d[0], %1\;ins\t%0.d[1], %H1";
+    case 3: return "#";
+    case 4: return "#";
     case 5: return "#";
     case 6:
        return aarch64_output_simd_mov_immediate (&operands[1],
@@ -454,6 +467,105 @@
  aarch64_simd_disambiguate_copy (operands, dest, src, 2);
})

(define_split
  [(set (match_operand:VQ 0 "register_operand" "")
        (match_operand:VQ 1 "register_operand" ""))]
  "TARGET_SIMD && reload_completed
   && ((FP_REGNUM_P (REGNO (operands[0])) && GP_REGNUM_P (REGNO (operands[1])))
       || (GP_REGNUM_P (REGNO (operands[0])) && FP_REGNUM_P (REGNO (operands[1]))))"
  [(const_int 0)]
{
  aarch64_split_simd_move (operands[0], operands[1]);
  DONE;
})
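
In words: the split only runs after reload, and only when the copy crosses register files, i.e. exactly one operand is an FP/SIMD register and the other a general-purpose register. A minimal sketch of that condition as a standalone predicate (the helper name is an assumption; FP_REGNUM_P, GP_REGNUM_P and REGNO are the existing GCC/aarch64 macros):

/* Sketch only: restates the define_split condition above.  */
static bool
cross_register_file_copy_p (rtx dst, rtx src)
{
  /* True when one side is an FP/SIMD register and the other is a
     general-purpose register, in either direction.  */
  return (FP_REGNUM_P (REGNO (dst)) && GP_REGNUM_P (REGNO (src)))
         || (GP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src)));
}
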
(define_expand "aarch64_simd_mov<mode>"
[(set (match_operand:VQ 0)
(match_operand:VQ 1))]
"TARGET_SIMD"
{
rtx dst = operands[0];
rtx src = operands[1];
if (GP_REGNUM_P (REGNO (src)))
{
rtx low_part = gen_lowpart (<VHALF>mode, src);
rtx high_part = gen_highpart (<VHALF>mode, src);
emit_insn
(gen_aarch64_simd_mov_to_<mode>low (dst, low_part));
emit_insn
(gen_aarch64_simd_mov_to_<mode>high (dst, high_part));
}
else
{
rtx low_half = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
rtx high_half = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
rtx low_part = gen_lowpart (<VHALF>mode, dst);
rtx high_part = gen_highpart (<VHALF>mode, dst);
emit_insn
(gen_aarch64_simd_mov_from_<mode>low (low_part, src, low_half));
emit_insn
(gen_aarch64_simd_mov_from_<mode>high (high_part, src, high_half));
}
DONE;
}
)
(define_insn "aarch64_simd_mov_to_<mode>low"
[(set (zero_extract:VQ
(match_operand:VQ 0 "register_operand" "+w")
(const_int 64) (const_int 0))
(vec_concat:VQ
(match_operand:<VHALF> 1 "register_operand" "r")
(vec_duplicate:<VHALF> (const_int 0))))]
"TARGET_SIMD && reload_completed"
"ins\t%0.d[0], %1"
[(set_attr "simd_type" "simd_move")
(set_attr "simd_mode" "<MODE>")
(set_attr "length" "4")
])
(define_insn "aarch64_simd_mov_to_<mode>high"
[(set (zero_extract:VQ
(match_operand:VQ 0 "register_operand" "+w")
(const_int 64) (const_int 64))
(vec_concat:VQ
(match_operand:<VHALF> 1 "register_operand" "r")
(vec_duplicate:<VHALF> (const_int 0))))]
"TARGET_SIMD && reload_completed"
"ins\t%0.d[1], %1"
[(set_attr "simd_type" "simd_move")
(set_attr "simd_mode" "<MODE>")
(set_attr "length" "4")
])
(define_insn "aarch64_simd_mov_from_<mode>low"
[(set (match_operand:<VHALF> 0 "register_operand" "=r")
(vec_select:<VHALF>
(match_operand:VQ 1 "register_operand" "w")
(match_operand:VQ 2 "vect_par_cnst_lo_half" "")))]
"TARGET_SIMD && reload_completed"
"umov\t%0, %1.d[0]"
[(set_attr "simd_type" "simd_move")
(set_attr "simd_mode" "<MODE>")
(set_attr "length" "4")
])
(define_insn "aarch64_simd_mov_from_<mode>high"
[(set (match_operand:<VHALF> 0 "register_operand" "=r")
(vec_select:<VHALF>
(match_operand:VQ 1 "register_operand" "w")
(match_operand:VQ 2 "vect_par_cnst_hi_half" "")))]
"TARGET_SIMD && reload_completed"
"umov\t%0, %1.d[1]"
[(set_attr "simd_type" "simd_move")
(set_attr "simd_mode" "<MODE>")
(set_attr "length" "4")
])
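
Taken together, the expander and the four patterns above turn a 128-bit cross-file move into two 64-bit transfers: ins into %0.d[0] and %0.d[1] when the data moves towards the vector side, and umov from d[0] and d[1] when it moves towards the general registers. A freestanding C sketch of that strategy (the type and function names below are assumptions for illustration, not GCC code):

#include <stdint.h>

/* Stand-ins for a 128-bit Q register and a pair of X registers.  */
typedef struct { uint64_t d0, d1; } qreg;
typedef struct { uint64_t lo, hi; } xpair;

/* General registers -> vector register, one 64-bit half at a time,
   as aarch64_simd_mov_to_<mode>low / ..._high do with "ins".  */
static void
move_to_vector (qreg *dst, const xpair *src)
{
  dst->d0 = src->lo;   /* ins  v.d[0], x_lo  */
  dst->d1 = src->hi;   /* ins  v.d[1], x_hi  */
}

/* Vector register -> general registers, as
   aarch64_simd_mov_from_<mode>low / ..._high do with "umov".  */
static void
move_from_vector (xpair *dst, const qreg *src)
{
  dst->lo = src->d0;   /* umov  x_lo, v.d[0]  */
  dst->hi = src->d1;   /* umov  x_hi, v.d[1]  */
}
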
(define_insn "orn<mode>3"
[(set (match_operand:VDQ 0 "register_operand" "=w")
(ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))

gcc/config/aarch64/aarch64.c

@@ -656,6 +656,47 @@ aarch64_split_128bit_move_p (rtx dst, rtx src)
          || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
}

/* Split a complex SIMD move.  */

void
aarch64_split_simd_move (rtx dst, rtx src)
{
  enum machine_mode src_mode = GET_MODE (src);
  enum machine_mode dst_mode = GET_MODE (dst);

  gcc_assert (VECTOR_MODE_P (dst_mode));

  if (REG_P (dst) && REG_P (src))
    {
      gcc_assert (VECTOR_MODE_P (src_mode));
      switch (src_mode)
        {
        case V16QImode:
          emit_insn (gen_aarch64_simd_movv16qi (dst, src));
          break;
        case V8HImode:
          emit_insn (gen_aarch64_simd_movv8hi (dst, src));
          break;
        case V4SImode:
          emit_insn (gen_aarch64_simd_movv4si (dst, src));
          break;
        case V2DImode:
          emit_insn (gen_aarch64_simd_movv2di (dst, src));
          break;
        case V4SFmode:
          emit_insn (gen_aarch64_simd_movv4sf (dst, src));
          break;
        case V2DFmode:
          emit_insn (gen_aarch64_simd_movv2df (dst, src));
          break;
        default:
          gcc_unreachable ();
        }
      return;
    }
}
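
The six modes handled here are the 128-bit vector modes the VQ expander above iterates over, so each case simply dispatches to the matching generator. A hedged sketch of how such a helper is reached (the wrapper name is an assumption; in this commit the only caller is the define_split in aarch64-simd.md):

/* Sketch only: the guards mirror what the define_split's
   register_operand predicates already guarantee.  */
static void
split_simd_reg_copy_if_needed (rtx dst, rtx src)
{
  if (REG_P (dst) && REG_P (src)
      && VECTOR_MODE_P (GET_MODE (dst)))
    aarch64_split_simd_move (dst, src);
}
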
static rtx
aarch64_force_temporary (rtx x, rtx value)
{

gcc/testsuite/ChangeLog

@@ -1,3 +1,7 @@
2013-05-07  Sofiane Naci  <sofiane.naci@arm.com>

	* gcc.target/aarch64/scalar_intrinsics.c: Update.

2013-05-07  Richard Biener  <rguenther@suse.de>

	PR middle-end/57190
gcc/testsuite/gcc.target/aarch64/scalar_intrinsics.c

@@ -223,7 +223,7 @@ test_vdups_lane_u32 (uint32x4_t a)
  return vdups_lane_u32 (a, 2);
}

-/* { dg-final { scan-assembler-times "\\tdup\\td\[0-9\]+, v\[0-9\]+\.d" 2 } } */
+/* { dg-final { scan-assembler-times "\\tumov\\tx\[0-9\]+, v\[0-9\]+\.d" 2 } } */

int64x1_t
test_vdupd_lane_s64 (int64x2_t a)