[AArch64] Rename cmp_result iterator

The comparison results provided by the V_cmp_result/v_cmp_result
attributes were simply the corresponding integer vector.  We'd also
like to have easy access to the integer vector for SVE, but using
"cmp_result" would be confusing because SVE comparisons return
predicates instead of vectors.  This patch therefore renames the
attributes to the more general V_INT_EQUIV/v_int_equiv instead.

As to the capitalisation: there are already many iterators that use
all lowercase vs. all uppercase names to distinguish all lowercase
vs. all uppercase expansions (e.g. fcvt_target and FCVT_TARGET).
It's also the convention used for the built-in mode/MODE/code/CODE/etc.
attributes.  IMO those names are easier to read at a glance, rather than
relying on a single letter's difference.

2017-08-22  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* config/aarch64/iterators.md (V_cmp_result): Rename to...
	(V_INT_EQUIV): ...this.
	(v_cmp_result): Rename to...
	(v_int_equiv): ...this.
	* config/aarch64/aarch64.md (xorsign<mode>3): Update accordingly.
	* config/aarch64/aarch64-simd.md (xorsign<mode>3): Likewise.
	(copysign<mode>3): Likewise.
	(aarch64_simd_bsl<mode>_internal): Likewise.
	(aarch64_simd_bsl<mode>): Likewise.
	(vec_cmp<mode><mode>): Likewise.
	(vcond<mode><mode>): Likewise.
	(vcond<v_cmp_mixed><mode>): Likewise.
	(vcondu<mode><v_cmp_mixed>): Likewise.
	(aarch64_cm<optab><mode>): Likewise.
	(aarch64_cmtst<mode>): Likewise.
	(aarch64_fac<optab><mode>): Likewise.
	(vec_perm_const<mode>): Likewise.
	(vcond_mask_<mode><v_cmp_result>): Rename to...
	(vcond_mask_<mode><v_int_equiv>): ...this.
	(vec_cmp<mode><v_cmp_result>): Rename to...
	(vec_cmp<mode><v_int_equiv>): ...this.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r251556
This commit is contained in:
Richard Sandiford 2017-08-31 09:52:38 +00:00 committed by Richard Sandiford
parent fca7d0a4fd
commit 5f5653148b
4 changed files with 113 additions and 87 deletions

View File

@@ -1,3 +1,29 @@
2017-08-31 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
* config/aarch64/iterators.md (V_cmp_result): Rename to...
(V_INT_EQUIV): ...this.
(v_cmp_result): Rename to...
(v_int_equiv): ...this.
* config/aarch64/aarch64.md (xorsign<mode>3): Update accordingly.
* config/aarch64/aarch64-simd.md (xorsign<mode>3): Likewise.
(copysign<mode>3): Likewise.
(aarch64_simd_bsl<mode>_internal): Likewise.
(aarch64_simd_bsl<mode>): Likewise.
(vec_cmp<mode><mode>): Likewise.
(vcond<mode><mode>): Likewise.
(vcond<v_cmp_mixed><mode>): Likewise.
(vcondu<mode><v_cmp_mixed>): Likewise.
(aarch64_cm<optab><mode>): Likewise.
(aarch64_cmtst<mode>): Likewise.
(aarch64_fac<optab><mode>): Likewise.
(vec_perm_const<mode>): Likewise.
(vcond_mask_<mode><v_cmp_result>): Rename to...
(vcond_mask_<mode><v_int_equiv>): ...this.
(vec_cmp<mode><v_cmp_result>): Rename to...
(vec_cmp<mode><v_int_equiv>): ...this.
2017-08-31 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>

View File

@@ -364,7 +364,7 @@
"TARGET_SIMD"
{
machine_mode imode = <V_cmp_result>mode;
machine_mode imode = <V_INT_EQUIV>mode;
rtx v_bitmask = gen_reg_rtx (imode);
rtx op1x = gen_reg_rtx (imode);
rtx op2x = gen_reg_rtx (imode);
@@ -375,11 +375,11 @@
int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;
emit_move_insn (v_bitmask,
aarch64_simd_gen_const_vector_dup (<V_cmp_result>mode,
aarch64_simd_gen_const_vector_dup (<V_INT_EQUIV>mode,
HOST_WIDE_INT_M1U << bits));
emit_insn (gen_and<v_cmp_result>3 (op2x, v_bitmask, arg2));
emit_insn (gen_xor<v_cmp_result>3 (op1x, arg1, op2x));
emit_insn (gen_and<v_int_equiv>3 (op2x, v_bitmask, arg2));
emit_insn (gen_xor<v_int_equiv>3 (op1x, arg1, op2x));
emit_move_insn (operands[0],
lowpart_subreg (<MODE>mode, op1x, imode));
DONE;
@@ -392,11 +392,11 @@
(match_operand:VHSDF 2 "register_operand")]
"TARGET_FLOAT && TARGET_SIMD"
{
rtx v_bitmask = gen_reg_rtx (<V_cmp_result>mode);
rtx v_bitmask = gen_reg_rtx (<V_INT_EQUIV>mode);
int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;
emit_move_insn (v_bitmask,
aarch64_simd_gen_const_vector_dup (<V_cmp_result>mode,
aarch64_simd_gen_const_vector_dup (<V_INT_EQUIV>mode,
HOST_WIDE_INT_M1U << bits));
emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], v_bitmask,
operands[2], operands[1]));
@@ -2319,10 +2319,10 @@
(xor:VSDQ_I_DI
(and:VSDQ_I_DI
(xor:VSDQ_I_DI
(match_operand:<V_cmp_result> 3 "register_operand" "w,0,w")
(match_operand:<V_INT_EQUIV> 3 "register_operand" "w,0,w")
(match_operand:VSDQ_I_DI 2 "register_operand" "w,w,0"))
(match_operand:VSDQ_I_DI 1 "register_operand" "0,w,w"))
(match_dup:<V_cmp_result> 3)
(match_dup:<V_INT_EQUIV> 3)
))]
"TARGET_SIMD"
"@
@@ -2357,7 +2357,7 @@
(define_expand "aarch64_simd_bsl<mode>"
[(match_operand:VALLDIF 0 "register_operand")
(match_operand:<V_cmp_result> 1 "register_operand")
(match_operand:<V_INT_EQUIV> 1 "register_operand")
(match_operand:VALLDIF 2 "register_operand")
(match_operand:VALLDIF 3 "register_operand")]
"TARGET_SIMD"
@@ -2366,26 +2366,26 @@
rtx tmp = operands[0];
if (FLOAT_MODE_P (<MODE>mode))
{
operands[2] = gen_lowpart (<V_cmp_result>mode, operands[2]);
operands[3] = gen_lowpart (<V_cmp_result>mode, operands[3]);
tmp = gen_reg_rtx (<V_cmp_result>mode);
operands[2] = gen_lowpart (<V_INT_EQUIV>mode, operands[2]);
operands[3] = gen_lowpart (<V_INT_EQUIV>mode, operands[3]);
tmp = gen_reg_rtx (<V_INT_EQUIV>mode);
}
operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
emit_insn (gen_aarch64_simd_bsl<v_cmp_result>_internal (tmp,
operands[1],
operands[2],
operands[3]));
operands[1] = gen_lowpart (<V_INT_EQUIV>mode, operands[1]);
emit_insn (gen_aarch64_simd_bsl<v_int_equiv>_internal (tmp,
operands[1],
operands[2],
operands[3]));
if (tmp != operands[0])
emit_move_insn (operands[0], gen_lowpart (<MODE>mode, tmp));
DONE;
})
(define_expand "vcond_mask_<mode><v_cmp_result>"
(define_expand "vcond_mask_<mode><v_int_equiv>"
[(match_operand:VALLDI 0 "register_operand")
(match_operand:VALLDI 1 "nonmemory_operand")
(match_operand:VALLDI 2 "nonmemory_operand")
(match_operand:<V_cmp_result> 3 "register_operand")]
(match_operand:<V_INT_EQUIV> 3 "register_operand")]
"TARGET_SIMD"
{
/* If we have (a = (P) ? -1 : 0);
@@ -2396,7 +2396,7 @@
/* Similarly, (a = (P) ? 0 : -1) is just inverting the generated mask. */
else if (operands[1] == CONST0_RTX (<MODE>mode)
&& operands[2] == CONSTM1_RTX (<MODE>mode))
emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[3]));
emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[3]));
else
{
if (!REG_P (operands[1]))
@@ -2478,7 +2478,7 @@
case NE:
/* Handle NE as !EQ. */
emit_insn (gen_aarch64_cmeq<mode> (mask, operands[2], operands[3]));
emit_insn (gen_one_cmpl<v_cmp_result>2 (mask, mask));
emit_insn (gen_one_cmpl<v_int_equiv>2 (mask, mask));
break;
case EQ:
@@ -2492,8 +2492,8 @@
DONE;
})
(define_expand "vec_cmp<mode><v_cmp_result>"
[(set (match_operand:<V_cmp_result> 0 "register_operand")
(define_expand "vec_cmp<mode><v_int_equiv>"
[(set (match_operand:<V_INT_EQUIV> 0 "register_operand")
(match_operator 1 "comparison_operator"
[(match_operand:VDQF 2 "register_operand")
(match_operand:VDQF 3 "nonmemory_operand")]))]
@@ -2501,7 +2501,7 @@
{
int use_zero_form = 0;
enum rtx_code code = GET_CODE (operands[1]);
rtx tmp = gen_reg_rtx (<V_cmp_result>mode);
rtx tmp = gen_reg_rtx (<V_INT_EQUIV>mode);
rtx (*comparison) (rtx, rtx, rtx) = NULL;
@@ -2587,7 +2587,7 @@
a NE b -> !(a EQ b) */
gcc_assert (comparison != NULL);
emit_insn (comparison (operands[0], operands[2], operands[3]));
emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
break;
case LT:
@@ -2612,8 +2612,8 @@
emit_insn (gen_aarch64_cmgt<mode> (operands[0],
operands[2], operands[3]));
emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[3], operands[2]));
emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
break;
case UNORDERED:
@@ -2622,15 +2622,15 @@
emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[2], operands[3]));
emit_insn (gen_aarch64_cmge<mode> (operands[0],
operands[3], operands[2]));
emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
emit_insn (gen_one_cmpl<v_cmp_result>2 (operands[0], operands[0]));
emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
emit_insn (gen_one_cmpl<v_int_equiv>2 (operands[0], operands[0]));
break;
case ORDERED:
emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[2], operands[3]));
emit_insn (gen_aarch64_cmge<mode> (operands[0],
operands[3], operands[2]));
emit_insn (gen_ior<v_cmp_result>3 (operands[0], operands[0], tmp));
emit_insn (gen_ior<v_int_equiv>3 (operands[0], operands[0], tmp));
break;
default:
@@ -2662,7 +2662,7 @@
(match_operand:VALLDI 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
rtx mask = gen_reg_rtx (<V_cmp_result>mode);
rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
enum rtx_code code = GET_CODE (operands[3]);
/* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
@@ -2674,10 +2674,10 @@
operands[4], operands[5]);
std::swap (operands[1], operands[2]);
}
emit_insn (gen_vec_cmp<mode><v_cmp_result> (mask, operands[3],
operands[4], operands[5]));
emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
operands[2], mask));
emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3],
operands[4], operands[5]));
emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
operands[2], mask));
DONE;
})
@@ -2692,7 +2692,7 @@
(match_operand:<V_cmp_mixed> 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
rtx mask = gen_reg_rtx (<V_cmp_result>mode);
rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
enum rtx_code code = GET_CODE (operands[3]);
/* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
@@ -2704,9 +2704,9 @@
operands[4], operands[5]);
std::swap (operands[1], operands[2]);
}
emit_insn (gen_vec_cmp<mode><v_cmp_result> (mask, operands[3],
operands[4], operands[5]));
emit_insn (gen_vcond_mask_<v_cmp_mixed><v_cmp_result> (
emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3],
operands[4], operands[5]));
emit_insn (gen_vcond_mask_<v_cmp_mixed><v_int_equiv> (
operands[0], operands[1],
operands[2], mask));
@@ -2737,8 +2737,8 @@
}
emit_insn (gen_vec_cmp<mode><mode> (mask, operands[3],
operands[4], operands[5]));
emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
operands[2], mask));
emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
operands[2], mask));
DONE;
})
@@ -2752,7 +2752,7 @@
(match_operand:VDQF 2 "nonmemory_operand")))]
"TARGET_SIMD"
{
rtx mask = gen_reg_rtx (<V_cmp_result>mode);
rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode);
enum rtx_code code = GET_CODE (operands[3]);
/* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert
@@ -2767,8 +2767,8 @@
emit_insn (gen_vec_cmp<v_cmp_mixed><v_cmp_mixed> (
mask, operands[3],
operands[4], operands[5]));
emit_insn (gen_vcond_mask_<mode><v_cmp_result> (operands[0], operands[1],
operands[2], mask));
emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1],
operands[2], mask));
DONE;
})
@@ -4208,9 +4208,9 @@
;; have different ideas of what should be passed to this pattern.
(define_insn "aarch64_cm<optab><mode>"
[(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
(neg:<V_cmp_result>
(COMPARISONS:<V_cmp_result>
[(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
(neg:<V_INT_EQUIV>
(COMPARISONS:<V_INT_EQUIV>
(match_operand:VDQ_I 1 "register_operand" "w,w")
(match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero" "w,ZDz")
)))]
@@ -4273,9 +4273,9 @@
;; cm(hs|hi)
(define_insn "aarch64_cm<optab><mode>"
[(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
(neg:<V_cmp_result>
(UCOMPARISONS:<V_cmp_result>
[(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
(neg:<V_INT_EQUIV>
(UCOMPARISONS:<V_INT_EQUIV>
(match_operand:VDQ_I 1 "register_operand" "w")
(match_operand:VDQ_I 2 "register_operand" "w")
)))]
@@ -4340,14 +4340,14 @@
;; plus (eq (and x y) 0) -1.
(define_insn "aarch64_cmtst<mode>"
[(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
(plus:<V_cmp_result>
(eq:<V_cmp_result>
[(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
(plus:<V_INT_EQUIV>
(eq:<V_INT_EQUIV>
(and:VDQ_I
(match_operand:VDQ_I 1 "register_operand" "w")
(match_operand:VDQ_I 2 "register_operand" "w"))
(match_operand:VDQ_I 3 "aarch64_simd_imm_zero"))
(match_operand:<V_cmp_result> 4 "aarch64_simd_imm_minus_one")))
(match_operand:<V_INT_EQUIV> 4 "aarch64_simd_imm_minus_one")))
]
"TARGET_SIMD"
"cmtst\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
@@ -4408,9 +4408,9 @@
;; fcm(eq|ge|gt|le|lt)
(define_insn "aarch64_cm<optab><mode>"
[(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
(neg:<V_cmp_result>
(COMPARISONS:<V_cmp_result>
[(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w,w")
(neg:<V_INT_EQUIV>
(COMPARISONS:<V_INT_EQUIV>
(match_operand:VHSDF_HSDF 1 "register_operand" "w,w")
(match_operand:VHSDF_HSDF 2 "aarch64_simd_reg_or_zero" "w,YDz")
)))]
@@ -4426,9 +4426,9 @@
;; generating fac(ge|gt).
(define_insn "aarch64_fac<optab><mode>"
[(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
(neg:<V_cmp_result>
(FAC_COMPARISONS:<V_cmp_result>
[(set (match_operand:<V_INT_EQUIV> 0 "register_operand" "=w")
(neg:<V_INT_EQUIV>
(FAC_COMPARISONS:<V_INT_EQUIV>
(abs:VHSDF_HSDF
(match_operand:VHSDF_HSDF 1 "register_operand" "w"))
(abs:VHSDF_HSDF
@@ -5130,7 +5130,7 @@
[(match_operand:VALL_F16 0 "register_operand")
(match_operand:VALL_F16 1 "register_operand")
(match_operand:VALL_F16 2 "register_operand")
(match_operand:<V_cmp_result> 3)]
(match_operand:<V_INT_EQUIV> 3)]
"TARGET_SIMD"
{
if (aarch64_expand_vec_perm_const (operands[0], operands[1],

View File

@@ -5196,7 +5196,7 @@
"TARGET_FLOAT && TARGET_SIMD"
{
machine_mode imode = <V_cmp_result>mode;
machine_mode imode = <V_INT_EQUIV>mode;
rtx mask = gen_reg_rtx (imode);
rtx op1x = gen_reg_rtx (imode);
rtx op2x = gen_reg_rtx (imode);
@@ -5205,13 +5205,13 @@
emit_move_insn (mask, GEN_INT (trunc_int_for_mode (HOST_WIDE_INT_M1U << bits,
imode)));
emit_insn (gen_and<v_cmp_result>3 (op2x, mask,
lowpart_subreg (imode, operands[2],
<MODE>mode)));
emit_insn (gen_xor<v_cmp_result>3 (op1x,
lowpart_subreg (imode, operands[1],
<MODE>mode),
op2x));
emit_insn (gen_and<v_int_equiv>3 (op2x, mask,
lowpart_subreg (imode, operands[2],
<MODE>mode)));
emit_insn (gen_xor<v_int_equiv>3 (op1x,
lowpart_subreg (imode, operands[1],
<MODE>mode),
op2x));
emit_move_insn (operands[0],
lowpart_subreg (<MODE>mode, op1x, imode));
DONE;

View File

@@ -662,25 +662,25 @@
;; Double vector types for ALLX.
(define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])
;; Mode of result of comparison operations.
(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
(V4HI "V4HI") (V8HI "V8HI")
(V2SI "V2SI") (V4SI "V4SI")
(DI "DI") (V2DI "V2DI")
(V4HF "V4HI") (V8HF "V8HI")
(V2SF "V2SI") (V4SF "V4SI")
(V2DF "V2DI") (DF "DI")
(SF "SI") (HF "HI")])
;; Mode with floating-point values replaced by like-sized integers.
(define_mode_attr V_INT_EQUIV [(V8QI "V8QI") (V16QI "V16QI")
(V4HI "V4HI") (V8HI "V8HI")
(V2SI "V2SI") (V4SI "V4SI")
(DI "DI") (V2DI "V2DI")
(V4HF "V4HI") (V8HF "V8HI")
(V2SF "V2SI") (V4SF "V4SI")
(V2DF "V2DI") (DF "DI")
(SF "SI") (HF "HI")])
;; Lower case mode of results of comparison operations.
(define_mode_attr v_cmp_result [(V8QI "v8qi") (V16QI "v16qi")
(V4HI "v4hi") (V8HI "v8hi")
(V2SI "v2si") (V4SI "v4si")
(DI "di") (V2DI "v2di")
(V4HF "v4hi") (V8HF "v8hi")
(V2SF "v2si") (V4SF "v4si")
(V2DF "v2di") (DF "di")
(SF "si")])
;; Lower case mode with floating-point values replaced by like-sized integers.
(define_mode_attr v_int_equiv [(V8QI "v8qi") (V16QI "v16qi")
(V4HI "v4hi") (V8HI "v8hi")
(V2SI "v2si") (V4SI "v4si")
(DI "di") (V2DI "v2di")
(V4HF "v4hi") (V8HF "v8hi")
(V2SF "v2si") (V4SF "v4si")
(V2DF "v2di") (DF "di")
(SF "si")])
;; Mode for vector conditional operations where the comparison has
;; different type from the lhs.