vectorizer: Fix up vectorization using WIDEN_MINUS_EXPR [PR102124]

The following testcase is miscompiled on aarch64-linux at -O3 since the
introduction of WIDEN_MINUS_EXPR.
The problem is if the inner type (half_type) is unsigned and the result
type in which the subtraction is performed (type) has precision more than
twice as large as the inner type's precision.
For other widening operations like WIDEN_{PLUS,MULT}_EXPR, if half_type
is unsigned, the addition/multiplication result in itype is also unsigned
and needs to be zero-extended to type.
But subtraction is special, even when half_type is unsigned, the subtraction
behaves as signed (also regardless of whether the result type is signed or
unsigned), 0xfeU - 0xffU is -1 or 0xffffffffU, not 0x0000ffff.

I think it is better not to use mixed signedness of types in
WIDEN_MINUS_EXPR (have unsigned vector of operands and signed result
vector), so this patch instead adds another cast to make sure we always
sign-extend the result from itype to type if type is wider than itype.

2021-09-01  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/102124
	* tree-vect-patterns.c (vect_recog_widen_op_pattern): For ORIG_CODE
	MINUS_EXPR, if itype is unsigned with smaller precision than type,
	add an extra cast to signed variant of itype to ensure sign-extension.

	* gcc.dg/torture/pr102124.c: New test.
This commit is contained in:
Jakub Jelinek 2021-09-01 13:30:51 +02:00
parent a61623d9b3
commit bea07159d1
2 changed files with 52 additions and 1 deletions

View File

@ -0,0 +1,27 @@
/* PR tree-optimization/102124 */
/* Return 1 if the first LEN bytes of A and B compare equal, 0 otherwise.
   Each a[i] - b[i] is evaluated in int after integer promotion, so any
   mismatching byte makes one of the two subtractions negative and sets
   the sign bit of AB or BA; (ab | ba) >= 0 then holds only when every
   pair of bytes is equal.
   NOTE(review): the loop is deliberately written in exactly this shape
   so the vectorizer recognizes the unsigned-char subtractions as
   WIDEN_MINUS_EXPR (the PR102124 miscompile) -- do not restructure it.  */
int
foo (const unsigned char *a, const unsigned char *b, unsigned long len)
{
int ab, ba;
unsigned long i;
for (i = 0, ab = 0, ba = 0; i < len; i++)
{
ab |= a[i] - b[i]; /* Negative (sign bit set) when b[i] > a[i].  */
ba |= b[i] - a[i]; /* Negative (sign bit set) when a[i] > b[i].  */
}
return (ab | ba) >= 0;
}
/* Driver: A and B hold the same 16 bytes ('a'), C differs ('b');
   the remaining 16 bytes of each 32-byte array stay zero, matching
   the zero fill of a partial aggregate initializer.  foo must report
   equality for (A, B) and inequality for (A, C).  */
int
main ()
{
unsigned char a[32], b[32], c[32];
unsigned long j;
for (j = 0; j < 32; j++)
{
a[j] = j < 16 ? 'a' : 0;
b[j] = j < 16 ? 'a' : 0;
c[j] = j < 16 ? 'b' : 0;
}
if (foo (a, b, 16) == 0)
__builtin_abort ();
if (foo (a, c, 16) != 0)
__builtin_abort ();
return 0;
}

View File

@ -1268,11 +1268,31 @@ vect_recog_widen_op_pattern (vec_info *vinfo,
/* Check target support */
tree vectype = get_vectype_for_scalar_type (vinfo, half_type);
tree vecitype = get_vectype_for_scalar_type (vinfo, itype);
tree ctype = itype;
tree vecctype = vecitype;
if (orig_code == MINUS_EXPR
&& TYPE_UNSIGNED (itype)
&& TYPE_PRECISION (type) > TYPE_PRECISION (itype))
{
/* Subtraction is special, even if half_type is unsigned and no matter
whether type is signed or unsigned, if type is wider than itype,
we need to sign-extend from the widening operation result to the
result type.
Consider half_type unsigned char, operand 1 0xfe, operand 2 0xff,
itype unsigned short and type either int or unsigned int.
Widened (unsigned short) 0xfe - (unsigned short) 0xff is
(unsigned short) 0xffff, but for type int we want the result -1
and for type unsigned int 0xffffffff rather than 0xffff. */
ctype = build_nonstandard_integer_type (TYPE_PRECISION (itype), 0);
vecctype = get_vectype_for_scalar_type (vinfo, ctype);
}
enum tree_code dummy_code;
int dummy_int;
auto_vec<tree> dummy_vec;
if (!vectype
|| !vecitype
|| !vecctype
|| !supportable_widening_operation (vinfo, wide_code, last_stmt_info,
vecitype, vectype,
&dummy_code, &dummy_code,
@ -1291,8 +1311,12 @@ vect_recog_widen_op_pattern (vec_info *vinfo,
gimple *pattern_stmt = gimple_build_assign (var, wide_code,
oprnd[0], oprnd[1]);
if (vecctype != vecitype)
pattern_stmt = vect_convert_output (vinfo, last_stmt_info, ctype,
pattern_stmt, vecitype);
return vect_convert_output (vinfo, last_stmt_info,
type, pattern_stmt, vecitype);
type, pattern_stmt, vecctype);
}
/* Try to detect multiplication on widened inputs, converting MULT_EXPR