vect: Tighten check for SLP memory groups [PR103517]
When checking for compatible stmts, vect_build_slp_tree_1 did:

	       && !(STMT_VINFO_GROUPED_ACCESS (stmt_info)
		    && (first_stmt_code == ARRAY_REF
			|| first_stmt_code == BIT_FIELD_REF
			|| first_stmt_code == INDIRECT_REF
			|| first_stmt_code == COMPONENT_REF
			|| first_stmt_code == MEM_REF)))

That is, it allowed any rhs_code as long as the first_stmt_code looked
valid.  This had the effect of allowing IFN_MASK_LOAD to be paired with
an earlier non-call code (but didn't allow the reverse).

This patch makes the check symmetrical.

gcc/
	PR tree-optimization/103517
	* tree-vect-slp.c (vect_build_slp_tree_1): When allowing two
	different component references, check the codes of both of them,
	rather than just the first.

gcc/testsuite/
	PR tree-optimization/103517
	* gcc.dg/vect/pr103517.c: New test.
This commit is contained in:
parent
cb137e8572
commit
1e625a44f6
13
gcc/testsuite/gcc.dg/vect/pr103517.c
Normal file
13
gcc/testsuite/gcc.dg/vect/pr103517.c
Normal file
@@ -0,0 +1,13 @@
|
||||
/* { dg-do compile } */
|
||||
/* { dg-additional-options "-march=skylake-avx512" { target x86_64-*-* i?86-*-* } } */
|
||||
|
||||
int a;
|
||||
short b, c;
|
||||
extern short d[];
|
||||
void e() {
|
||||
for (short f = 1; f < (short)a; f += 2)
|
||||
if (d[f + 1]) {
|
||||
b = d[f];
|
||||
c = d[f + 1];
|
||||
}
|
||||
}
|
@@ -1121,7 +1121,12 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 		       || first_stmt_code == BIT_FIELD_REF
 		       || first_stmt_code == INDIRECT_REF
 		       || first_stmt_code == COMPONENT_REF
-		       || first_stmt_code == MEM_REF)))
+		       || first_stmt_code == MEM_REF)
+		   && (rhs_code == ARRAY_REF
+		       || rhs_code == BIT_FIELD_REF
+		       || rhs_code == INDIRECT_REF
+		       || rhs_code == COMPONENT_REF
+		       || rhs_code == MEM_REF)))
 	       || first_stmt_load_p != load_p
 	       || first_stmt_phi_p != phi_p)
 	     {
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user