diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 0e8b23288a0..d9c45c9e794 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2018-02-08  Richard Sandiford
+
+	PR tree-optimization/84265
+	* tree-vect-stmts.c (vectorizable_store): Don't treat
+	VMAT_CONTIGUOUS accesses as grouped.
+	(vectorizable_load): Likewise.
+
 2018-02-08  Richard Sandiford
 
 	PR tree-optimization/81635
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 52cf0b53870..e989c94a5e4 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2018-02-08  Richard Sandiford
+
+	PR tree-optimization/84265
+	* gcc.dg/vect/pr84265.c: New test.
+
 2018-02-08  Richard Sandiford
 
 	PR tree-optimization/81635
diff --git a/gcc/testsuite/gcc.dg/vect/pr84265.c b/gcc/testsuite/gcc.dg/vect/pr84265.c
new file mode 100644
index 00000000000..59984aead05
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr84265.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+
+struct a
+{
+  unsigned long b;
+  unsigned long c;
+  int d;
+  int *e;
+  char f;
+};
+
+struct
+{
+  int g;
+  struct a h[];
+} i;
+
+int j, k;
+void l ()
+{
+  for (; k; k++)
+    j += (int) (i.h[k].c - i.h[k].b);
+}
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index c5085ca9c0a..6066a52c23e 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -6214,7 +6214,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
     }
 
   grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-		   && memory_access_type != VMAT_GATHER_SCATTER);
+		   && memory_access_type != VMAT_GATHER_SCATTER
+		   && (slp || memory_access_type != VMAT_CONTIGUOUS));
   if (grouped_store)
     {
       first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
@@ -7708,7 +7709,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       return true;
     }
 
-  if (memory_access_type == VMAT_GATHER_SCATTER)
+  if (memory_access_type == VMAT_GATHER_SCATTER
+      || (!slp && memory_access_type == VMAT_CONTIGUOUS))
     grouped_load = false;
 
   if (grouped_load)