From 78a8b26c40f8be8eaca5aaa2680e213e0bd7d05a Mon Sep 17 00:00:00 2001
From: Richard Biener
Date: Mon, 4 May 2015 14:24:49 +0000
Subject: [PATCH] re PR middle-end/65965 (Straight-line memcpy/memset not
 vectorized when equivalent loop is)

2015-05-04  Richard Biener

	PR tree-optimization/65965
	* tree-vect-data-refs.c (vect_analyze_data_ref_accesses): Split
	store groups at gaps.

	* gcc.dg/vect/bb-slp-33.c: New testcase.

From-SVN: r222765
---
 gcc/ChangeLog                         |  6 ++++
 gcc/testsuite/ChangeLog               |  5 +++
 gcc/testsuite/gcc.dg/vect/bb-slp-33.c | 49 +++++++++++++++++++++++++++
 gcc/tree-vect-data-refs.c             |  9 +++++
 4 files changed, 69 insertions(+)
 create mode 100644 gcc/testsuite/gcc.dg/vect/bb-slp-33.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 9ba7b943efc..756071d5df4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2015-05-04  Richard Biener
+
+	PR tree-optimization/65965
+	* tree-vect-data-refs.c (vect_analyze_data_ref_accesses): Split
+	store groups at gaps.
+
 2015-05-04  Richard Biener
 
 	PR tree-optimization/65935
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 719e013cbcc..58308186822 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2015-05-04  Richard Biener
+
+	PR tree-optimization/65965
+	* gcc.dg/vect/bb-slp-33.c: New testcase.
+
 2015-05-04  Richard Biener
 
 	PR tree-optimization/65935
diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-33.c b/gcc/testsuite/gcc.dg/vect/bb-slp-33.c
new file mode 100644
index 00000000000..4e84f51ee8d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/bb-slp-33.c
@@ -0,0 +1,49 @@
+/* { dg-require-effective-target vect_int } */
+
+#include "tree-vect.h"
+
+extern void abort (void);
+
+void __attribute__((noinline,noclone))
+test(int *__restrict__ a, int *__restrict__ b)
+{
+  a[0] = b[0];
+  a[1] = b[1];
+  a[2] = b[2];
+  a[3] = b[3];
+  a[5] = 0;
+  a[6] = 0;
+  a[7] = 0;
+  a[8] = 0;
+}
+
+int main()
+{
+  int a[9];
+  int b[4];
+  b[0] = 1;
+  __asm__ volatile ("");
+  b[1] = 2;
+  __asm__ volatile ("");
+  b[2] = 3;
+  __asm__ volatile ("");
+  b[3] = 4;
+  __asm__ volatile ("");
+  a[4] = 7;
+  check_vect ();
+  test(a, b);
+  if (a[0] != 1
+      || a[1] != 2
+      || a[2] != 3
+      || a[3] != 4
+      || a[4] != 7
+      || a[5] != 0
+      || a[6] != 0
+      || a[7] != 0
+      || a[8] != 0)
+    abort ();
+  return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "slp2" { target { vect_element_align || vect_hw_misalign } } } } */
+/* { dg-final { cleanup-tree-dump "slp2" } } */
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 96afc7ab68d..ccb1f620c2d 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -2602,6 +2602,15 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
 	  if ((init_b - init_a) % type_size_a != 0)
 	    break;
 
+	  /* If we have a store, the accesses are adjacent.  This splits
+	     groups into chunks we support (we don't support vectorization
+	     of stores with gaps).  */
+	  if (!DR_IS_READ (dra)
+	      && (init_b - (HOST_WIDE_INT) TREE_INT_CST_LOW
+			     (DR_INIT (datarefs_copy[i-1]))
+		  != type_size_a))
+	    break;
+
 	  /* The step (if not zero) is greater than the difference between
 	     data-refs' inits.  This splits groups into suitable sizes.  */
 	  HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));