re PR middle-end/65965 (Straight-line memcpy/memset not vectorized when equivalent loop is)

2015-05-04  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/65965
	* tree-vect-data-refs.c (vect_analyze_data_ref_accesses): Split
	store groups at gaps.

	* gcc.dg/vect/bb-slp-33.c: New testcase.

From-SVN: r222765

gcc/ChangeLog

@@ -1,3 +1,9 @@
2015-05-04  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/65965
	* tree-vect-data-refs.c (vect_analyze_data_ref_accesses): Split
	store groups at gaps.

2015-05-04  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/65935

gcc/testsuite/ChangeLog

@@ -1,3 +1,8 @@
2015-05-04  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/65965
	* gcc.dg/vect/bb-slp-33.c: New testcase.

2015-05-04  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/65935

gcc/testsuite/gcc.dg/vect/bb-slp-33.c

@@ -0,0 +1,49 @@
/* { dg-require-effective-target vect_int } */

#include "tree-vect.h"

extern void abort (void);

void __attribute__((noinline,noclone))
test(int *__restrict__ a, int *__restrict__ b)
{
  a[0] = b[0];
  a[1] = b[1];
  a[2] = b[2];
  a[3] = b[3];
  a[5] = 0;
  a[6] = 0;
  a[7] = 0;
  a[8] = 0;
}

int main()
{
  int a[9];
  int b[4];
  b[0] = 1;
  __asm__ volatile ("");
  b[1] = 2;
  __asm__ volatile ("");
  b[2] = 3;
  __asm__ volatile ("");
  b[3] = 4;
  __asm__ volatile ("");
  a[4] = 7;
  check_vect ();
  test(a, b);
  if (a[0] != 1
      || a[1] != 2
      || a[2] != 3
      || a[3] != 4
      || a[4] != 7
      || a[5] != 0
      || a[6] != 0
      || a[7] != 0
      || a[8] != 0)
    abort ();
  return 0;
}

/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "slp2" { target { vect_element_align || vect_hw_misalign } } } } */
/* { dg-final { cleanup-tree-dump "slp2" } } */

gcc/tree-vect-data-refs.c

@@ -2602,6 +2602,15 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
	  if ((init_b - init_a) % type_size_a != 0)
	    break;

	  /* If we have a store, the accesses are adjacent.  This splits
	     groups into chunks we support (we don't support vectorization
	     of stores with gaps).  */
	  if (!DR_IS_READ (dra)
	      && (init_b - (HOST_WIDE_INT) TREE_INT_CST_LOW
			     (DR_INIT (datarefs_copy[i-1]))
		  != type_size_a))
	    break;

	  /* The step (if not zero) is greater than the difference between
	     data-refs' inits.  This splits groups into suitable sizes.  */
	  HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
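
To make the new rule concrete, here is a minimal standalone sketch of the splitting logic the hunk above adds: walking a group of accesses sorted by offset, a store group is cut at the first member that is not exactly one element past its predecessor. The access_t type and find_store_group_split function are hypothetical illustrations, not GCC's API; the real code operates on data_reference objects in the sorted datarefs_copy vector.

/* Hypothetical, simplified model of the store-group splitting rule.  */
#include <stdio.h>

typedef struct
{
  long init;    /* Byte offset of the access from the common base.  */
  int is_read;  /* Nonzero for a load, zero for a store.  */
} access_t;

/* Return the index of the first member at which a sorted access group
   must be split: consecutive stores have to sit exactly type_size
   bytes apart, because store groups with gaps are not vectorizable.
   Returns n when no split is needed.  */
static int
find_store_group_split (const access_t *acc, int n, long type_size)
{
  for (int i = 1; i < n; i++)
    if (!acc[i].is_read
	&& acc[i].init - acc[i - 1].init != type_size)
      return i;
  return n;
}

int
main (void)
{
  /* The stores from the testcase above: a[0..3] and a[5..8] with
     4-byte ints leave a one-element gap at a[4].  */
  access_t stores[] = { { 0, 0 }, { 4, 0 }, { 8, 0 }, { 12, 0 },
			{ 20, 0 }, { 24, 0 }, { 28, 0 }, { 32, 0 } };
  printf ("split before member %d\n",
	  find_store_group_split (stores, 8, 4));  /* Prints 4.  */
  return 0;
}

Splitting at member 4 leaves two groups of four adjacent stores, each of which SLP-vectorizes on its own; that is why the testcase expects "vectorizing stmts using SLP" twice in the slp2 dump.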