re PR tree-optimization/77550 (std::deque with -O3 has infinite std::distance)

gcc:
2016-09-21  Bernd Edlinger  <bernd.edlinger@hotmail.de>

        PR tree-optimization/77550
        * tree-vect-stmts.c (create_array_ref): Change parameters.
        (get_group_alias_ptr_type): New function.
        (vectorizable_store, vectorizable_load): Use get_group_alias_ptr_type.

testsuite:
2016-09-21  Bernd Edlinger  <bernd.edlinger@hotmail.de>
        
        PR tree-optimization/77550
        * g++.dg/pr77550.C: New test.

From-SVN: r240313

Author:  Bernd Edlinger <bernd.edlinger@hotmail.de>
Date:    2016-09-21 13:03:59 +00:00
Commit:  44fc785481 (parent: 7e02fd1b7b)

4 changed files with 374 additions and 29 deletions
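
For context, here is a minimal hypothetical sketch (not taken from this
commit) of the kind of access group the fix is about.  When the two member
stores below are vectorized as one interleaved group, the vectorizer used
to take the alias pointer type of the first data reference for the whole
group, so the MEM_REFs it emitted could carry an alias set that is wrong
for the other group members:

    // Hypothetical illustration only; the committed reproducer is
    // g++.dg/pr77550.C below.  The two stores form one interleaved
    // group, but p[k].i and p[k].f have different alias sets.
    struct pair_t { int i; float f; };

    void fill (pair_t *p, int n)
    {
      for (int k = 0; k < n; ++k)
        {
          p[k].i = 0;    // group member with the alias set of 'int'
          p[k].f = 0.0f; // group member with the alias set of 'float'
        }
    }

With this change, get_group_alias_ptr_type walks the group once: if all
members share one alias set it keeps the alias pointer type of the first
reference, as before; if they differ it falls back to ptr_type_node, which
conflicts with everything.  In the PR itself the symptom was that
g++.dg/pr77550.C, built with its dg-options "-std=c++14 -O3", looped
forever inside std::distance; with the fix it terminates.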

gcc/ChangeLog

@@ -1,3 +1,10 @@
+2016-09-21  Bernd Edlinger  <bernd.edlinger@hotmail.de>
+
+        PR tree-optimization/77550
+        * tree-vect-stmts.c (create_array_ref): Change parameters.
+        (get_group_alias_ptr_type): New function.
+        (vectorizable_store, vectorizable_load): Use get_group_alias_ptr_type.
+
2016-09-21  Marek Polacek  <polacek@redhat.com>

        * gimple-ssa-sprintf.c (pass_sprintf_length::compute_format_length):

gcc/testsuite/ChangeLog

@@ -1,3 +1,8 @@
+2016-09-21  Bernd Edlinger  <bernd.edlinger@hotmail.de>
+
+        PR tree-optimization/77550
+        * g++.dg/pr77550.C: New test.
+
2016-09-21  Georg-Johann Lay  <avr@gjlay.de>

        PR target/77326

gcc/testsuite/g++.dg/pr77550.C (new file)

@@ -0,0 +1,295 @@
// { dg-do run }
// { dg-options "-std=c++14 -O3" }
namespace std {
typedef int size_t;
inline namespace __cxx11 {}
template <typename...> using _Require = void;
template <typename> using __void_t = void;
template <typename, typename, template <typename...> class, typename...>
struct A {
using type = int;
};
template <typename _Default, template <typename...> class _Op,
typename... _Args>
struct A<_Default, __void_t<_Op<_Args...>>, _Op, _Args...> {
using type = _Op<_Args...>;
};
template <typename _Default, template <typename...> class _Op,
typename... _Args>
using __detected_or = A<_Default, void, _Op, _Args...>;
template <typename _Default, template <typename...> class _Op,
typename... _Args>
using __detected_or_t = typename __detected_or<_Default, _Op, _Args...>::type;
template <template <typename...> class _Default,
template <typename...> class _Op, typename... _Args>
using __detected_or_t_ = __detected_or_t<_Default<_Args...>, _Op, _Args...>;
template <typename _InputIterator> void __distance(_InputIterator p1) { ++p1; }
template <typename _InputIterator>
void distance(_InputIterator p1, _InputIterator) {
__distance(p1);
}
template <typename, typename> using __replace_first_arg_t = int;
struct B {
template <typename _Up> using rebind = _Up *;
};
template <typename, typename> using __ptr_rebind = B;
template <typename _Tp> _Tp max(_Tp p1, _Tp) { return p1; }
}
void *operator new(unsigned long, void *p2) { return p2; }
template <typename _Tp> struct C {
typedef _Tp *pointer;
pointer allocate(int p1) {
return static_cast<_Tp *>(operator new(p1 * sizeof(_Tp)));
}
template <typename _Up> void construct(_Up *p1) { new (p1) _Up; }
};
namespace std {
template <typename _Tp> using __allocator_base = C<_Tp>;
template <typename _Tp> struct allocator : __allocator_base<_Tp> {
typedef unsigned long size_type;
template <typename _Tp1> struct rebind { typedef allocator<_Tp1> other; };
};
struct D {
template <typename _Alloc, typename _Up>
using __rebind = typename _Alloc::template rebind<_Up>::other;
template <typename _Tp> using __pointer = typename _Tp::pointer;
template <typename _Tp> using __c_pointer = typename _Tp::const_pointer;
template <typename _Tp> using __size_type = typename _Tp::size_type;
};
template <typename _Alloc, typename _Up>
using __alloc_rebind =
__detected_or_t_<__replace_first_arg_t, D::__rebind, _Alloc, _Up>;
template <typename _Alloc> struct K : D {
typedef _Alloc value_type;
using pointer = __detected_or_t<value_type, __pointer, _Alloc>;
using const_pointer =
__detected_or_t<__ptr_rebind<pointer, value_type>, __c_pointer>;
using size_type = __detected_or_t<int, __size_type, _Alloc>;
template <typename _Tp> using rebind_alloc = __alloc_rebind<_Alloc, _Tp>;
template <typename _Tp> static _Require<> _S_construct(_Tp p1) {
_Alloc __a;
__a.construct(p1);
}
static pointer allocate(_Alloc p1, size_type p2) { return p1.allocate(p2); }
template <typename _Tp, typename _Args>
static auto construct(_Alloc, _Tp p2, _Args) {
_S_construct(p2);
}
};
}
template <typename _Alloc> struct O : std::K<_Alloc> {
template <typename _Tp> struct rebind {
typedef typename std::K<_Alloc>::template rebind_alloc<_Tp> other;
};
};
namespace std {
template <typename _ForwardIterator, typename _Tp, typename _Allocator>
void __uninitialized_fill_a(_ForwardIterator p1, _ForwardIterator, _Tp,
_Allocator p4) try {
O<_Allocator>::construct(p4, p1, 0);
} catch (...) {
}
size_t __deque_buf_size(size_t p1) { return 1 ? 512 / p1 : 0; }
template <typename _Tp, typename _Ref, typename> struct F {
template <typename _Up> using __ptr_to = B::rebind<_Up>;
template <typename _CvTp> using __iter = F<_Tp, _CvTp &, __ptr_to<_CvTp>>;
typedef __ptr_to<_Tp> _Elt_pointer;
typedef __ptr_to<_Elt_pointer> _Map_pointer;
_Elt_pointer _M_cur;
_Elt_pointer _M_first;
_Elt_pointer _M_last;
_Map_pointer _M_node;
F() {}
F(__iter<_Tp> &p1) : _M_cur(p1._M_cur), _M_node(p1._M_node) {}
_Ref operator*() { return *_M_cur; }
void operator++() {
_M_set_node(_M_node + 1);
_M_cur = _M_first;
}
void _M_set_node(_Map_pointer p1) {
_M_node = p1;
_M_first = *p1;
_M_last = _M_first;
}
};
template <typename _Tp, typename _Ref, typename _Ptr>
int operator==(F<_Tp, _Ref, _Ptr> p1, F<_Tp, _Ref, _Ptr> p2) {
return p1._M_cur == p2._M_cur;
}
template <typename _Tp, typename _Ref, typename _Ptr>
int operator!=(F<_Tp, _Ref, _Ptr> p1, F<_Tp, _Ref, _Ptr> p2) {
return !(p1 == p2);
}
template <typename _Tp, typename _Alloc> struct _Deque_base {
typedef typename _Alloc::template rebind<_Tp>::other _Tp_alloc_type;
typedef O<_Tp_alloc_type> _Alloc_traits;
typedef typename _Alloc_traits::pointer _Ptr;
typedef typename _Alloc_traits::template rebind<_Ptr>::other _Map_alloc_type;
typedef F<_Tp, _Tp &, _Ptr> iterator;
typedef F<_Tp, _Tp &, typename _Alloc_traits::const_pointer> const_iterator;
_Deque_base(_Alloc p1, size_t) : _M_impl(p1) { _M_initialize_map(0); }
~_Deque_base() noexcept;
typedef typename iterator::_Map_pointer _Map_pointer;
struct L : _Tp_alloc_type {
_Map_pointer _M_map;
size_t _M_map_size;
iterator _M_start;
iterator _M_finish;
L(_Tp_alloc_type) {}
};
_Tp_alloc_type _M_get_Tp_allocator() { return _M_impl; }
_Ptr _M_allocate_node() { return O<_Tp_alloc_type>::allocate(_M_impl, 1); }
_Map_pointer _M_allocate_map(size_t p1) {
_Map_alloc_type __map_alloc;
return O<_Map_alloc_type>::allocate(__map_alloc, p1);
}
void _M_initialize_map(size_t);
void _M_create_nodes(_Map_pointer, _Map_pointer);
enum { _S_initial_map_size = 8 };
L _M_impl;
};
template <typename _Tp, typename _Alloc>
_Deque_base<_Tp, _Alloc>::~_Deque_base() noexcept {}
template <typename _Tp, typename _Alloc>
void _Deque_base<_Tp, _Alloc>::_M_initialize_map(size_t) {
size_t __num_nodes(__deque_buf_size(sizeof(_Tp)));
_M_impl._M_map_size = max((size_t)_S_initial_map_size, 0);
_M_impl._M_map = _M_allocate_map(_M_impl._M_map_size);
_Map_pointer __nstart(_M_impl._M_map);
_Map_pointer __nfinish = __nstart + __num_nodes;
try {
_M_create_nodes(__nstart, __nfinish);
} catch (...) {
}
_M_impl._M_start._M_set_node(__nstart);
_M_impl._M_finish._M_set_node(__nfinish - 1);
_M_impl._M_start._M_cur = _M_impl._M_start._M_first;
_M_impl._M_finish._M_cur = _M_impl._M_finish._M_first;
}
template <typename _Tp, typename _Alloc>
void _Deque_base<_Tp, _Alloc>::_M_create_nodes(_Map_pointer __nstart,
_Map_pointer __nfinish) {
_Map_pointer __cur;
try {
for (__cur = __nstart; __cur < __nfinish; ++__cur)
*__cur = _M_allocate_node();
} catch (...) {
}
}
template <typename _Tp, typename _Alloc = allocator<_Tp>>
struct deque : _Deque_base<_Tp, _Alloc> {
typedef _Deque_base<_Tp, _Alloc> _Base;
typedef typename _Base::_Map_pointer _Map_pointer;
typedef _Tp value_type;
typedef typename _Base::const_iterator const_iterator;
typedef size_t size_type;
typedef _Alloc allocator_type;
using _Base::_M_get_Tp_allocator;
deque(size_type, value_type __value, allocator_type __a = allocator_type())
: _Base(__a, 0) {
_M_fill_initialize(__value);
}
const_iterator begin() { return this->_M_impl._M_start; }
const_iterator end() { return this->_M_impl._M_finish; }
void _M_fill_initialize(const value_type &);
};
template <typename _Container> auto begin(_Container p1) { return p1.begin(); }
template <typename _Container> auto end(_Container p1) { return p1.end(); }
template <typename _Container> auto cbegin(_Container p1) { return begin(p1); }
template <typename _Container> auto cend(_Container p1) { return end(p1); }
template <typename _Tp, typename _Alloc>
void deque<_Tp, _Alloc>::_M_fill_initialize(const value_type &) {
_Map_pointer __cur;
try {
for (__cur = this->_M_impl._M_start._M_node;
__cur < this->_M_impl._M_finish._M_node; ++__cur)
__uninitialized_fill_a(*__cur, *__cur, 0, _M_get_Tp_allocator());
} catch (...) {
}
}
template <class> struct char_traits;
namespace __cxx11 {
template <typename _CharT, typename = char_traits<_CharT>,
typename = allocator<_CharT>>
struct basic_string;
typedef basic_string<char> string;
}
template <> struct char_traits<char> {
typedef char char_type;
static int compare(char_type, char_type *p2, size_t p3) {
return __builtin_memcmp(0, p2, p3);
}
};
namespace __cxx11 {
template <typename, typename, typename> struct basic_string {
typedef O<allocator<char>> _Alloc_traits;
typedef _Alloc_traits::size_type size_type;
typedef _Alloc_traits::pointer pointer;
struct _Alloc_hider {
_Alloc_hider(pointer, allocator<char> && = allocator<char>());
} _M_dataplus;
size_type _M_string_length;
enum { _S_local_capacity = 15 } _M_local_buf[_S_local_capacity];
pointer _M_local_data();
void _M_set_length(size_type);
basic_string() : _M_dataplus(_M_local_data()) { _M_set_length(0); }
basic_string(const basic_string &) : _M_dataplus(0) {}
size_type size() { return _M_string_length; }
char *data() const {}
};
}
template <typename _CharT>
int operator==(basic_string<_CharT> &p1, const basic_string<_CharT> &p2) {
return p1.size() && char_traits<_CharT>::compare(0, p2.data(), p1.size());
}
}
struct G {
template <class Facade> static void increment(Facade p1) { p1.increment(); }
};
template <class Derived> struct H {
Derived derived() { return *static_cast<Derived *>(this); }
void operator++() {
Derived __trans_tmp_1 = derived();
G::increment(__trans_tmp_1);
}
};
template <class Derived> struct I { typedef H<Derived> type; };
template <class Derived, class Base> struct M : I<Derived>::type {
M(Base p1) : m_iterator(p1) {}
Base base() { return m_iterator; }
Base &base_reference() { return m_iterator; }
Base m_iterator;
};
template <class, class> struct N;
template <class Predicate, class Iterator> struct J {
typedef M<N<Predicate, Iterator>, Iterator> type;
};
template <class Predicate, class Iterator>
struct N : J<Predicate, Iterator>::type {
typedef typename J<Predicate, Iterator>::type super_t;
N(Predicate p1, Iterator p2, Iterator p3)
: super_t(p2), m_predicate(p1), m_end(p3) {}
void increment() {
while (this->base() != m_end && !m_predicate(*this->base()))
++this->base_reference();
}
Predicate m_predicate;
Iterator m_end;
};
template <class Predicate, class Iterator>
N<Predicate, Iterator> make_filter_iterator(Predicate p1, Iterator p2,
Iterator p3) {
return N<Predicate, Iterator>(p1, p2, p3);
}
struct Foo {
std::string bar;
};
int main() {
std::deque<Foo> foos(0, {});
std::string test;
auto p = [test](auto &foo) { return foo.bar == test; };
auto begin = make_filter_iterator(p, cbegin(foos), cend(foos));
auto end = make_filter_iterator(p, cend(foos), cend(foos));
distance(begin, end);
}

gcc/tree-vect-stmts.c

@@ -170,11 +170,10 @@ write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
(and its group). */
static tree
-create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
+create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
-tree mem_ref, alias_ptr_type;
+tree mem_ref;
-alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
/* Arrays have the same alignment as their type. */
set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
@@ -5432,6 +5431,35 @@ ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
}
+/* Function get_group_alias_ptr_type.
+Return the alias type for the group starting at FIRST_STMT. */
+static tree
+get_group_alias_ptr_type (gimple *first_stmt)
+{
+struct data_reference *first_dr, *next_dr;
+gimple *next_stmt;
+first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
+while (next_stmt)
+{
+next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
+if (get_alias_set (DR_REF (first_dr))
+!= get_alias_set (DR_REF (next_dr)))
+{
+if (dump_enabled_p ())
+dump_printf_loc (MSG_NOTE, vect_location,
+"conflicting alias set types.\n");
+return ptr_type_node;
+}
+next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+}
+return reference_alias_ptr_type (DR_REF (first_dr));
+}
/* Function vectorizable_store.
Check if STMT defines a non scalar data-ref (array/pointer/structure) that
@@ -5482,6 +5510,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
gimple *new_stmt;
int vf;
vec_load_store_type vls_type;
+tree ref_type;
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
@@ -5771,12 +5800,15 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = group_size;
+ref_type = get_group_alias_ptr_type (first_stmt);
}
else
{
first_stmt = stmt;
first_dr = dr;
group_size = vec_num = 1;
+ref_type = reference_alias_ptr_type (DR_REF (first_dr));
}
if (dump_enabled_p ())
@@ -5804,7 +5836,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
(unshare_expr (DR_BASE_ADDRESS (first_dr)),
size_binop (PLUS_EXPR,
convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
-convert_to_ptrofftype (DR_INIT(first_dr))));
+convert_to_ptrofftype (DR_INIT (first_dr))));
stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
/* For a store with loop-invariant (but other than power-of-2)
@@ -5865,7 +5897,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
prev_stmt_info = NULL;
-alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
+alias_off = build_int_cst (ref_type, 0);
next_stmt = first_stmt;
for (g = 0; g < group_size; g++)
{
@@ -6081,11 +6113,10 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
&& integer_zerop (DR_OFFSET (first_dr))
&& integer_zerop (DR_INIT (first_dr))
&& alias_sets_conflict_p (get_alias_set (aggr_type),
-get_alias_set (DR_REF (first_dr))))
+get_alias_set (TREE_TYPE (ref_type))))
{
dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
-dataref_offset = build_int_cst (reference_alias_ptr_type
-(DR_REF (first_dr)), 0);
+dataref_offset = build_int_cst (ref_type, 0);
inv_p = false;
}
else
@@ -6136,7 +6167,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* Emit:
MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
-data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
+data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
gimple_call_set_lhs (new_stmt, data_ref);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -6174,8 +6205,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
dataref_ptr,
dataref_offset
? dataref_offset
-: build_int_cst (reference_alias_ptr_type
-(DR_REF (first_dr)), 0));
+: build_int_cst (ref_type, 0));
align = TYPE_ALIGN_UNIT (vectype);
if (aligned_access_p (first_dr))
misalign = 0;
@@ -6395,7 +6425,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
tree dataref_offset = NULL_TREE;
gimple *ptr_incr = NULL;
int ncopies;
-int i, j, group_size = -1, group_gap_adj;
+int i, j, group_size, group_gap_adj;
tree msq = NULL_TREE, lsq;
tree offset = NULL_TREE;
tree byte_offset = NULL_TREE;
@@ -6417,6 +6447,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
tree aggr_type;
gather_scatter_info gs_info;
vec_info *vinfo = stmt_info->vinfo;
+tree ref_type;
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
@@ -6773,10 +6804,19 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
gcc_assert (!nested_in_vect_loop);
if (slp && grouped_load)
-first_dr = STMT_VINFO_DATA_REF
-(vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
+{
+first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+ref_type = get_group_alias_ptr_type (first_stmt);
+}
else
-first_dr = dr;
+{
+first_stmt = stmt;
+first_dr = dr;
+group_size = 1;
+ref_type = reference_alias_ptr_type (DR_REF (first_dr));
+}
stride_base
= fold_build_pointer_plus
@@ -6820,7 +6860,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
prev_stmt_info = NULL;
running_off = offvar;
-alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
+alias_off = build_int_cst (ref_type, 0);
int nloads = nunits;
int lnel = 1;
tree ltype = TREE_TYPE (vectype);
@@ -6917,6 +6957,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
if (grouped_load)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
/* For SLP vectorization we directly vectorize a subchain
without permutation. */
if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
@@ -6942,7 +6983,6 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
return true;
}
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
-group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
group_gap_adj = 0;
/* VEC_NUM is the number of vect stmts to be created for this group. */
@@ -6960,6 +7000,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
}
else
vec_num = group_size;
+ref_type = get_group_alias_ptr_type (first_stmt);
}
else
{
@@ -6967,6 +7009,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
first_dr = dr;
group_size = vec_num = 1;
group_gap_adj = 0;
+ref_type = reference_alias_ptr_type (DR_REF (first_dr));
}
alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
@@ -7127,13 +7170,12 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
&& integer_zerop (DR_OFFSET (first_dr))
&& integer_zerop (DR_INIT (first_dr))
&& alias_sets_conflict_p (get_alias_set (aggr_type),
-get_alias_set (DR_REF (first_dr)))
+get_alias_set (TREE_TYPE (ref_type)))
&& (alignment_support_scheme == dr_aligned
|| alignment_support_scheme == dr_unaligned_supported))
{
dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
-dataref_offset = build_int_cst (reference_alias_ptr_type
-(DR_REF (first_dr)), 0);
+dataref_offset = build_int_cst (ref_type, 0);
inv_p = false;
}
else if (first_stmt_for_drptr
@@ -7179,7 +7221,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
/* Emit:
VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
-data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
+data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
gimple_call_set_lhs (new_stmt, vec_array);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
@@ -7215,8 +7257,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
= fold_build2 (MEM_REF, vectype, dataref_ptr,
dataref_offset
? dataref_offset
-: build_int_cst (reference_alias_ptr_type
-(DR_REF (first_dr)), 0));
+: build_int_cst (ref_type, 0));
align = TYPE_ALIGN_UNIT (vectype);
if (alignment_support_scheme == dr_aligned)
{
@@ -7272,8 +7313,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
vect_finish_stmt_generation (stmt, new_stmt, gsi);
data_ref
= build2 (MEM_REF, vectype, ptr,
-build_int_cst (reference_alias_ptr_type
-(DR_REF (first_dr)), 0));
+build_int_cst (ref_type, 0));
vec_dest = vect_create_destination_var (scalar_dest,
vectype);
new_stmt = gimple_build_assign (vec_dest, data_ref);
@@ -7298,8 +7338,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
vect_finish_stmt_generation (stmt, new_stmt, gsi);
data_ref
= build2 (MEM_REF, vectype, ptr,
-build_int_cst (reference_alias_ptr_type
-(DR_REF (first_dr)), 0));
+build_int_cst (ref_type, 0));
break;
}
case dr_explicit_realign_optimized:
@@ -7315,8 +7354,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
vect_finish_stmt_generation (stmt, new_stmt, gsi);
data_ref
= build2 (MEM_REF, vectype, new_temp,
-build_int_cst (reference_alias_ptr_type
-(DR_REF (first_dr)), 0));
+build_int_cst (ref_type, 0));
break;
default:
gcc_unreachable ();