fab2f61dc1
> * testsuite/libgomp.c++/scan-10.C: Add option -fvect-cost-model=cheap.

I don't think this is the right thing to do.  It just means that at some
point between 2013, when -fsimd-cost-model= was introduced, and now, the
-fsimd-cost-model= option at least partially stopped working properly.
As documented, -fsimd-cost-model= overrides the -fvect-cost-model= setting
for OpenMP simd loops (loop->force_vectorize is true) if it is specified
differently from default.

In tree-vectorizer.h we have:

static inline bool
unlimited_cost_model (loop_p loop)
{
  if (loop != NULL
      && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}

and use it in various places, but we also use flag_vect_cost_model directly
in lots of places (and in one spot use flag_simd_cost_model; I'm not sure
whether we know it is a force_vectorize loop there or what).

So, IMHO we should rename the above inline function to loop_cost_model and
let it return the cost model, then reimplement unlimited_cost_model as
  return loop_cost_model (loop) == VECT_COST_MODEL_UNLIMITED;
and then adjust the direct uses of the flags and revert these testsuite
changes (see the sketch after the ChangeLog below).

2021-10-12  Jakub Jelinek  <jakub@redhat.com>

gcc/
	* tree-vectorizer.h (loop_cost_model): New function.
	(unlimited_cost_model): Use it.
	* tree-vect-loop.c (vect_analyze_loop_costing): Use loop_cost_model
	call instead of flag_vect_cost_model.
	* tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Likewise.
	(vect_prune_runtime_alias_test_list): Likewise.  Also use it instead
	of flag_simd_cost_model.

gcc/testsuite/
	* gcc.dg/gomp/simd-2.c: Remove option -fvect-cost-model=cheap.
	* gcc.dg/gomp/simd-3.c: Likewise.

libgomp/
	* testsuite/libgomp.c/scan-11.c: Remove option -fvect-cost-model=cheap.
	* testsuite/libgomp.c/scan-12.c: Likewise.
	* testsuite/libgomp.c/scan-13.c: Likewise.
	* testsuite/libgomp.c/scan-14.c: Likewise.
	* testsuite/libgomp.c/scan-15.c: Likewise.
	* testsuite/libgomp.c/scan-16.c: Likewise.
	* testsuite/libgomp.c/scan-17.c: Likewise.
	* testsuite/libgomp.c/scan-18.c: Likewise.
	* testsuite/libgomp.c/scan-19.c: Likewise.
	* testsuite/libgomp.c/scan-20.c: Likewise.
	* testsuite/libgomp.c/scan-21.c: Likewise.
	* testsuite/libgomp.c/scan-22.c: Likewise.
	* testsuite/libgomp.c++/scan-9.C: Likewise.
	* testsuite/libgomp.c++/scan-10.C: Likewise.
	* testsuite/libgomp.c++/scan-11.C: Likewise.
	* testsuite/libgomp.c++/scan-12.C: Likewise.
	* testsuite/libgomp.c++/scan-13.C: Likewise.
	* testsuite/libgomp.c++/scan-14.C: Likewise.
	* testsuite/libgomp.c++/scan-15.C: Likewise.
	* testsuite/libgomp.c++/scan-16.C: Likewise.
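A minimal sketch of what the tree-vectorizer.h change described above could
look like; the exact comments, placement and return-type handling in the
committed patch may differ, this just mirrors the existing
unlimited_cost_model logic:

/* Return the cost model to use when vectorizing LOOP.  For an OpenMP simd
   loop (loop->force_vectorize), -fsimd-cost-model= overrides
   -fvect-cost-model= when it is set to something other than default.  */
static inline enum vect_cost_model
loop_cost_model (loop_p loop)
{
  if (loop != NULL
      && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model;
  return flag_vect_cost_model;
}

static inline bool
unlimited_cost_model (loop_p loop)
{
  return loop_cost_model (loop) == VECT_COST_MODEL_UNLIMITED;
}

The direct uses of flag_vect_cost_model/flag_simd_cost_model in
tree-vect-loop.c and tree-vect-data-refs.c would then go through
loop_cost_model (loop) so that force_vectorize loops consistently honor
-fsimd-cost-model=.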
// { dg-require-effective-target size32plus }
// { dg-additional-options "-O2 -fopenmp -fdump-tree-vect-details" }
// { dg-additional-options "-msse2" { target sse2_runtime } }
// { dg-additional-options "-mavx" { target avx_runtime } }
// { dg-final { scan-tree-dump-times "vectorized \[2-6] loops" 2 "vect" { target sse2_runtime } } }

extern "C" void abort ();

struct S {
  inline S ();
  inline ~S ();
  inline S (const S &);
  inline S & operator= (const S &);
  int s;
};

S::S () : s (0)
{
}

S::~S ()
{
}

S::S (const S &x)
{
  s = x.s;
}

S &
S::operator= (const S &x)
{
  s = x.s;
  return *this;
}

static inline void
ini (S &x)
{
  x.s = 0;
}

S r, a[1024], b[1024];

#pragma omp declare reduction (+: S: omp_out.s += omp_in.s)
#pragma omp declare reduction (plus: S: omp_out.s += omp_in.s) initializer (ini (omp_priv))

__attribute__((noipa)) void
foo (S *a, S *b, S &r)
{
  #pragma omp for simd simdlen (1) reduction (inscan, +:r)
  for (int i = 0; i < 1024; i++)
    {
      b[i] = r;
      #pragma omp scan exclusive(r)
      r.s += a[i].s;
    }
}

__attribute__((noipa)) S
bar (void)
{
  S s;
  #pragma omp parallel
  #pragma omp for simd if (0) reduction (inscan, plus:s)
  for (int i = 0; i < 1024; i++)
    {
      b[i] = s;
      #pragma omp scan exclusive(s)
      s.s += 2 * a[i].s;
    }
  return s;
}

__attribute__((noipa)) void
baz (S *a, S *b, S &r)
{
  #pragma omp parallel for simd reduction (inscan, +:r)
  for (int i = 0; i < 1024; i++)
    {
      b[i] = r;
      #pragma omp scan exclusive(r)
      r.s += a[i].s;
    }
}

__attribute__((noipa)) S
qux (void)
{
  S s;
  #pragma omp parallel for simd reduction (inscan, plus:s)
  for (int i = 0; i < 1024; i++)
    {
      b[i] = s;
      #pragma omp scan exclusive(s)
      s.s += 2 * a[i].s;
    }
  return s;
}

int
main ()
{
  S s;
  for (int i = 0; i < 1024; ++i)
    {
      a[i].s = i;
      b[i].s = -1;
      asm ("" : "+g" (i));
    }
  #pragma omp parallel
  foo (a, b, r);
  if (r.s != 1024 * 1023 / 2)
    abort ();
  for (int i = 0; i < 1024; ++i)
    {
      if (b[i].s != s.s)
        abort ();
      else
        b[i].s = 25;
      s.s += i;
    }
  if (bar ().s != 1024 * 1023)
    abort ();
  s.s = 0;
  for (int i = 0; i < 1024; ++i)
    {
      if (b[i].s != s.s)
        abort ();
      s.s += 2 * i;
    }
  r.s = 0;
  baz (a, b, r);
  if (r.s != 1024 * 1023 / 2)
    abort ();
  s.s = 0;
  for (int i = 0; i < 1024; ++i)
    {
      if (b[i].s != s.s)
        abort ();
      else
        b[i].s = 25;
      s.s += i;
    }
  if (qux ().s != 1024 * 1023)
    abort ();
  s.s = 0;
  for (int i = 0; i < 1024; ++i)
    {
      if (b[i].s != s.s)
        abort ();
      s.s += 2 * i;
    }
}