re PR middle-end/61486 (ICE with #pragma omp teams)

PR middle-end/61486
	* gimplify.c (struct gimplify_omp_ctx): Add distribute field.
	(gimplify_adjust_omp_clauses): Don't or in GOVD_LASTPRIVATE
	if outer combined construct is distribute.
	(gimplify_omp_for): For OMP_DISTRIBUTE set
	gimplify_omp_ctxp->distribute.
	* omp-low.c (scan_sharing_clauses) <case OMP_CLAUSE_SHARED>: For
	GIMPLE_OMP_TEAMS, if decl isn't global in outer context, record
	mapping into decl map.
c-family/
	* c-omp.c (c_omp_split_clauses): Don't crash on firstprivate in
	#pragma omp target teams or
	#pragma omp {,target }teams distribute simd.
testsuite/
	* c-c++-common/gomp/pr61486-1.c: New test.
	* c-c++-common/gomp/pr61486-2.c: New test.

From-SVN: r211596
Jakub Jelinek, 2014-06-12 23:10:11 +02:00 (committed by Jakub Jelinek)
commit 9cf32741aa (parent 6298491866)
8 changed files with 521 additions and 5 deletions
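
For orientation, a minimal sketch of the constructs involved, not part of the commit and distilled from the new pr61486-1.c and pr61486-2.c tests further down (the function name is made up); compile with -fopenmp:

/* Per the c-family ChangeLog entry, clause splitting crashed on
   firstprivate in a combined target teams construct; the target teams
   distribute simd case with linear is the pr61486-1.c reproducer.  */
void
sketch (int *a, int q)
{
  int i, j = 0;
#pragma omp target teams firstprivate (q)
  q++;
#pragma omp target teams distribute simd linear (i, j) map (a[:10])
  for (i = 0; i < 10; i++)
    a[i] = j++;
}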

gcc/ChangeLog

@@ -1,3 +1,15 @@
+2014-06-12  Jakub Jelinek  <jakub@redhat.com>
+
+	PR middle-end/61486
+	* gimplify.c (struct gimplify_omp_ctx): Add distribute field.
+	(gimplify_adjust_omp_clauses): Don't or in GOVD_LASTPRIVATE
+	if outer combined construct is distribute.
+	(gimplify_omp_for): For OMP_DISTRIBUTE set
+	gimplify_omp_ctxp->distribute.
+	* omp-low.c (scan_sharing_clauses) <case OMP_CLAUSE_SHARED>: For
+	GIMPLE_OMP_TEAMS, if decl isn't global in outer context, record
+	mapping into decl map.
+
 2014-06-12  Jason Merrill  <jason@redhat.com>
 
 	* common.opt (fabi-version): Change default to 0.
gcc/c-family/ChangeLog

@@ -1,3 +1,10 @@
+2014-06-12  Jakub Jelinek  <jakub@redhat.com>
+
+	PR middle-end/61486
+	* c-omp.c (c_omp_split_clauses): Don't crash on firstprivate in
+	#pragma omp target teams or
+	#pragma omp {,target }teams distribute simd.
+
 2014-06-12  Jason Merrill  <jason@redhat.com>
 
 	* c.opt (Wabi=, fabi-compat-version): New.
gcc/c-family/c-omp.c

@@ -789,8 +789,13 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
 	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
 		   != 0)
 	    {
-	      /* This must be #pragma omp {,target }teams distribute.  */
-	      gcc_assert (code == OMP_DISTRIBUTE);
+	      /* This must be one of
+		 #pragma omp {,target }teams distribute
+		 #pragma omp target teams
+		 #pragma omp {,target }teams distribute simd.  */
+	      gcc_assert (code == OMP_DISTRIBUTE
+			  || code == OMP_TEAMS
+			  || code == OMP_SIMD);
 	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
 	    }
 	  else if ((mask & (OMP_CLAUSE_MASK_1

gcc/gimplify.c

@@ -139,6 +139,7 @@ struct gimplify_omp_ctx
   enum omp_clause_default_kind default_kind;
   enum omp_region_type region_type;
   bool combined_loop;
+  bool distribute;
 };
 
 static struct gimplify_ctx *gimplify_ctxp;
@@ -6359,7 +6360,11 @@ gimplify_adjust_omp_clauses (tree *list_p)
 	  if (n == NULL
 	      || (n->value & GOVD_DATA_SHARE_CLASS) == 0)
 	    {
-	      int flags = GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE;
+	      int flags = GOVD_FIRSTPRIVATE;
+	      /* #pragma omp distribute does not allow
+		 lastprivate clause.  */
+	      if (!ctx->outer_context->distribute)
+		flags |= GOVD_LASTPRIVATE;
 	      if (n == NULL)
 		omp_add_variable (ctx->outer_context, decl,
 				  flags | GOVD_SEEN);
@@ -6640,6 +6645,8 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
 	  || TREE_CODE (for_stmt) == CILK_SIMD);
   gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
 			     simd ? ORT_SIMD : ORT_WORKSHARE);
+  if (TREE_CODE (for_stmt) == OMP_DISTRIBUTE)
+    gimplify_omp_ctxp->distribute = true;
 
   /* Handle OMP_FOR_INIT.  */
   for_pre_body = NULL;
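
To illustrate the rule the new comment cites (illustration only, not part of the commit; the function name is made up): lastprivate may appear on the simd part of such a combined construct but not on distribute itself, so when the outer combined construct is a distribute the gimplifier now adds only GOVD_FIRSTPRIVATE for these variables. Compile with -fopenmp:

/* lastprivate (s) and linear (i:1) belong to the simd leaf, as in the
   pr61486-2.c testcases; the enclosing distribute no longer receives an
   implicit lastprivate for them.  */
void
distribute_simd_sketch (int n)
{
  int a[64], i, s = 0;
#pragma omp target teams map (tofrom: a, s)
#pragma omp distribute simd lastprivate (s) linear (i:1) safelen (8)
  for (i = 0; i < 64; i++)
    {
      a[i] = i + n;
      s = i;
    }
}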

gcc/omp-low.c

@@ -1509,11 +1509,19 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
 	  break;
 
 	case OMP_CLAUSE_SHARED:
+	  decl = OMP_CLAUSE_DECL (c);
 	  /* Ignore shared directives in teams construct.  */
 	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
-	    break;
+	    {
+	      /* Global variables don't need to be copied,
+		 the receiver side will use them directly.  */
+	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
+	      if (is_global_var (odecl))
+		break;
+	      insert_decl_map (&ctx->cb, decl, odecl);
+	      break;
+	    }
 	  gcc_assert (is_taskreg_ctx (ctx));
-	  decl = OMP_CLAUSE_DECL (c);
 	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
 		      || !is_variable_sized (decl));
 	  /* Global variables don't need to be copied,
gcc/testsuite/ChangeLog

@@ -1,3 +1,9 @@
+2014-06-12  Jakub Jelinek  <jakub@redhat.com>
+
+	PR middle-end/61486
+	* c-c++-common/gomp/pr61486-1.c: New test.
+	* c-c++-common/gomp/pr61486-2.c: New test.
+
 2014-06-10  Alan Lawrence  <alan.lawrence@arm.com>
 
 	PR target/59843
gcc/testsuite/c-c++-common/gomp/pr61486-1.c

@@ -0,0 +1,13 @@
/* PR middle-end/61486 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

int
foo (int *a)
{
  int i, j = 0;
  #pragma omp target teams distribute simd linear(i, j) map(a[:10])
  for (i = 0; i < 10; i++)
    a[i] = j++;
  return i + j;
}

gcc/testsuite/c-c++-common/gomp/pr61486-2.c

@@ -0,0 +1,458 @@
/* PR middle-end/61486 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
#pragma omp declare target
void dosomething (int *a, int n, int m);
#pragma omp end declare target
void
test (int n, int o, int p, int q, int r, int s, int *pp)
{
int a[o], i, j;
#pragma omp target data device (n + 1) if (n != 6) map (tofrom: n, r)
{
#pragma omp target device (n + 1) if (n != 6) map (from: n) map (alloc: a[2:o-2])
dosomething (a, n, 0);
#pragma omp target teams device (n + 1) num_teams (n + 4) thread_limit (n * 2) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp target teams distribute device (n + 1) num_teams (n + 4) collapse (2) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp target teams distribute device (n + 1) num_teams (n + 4) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
num_threads (n + 4) proc_bind (spread) lastprivate (s) \
ordered schedule (static, 8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
#pragma omp ordered
p = q;
s = i * 10 + j;
}
#pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
proc_bind (master) lastprivate (s) ordered schedule (static, 8)
for (i = 0; i < 10; i++)
{
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp ordered
p = q;
s = i * 10;
}
#pragma omp target teams distribute parallel for simd device (n + 1) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
num_threads (n + 4) proc_bind (spread) lastprivate (s) \
schedule (static, 8) num_teams (n + 4) safelen(8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
a[2+i*10+j] = p + q;
s = i * 10 + j;
}
#pragma omp target teams distribute parallel for simd device (n + 1) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
proc_bind (master) lastprivate (s) schedule (static, 8) \
num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
for (i = 0; i < 10; i++)
{
r = r + 1;
p = q;
a[2+i] = p + q;
s = i * 10;
}
#pragma omp target teams distribute simd device (n + 1) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
lastprivate (s) num_teams (n + 4) safelen(8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
a[2+i*10+j] = p + q;
s = i * 10 + j;
}
#pragma omp target teams distribute simd device (n + 1) \
if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
for (i = 0; i < 10; i++)
{
r = r + 1;
p = q;
a[2+i] = p + q;
s = i * 10;
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams num_teams (n + 4) thread_limit (n * 2) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams distribute num_teams (n + 4) collapse (2) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams distribute num_teams (n + 4) default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
num_threads (n + 4) proc_bind (spread) lastprivate (s) \
ordered schedule (static, 8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
#pragma omp ordered
p = q;
s = i * 10 + j;
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
proc_bind (master) lastprivate (s) ordered schedule (static, 8)
for (i = 0; i < 10; i++)
{
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp ordered
p = q;
s = i * 10;
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
num_threads (n + 4) proc_bind (spread) lastprivate (s) \
schedule (static, 8) num_teams (n + 4) safelen(8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
a[2+i*10+j] = p + q;
s = i * 10 + j;
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
proc_bind (master) lastprivate (s) schedule (static, 8) \
num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
for (i = 0; i < 10; i++)
{
r = r + 1;
p = q;
a[2+i] = p + q;
s = i * 10;
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams distribute simd default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
lastprivate (s) num_teams (n + 4) safelen(8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
a[2+i*10+j] = p + q;
s = i * 10 + j;
}
#pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
#pragma omp teams distribute simd default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
for (i = 0; i < 10; i++)
{
r = r + 1;
p = q;
a[2+i] = p + q;
s = i * 10;
}
#pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
num_teams (n + 4) thread_limit (n * 2)default(shared) shared(n) \
private (p) reduction (+: r)
#pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
num_teams (n + 4) thread_limit (n * 2) shared(n) private(p) reduction (+ : r) \
default(shared)
#pragma omp distribute dist_schedule (static, 4) firstprivate (q)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
num_teams (n + 4) thread_limit (n * 2)
#pragma omp distribute parallel for if (n != 6) \
default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
collapse (2) dist_schedule (static, 4) \
num_threads (n + 4) proc_bind (spread) lastprivate (s) \
ordered schedule (static, 8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
#pragma omp ordered
p = q;
s = i * 10 + j;
}
#pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
num_teams (n + 4) thread_limit (n * 2)
#pragma omp distribute parallel for if (n != 6) \
default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
num_threads (n + 4) dist_schedule (static, 4) \
proc_bind (master) lastprivate (s) ordered schedule (static, 8)
for (i = 0; i < 10; i++)
{
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp ordered
p = q;
s = i * 10;
}
#pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
num_teams (n + 4) thread_limit (n * 2)
#pragma omp distribute parallel for simd if (n != 6)default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
collapse (2) dist_schedule (static, 4) \
num_threads (n + 4) proc_bind (spread) lastprivate (s) \
schedule (static, 8) safelen(8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
a[2+i*10+j] = p + q;
s = i * 10 + j;
}
#pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
num_teams (n + 4) thread_limit (n * 2)
#pragma omp distribute parallel for simd if (n != 6)default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
num_threads (n + 4) dist_schedule (static, 4) \
proc_bind (master) lastprivate (s) schedule (static, 8) \
safelen(16) linear(i:1) aligned (pp:4)
for (i = 0; i < 10; i++)
{
r = r + 1;
p = q;
a[2+i] = p + q;
s = i * 10;
}
#pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
reduction(+:r)
#pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
a[2+i*10+j] = p + q;
s = i * 10 + j;
}
#pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
reduction(+:r)
#pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
for (i = 0; i < 10; i++)
{
r = r + 1;
p = q;
a[2+i] = p + q;
s = i * 10;
}
}
}
int q, i, j;
void
test2 (int n, int o, int p, int r, int s, int *pp)
{
int a[o];
#pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp distribute dist_schedule (static, 4) firstprivate (q)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp distribute parallel for if (n != 6) \
default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
collapse (2) dist_schedule (static, 4) \
num_threads (n + 4) proc_bind (spread) lastprivate (s) \
ordered schedule (static, 8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
#pragma omp ordered
p = q;
s = i * 10 + j;
}
#pragma omp distribute parallel for if (n != 6) \
default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
num_threads (n + 4) dist_schedule (static, 4) \
proc_bind (master) lastprivate (s) ordered schedule (static, 8)
for (i = 0; i < 10; i++)
{
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
dosomething (a, n, p + q);
}
#pragma omp ordered
p = q;
s = i * 10;
}
#pragma omp distribute parallel for simd if (n != 6)default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
collapse (2) dist_schedule (static, 4) \
num_threads (n + 4) proc_bind (spread) lastprivate (s) \
schedule (static, 8) safelen(8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
a[2+i*10+j] = p + q;
s = i * 10 + j;
}
#pragma omp distribute parallel for simd if (n != 6)default(shared) \
private (p) firstprivate (q) shared (n) reduction (+: r) \
num_threads (n + 4) dist_schedule (static, 4) \
proc_bind (master) lastprivate (s) schedule (static, 8) \
safelen(16) linear(i:1) aligned (pp:4)
for (i = 0; i < 10; i++)
{
r = r + 1;
p = q;
a[2+i] = p + q;
s = i * 10;
}
#pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
for (i = 0; i < 10; i++)
for (j = 0; j < 10; j++)
{
r = r + 1;
p = q;
a[2+i*10+j] = p + q;
s = i * 10 + j;
}
#pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
for (i = 0; i < 10; i++)
{
r = r + 1;
p = q;
a[2+i] = p + q;
s = i * 10;
}
}