e460634820
gcc/ 2015-11-14 Jakub Jelinek <jakub@redhat.com> * omp-low.c (lower_omp_ordered): Add argument to GOMP_SIMD_ORDERED_* internal calls - 0 if ordered simd and 1 for ordered threads simd. * tree-vectorizer.c (adjust_simduid_builtins): If GOMP_SIMD_ORDERED_* argument is 1, replace it with GOMP_ordered_* call instead of removing it. gcc/c/ 2015-11-14 Jakub Jelinek <jakub@redhat.com> * c-typeck.c (c_finish_omp_clauses): Don't mark GOMP_MAP_FIRSTPRIVATE_POINTER decls addressable. gcc/cp/ 2015-11-14 Jakub Jelinek <jakub@redhat.com> * semantics.c (finish_omp_clauses): Don't mark GOMP_MAP_FIRSTPRIVATE_POINTER decls addressable. libgomp/ 2015-11-14 Jakub Jelinek <jakub@redhat.com> Aldy Hernandez <aldyh@redhat.com> Ilya Verbin <ilya.verbin@intel.com> * ordered.c (gomp_doacross_init, GOMP_doacross_post, GOMP_doacross_wait, gomp_doacross_ull_init, GOMP_doacross_ull_post, GOMP_doacross_ull_wait): For GFS_GUIDED don't divide number of iterators or IV by chunk size. * parallel.c (gomp_resolve_num_threads): Don't assume that if thr->ts.team is non-NULL, then pool must be non-NULL. * libgomp-plugin.h (GOMP_PLUGIN_target_task_completion): Declare. * libgomp.map (GOMP_PLUGIN_1.1): New symbol version, export GOMP_PLUGIN_target_task_completion. * Makefile.am (libgomp_la_SOURCES): Add priority_queue.c. * Makefile.in: Regenerate. * libgomp.h: Shuffle prototypes and forward definitions around so priority queues can be defined. (enum gomp_task_kind): Add GOMP_TASK_ASYNC_RUNNING. (enum gomp_target_task_state): New enum. (struct gomp_target_task): Add state, tgt, task and team fields. (gomp_create_target_task): Change return type to bool, add state argument. (gomp_target_task_fn): Change return type to bool. (struct gomp_device_descr): Add async_run_func. (struct gomp_task): Remove children, next_child, prev_child, next_queue, prev_queue, next_taskgroup, prev_taskgroup. Add pnode field. (struct gomp_taskgroup): Remove children. Add taskgroup_queue. 
(struct gomp_team): Change task_queue type to a priority queue. (splay_compare): Define inline. (priority_queue_offset): New. (priority_node_to_task): New. (task_to_priority_node): New. * oacc-mem.c: Do not include splay-tree.h. * priority_queue.c: New file. * priority_queue.h: New file. * splay-tree.c: Do not include splay-tree.h. (splay_tree_foreach_internal): New. (splay_tree_foreach): New. * splay-tree.h: Become re-entrant if splay_tree_prefix is defined. (splay_tree_callback): Define typedef. * target.c (splay_compare): Move to libgomp.h. (GOMP_target): Don't adjust *thr in any way around running offloaded task. (GOMP_target_ext): Likewise. Handle target nowait. (GOMP_target_update_ext, GOMP_target_enter_exit_data): Check return value from gomp_create_target_task, if false, fallthrough as if no dependencies exist. (gomp_target_task_fn): Change return type to bool, return true if the task should have another part scheduled later. Handle target nowait. (gomp_load_plugin_for_device): Initialize async_run. * task.c (gomp_init_task): Initialize children_queue. (gomp_clear_parent_in_list): New. (gomp_clear_parent_in_tree): New. (gomp_clear_parent): Handle priorities. (GOMP_task): Likewise. (priority_queue_move_task_first, gomp_target_task_completion, GOMP_PLUGIN_target_task_completion): New functions. (gomp_create_target_task): Use priority queues. Change return type to bool, add state argument, return false if for async {{enter,exit} data,update} constructs no dependencies need to be waited for, handle target nowait. Set task->fn to NULL instead of gomp_target_task_fn. (verify_children_queue): Remove. (priority_list_upgrade_task): New. (priority_queue_upgrade_task): New. (verify_task_queue): Remove. (priority_list_downgrade_task): New. (priority_queue_downgrade_task): New. (gomp_task_run_pre): Use priority queues. Abstract code out to priority_queue_downgrade_task. (gomp_task_run_post_handle_dependers): Use priority queues. 
(gomp_task_run_post_remove_parent): Likewise. (gomp_task_run_post_remove_taskgroup): Likewise. (gomp_barrier_handle_tasks): Likewise. Handle target nowait target tasks specially. (GOMP_taskwait): Likewise. (gomp_task_maybe_wait_for_dependencies): Likewise. Abstract code to priority_queue_upgrade_task. (GOMP_taskgroup_start): Use priority queues. (GOMP_taskgroup_end): Likewise. Handle target nowait target tasks specially. If taskgroup is NULL, and thr->ts.level is 0, act as a barrier. * taskloop.c (GOMP_taskloop): Handle priorities. * team.c (gomp_new_team): Call priority_queue_init. (free_team): Call priority_queue_free. (gomp_free_thread): Call gomp_team_end if thr->ts.team is artificial team created for target nowait in implicit parallel region. (gomp_team_start): For nested check, test thr->ts.level instead of thr->ts.team != NULL. * testsuite/libgomp.c/doacross-3.c: New test. * testsuite/libgomp.c/ordered-5.c: New test. * testsuite/libgomp.c/priority.c: New test. * testsuite/libgomp.c/target-31.c: New test. * testsuite/libgomp.c/target-32.c: New test. * testsuite/libgomp.c/target-33.c: New test. * testsuite/libgomp.c/target-34.c: New test. liboffloadmic/ 2015-11-14 Ilya Verbin <ilya.verbin@intel.com> * runtime/offload_host.cpp (task_completion_callback): New variable. (offload_proxy_task_completed_ooo): Call task_completion_callback. (__offload_register_task_callback): New function. * runtime/offload_host.h (__offload_register_task_callback): New declaration. * plugin/libgomp-plugin-intelmic.cpp (offload): Add async_data argument, handle async offloading. (register_main_image): Call register_main_image. (GOMP_OFFLOAD_init_device, get_target_table, GOMP_OFFLOAD_alloc, GOMP_OFFLOAD_free, GOMP_OFFLOAD_host2dev, GOMP_OFFLOAD_dev2host, GOMP_OFFLOAD_dev2dev): Adjust offload callers. (GOMP_OFFLOAD_async_run): New function. (GOMP_OFFLOAD_run): Implement using GOMP_OFFLOAD_async_run. From-SVN: r230381
303 lines
8.2 KiB
C
303 lines
8.2 KiB
C
/* Copyright (C) 2005-2015 Free Software Foundation, Inc.
|
||
Contributed by Richard Henderson <rth@redhat.com>.
|
||
|
||
This file is part of the GNU Offloading and Multi Processing Library
|
||
(libgomp).
|
||
|
||
Libgomp is free software; you can redistribute it and/or modify it
|
||
under the terms of the GNU General Public License as published by
|
||
the Free Software Foundation; either version 3, or (at your option)
|
||
any later version.
|
||
|
||
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
|
||
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||
more details.
|
||
|
||
Under Section 7 of GPL version 3, you are granted additional
|
||
permissions described in the GCC Runtime Library Exception, version
|
||
3.1, as published by the Free Software Foundation.
|
||
|
||
You should have received a copy of the GNU General Public License and
|
||
a copy of the GCC Runtime Library Exception along with this program;
|
||
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
|
||
<http://www.gnu.org/licenses/>. */
|
||
|
||
/* This file handles the (bare) PARALLEL construct. */
|
||
|
||
#include "libgomp.h"
|
||
#include <limits.h>
|
||
|
||
|
||
/* Determine the number of threads to be launched for a PARALLEL construct.
   This algorithm is explicitly described in OpenMP 3.0 section 2.4.1.
   SPECIFIED is a combination of the NUM_THREADS clause and the IF clause.
   If the IF clause is false, SPECIFIED is forced to 1.  When NUM_THREADS
   is not present, SPECIFIED is 0.  COUNT, when non-zero, additionally
   caps the result under dynamic thread adjustment (used for parallel
   sections).  */

unsigned
gomp_resolve_num_threads (unsigned specified, unsigned count)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task_icv *icv;
  unsigned threads_requested, max_num_threads, num_threads;
  unsigned long busy;
  struct gomp_thread_pool *pool;

  icv = gomp_icv (false);

  /* A false IF clause (SPECIFIED forced to 1), an active nested region
     with nesting disabled, or reaching max-active-levels all yield a
     team of one thread.  */
  if (specified == 1)
    return 1;
  else if (thr->ts.active_level >= 1 && !icv->nest_var)
    return 1;
  else if (thr->ts.active_level >= gomp_max_active_levels_var)
    return 1;

  /* If NUM_THREADS not specified, use nthreads_var.  */
  if (specified == 0)
    threads_requested = icv->nthreads_var;
  else
    threads_requested = specified;

  max_num_threads = threads_requested;

  /* If dynamic threads are enabled, bound the number of threads
     that we launch.  */
  if (icv->dyn_var)
    {
      unsigned dyn = gomp_dynamic_max_threads ();
      if (dyn < max_num_threads)
	max_num_threads = dyn;

      /* Optimization for parallel sections.  */
      if (count && count < max_num_threads)
	max_num_threads = count;
    }

  /* UINT_MAX stands for infinity.  */
  if (__builtin_expect (icv->thread_limit_var == UINT_MAX, 1)
      || max_num_threads == 1)
    return max_num_threads;

  /* The threads_busy counter lives in thread_pool, if there
     isn't a thread_pool yet, there must be just one thread
     in the contention group.  If thr->team is NULL, this isn't
     nested parallel, so there is just one thread in the
     contention group as well, no need to handle it atomically.  */
  pool = thr->thread_pool;
  if (thr->ts.team == NULL || pool == NULL)
    {
      num_threads = max_num_threads;
      if (num_threads > icv->thread_limit_var)
	num_threads = icv->thread_limit_var;
      if (pool)
	pool->threads_busy = num_threads;
      return num_threads;
    }

#ifdef HAVE_SYNC_BUILTINS
  /* Atomically reserve num_threads - 1 extra slots against the thread
     limit (the current thread is already counted in threads_busy);
     retry if another thread raced and updated the counter meanwhile.  */
  do
    {
      busy = pool->threads_busy;
      num_threads = max_num_threads;
      if (icv->thread_limit_var - busy + 1 < num_threads)
	num_threads = icv->thread_limit_var - busy + 1;
    }
  while (__sync_val_compare_and_swap (&pool->threads_busy,
				      busy, busy + num_threads - 1)
	 != busy);
#else
  /* Without sync builtins, guard the busy counter with the global
     managed-threads lock.  */
  gomp_mutex_lock (&gomp_managed_threads_lock);
  num_threads = max_num_threads;
  busy = pool->threads_busy;
  if (icv->thread_limit_var - busy + 1 < num_threads)
    num_threads = icv->thread_limit_var - busy + 1;
  pool->threads_busy += num_threads - 1;
  gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif

  return num_threads;
}
|
||
|
||
/* Launch the team for a PARALLEL construct without an explicit join;
   the compiler pairs this call with GOMP_parallel_end.  */

void
GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads)
{
  unsigned nthreads = gomp_resolve_num_threads (num_threads, 0);
  struct gomp_team *team = gomp_new_team (nthreads);
  gomp_team_start (fn, data, nthreads, 0, team);
}
|
||
|
||
/* Finish a PARALLEL construct.  When a finite thread-limit is in
   effect, also release the threads_busy reservation that
   gomp_resolve_num_threads made for this team.  */

void
GOMP_parallel_end (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0))
    {
      /* Capture the team size before gomp_team_end tears the team
	 down; afterwards thr->ts reflects the enclosing region.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;
      unsigned int nthreads = team ? team->nthreads : 1;
      gomp_team_end ();
      if (nthreads > 1)
	{
	  /* If not nested, there is just one thread in the
	     contention group left, no need for atomicity.  */
	  if (thr->ts.team == NULL)
	    thr->thread_pool->threads_busy = 1;
	  else
	    {
#ifdef HAVE_SYNC_BUILTINS
	      /* Give back the nthreads - 1 slots this team had
		 reserved against the thread limit.  */
	      __sync_fetch_and_add (&thr->thread_pool->threads_busy,
				    1UL - nthreads);
#else
	      gomp_mutex_lock (&gomp_managed_threads_lock);
	      thr->thread_pool->threads_busy -= nthreads - 1;
	      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
	    }
	}
    }
  else
    gomp_team_end ();
}
|
||
ialias (GOMP_parallel_end)
|
||
|
||
void
|
||
GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads, unsigned int flags)
|
||
{
|
||
num_threads = gomp_resolve_num_threads (num_threads, 0);
|
||
gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads));
|
||
fn (data);
|
||
ialias_call (GOMP_parallel_end) ();
|
||
}
|
||
|
||
bool
|
||
GOMP_cancellation_point (int which)
|
||
{
|
||
if (!gomp_cancel_var)
|
||
return false;
|
||
|
||
struct gomp_thread *thr = gomp_thread ();
|
||
struct gomp_team *team = thr->ts.team;
|
||
if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
|
||
{
|
||
if (team == NULL)
|
||
return false;
|
||
return team->work_share_cancelled != 0;
|
||
}
|
||
else if (which & GOMP_CANCEL_TASKGROUP)
|
||
{
|
||
if (thr->task->taskgroup && thr->task->taskgroup->cancelled)
|
||
return true;
|
||
/* FALLTHRU into the GOMP_CANCEL_PARALLEL case,
|
||
as #pragma omp cancel parallel also cancels all explicit
|
||
tasks. */
|
||
}
|
||
if (team)
|
||
return gomp_team_barrier_cancelled (&team->barrier);
|
||
return false;
|
||
}
|
||
ialias (GOMP_cancellation_point)
|
||
|
||
/* Implement #pragma omp cancel.  WHICH is a bitmask of GOMP_CANCEL_*
   construct kinds; DO_CANCEL is the evaluated IF clause.  Returns true
   when cancellation is (or already was) in effect.  */

bool
GOMP_cancel (int which, bool do_cancel)
{
  if (!gomp_cancel_var)
    return false;

  /* A false IF clause degenerates to a mere cancellation point.  */
  if (!do_cancel)
    return ialias_call (GOMP_cancellation_point) (which);

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
    {
      /* In orphaned worksharing region, all we want to cancel
	 is current thread.  */
      if (team != NULL)
	team->work_share_cancelled = 1;
      return true;
    }
  else if (which & GOMP_CANCEL_TASKGROUP)
    {
      if (thr->task->taskgroup && !thr->task->taskgroup->cancelled)
	{
	  /* NOTE(review): team is dereferenced here with no NULL check,
	     unlike the branch above — presumably a taskgroup implies an
	     enclosing team; confirm.  */
	  gomp_mutex_lock (&team->task_lock);
	  thr->task->taskgroup->cancelled = true;
	  gomp_mutex_unlock (&team->task_lock);
	}
      return true;
    }
  /* GOMP_CANCEL_PARALLEL: cancel the whole team through the team
     barrier.  NOTE(review): team is assumed non-NULL here as well.  */
  team->team_cancelled = 1;
  gomp_team_barrier_cancel (team);
  return true;
}
|
||
|
||
/* The public OpenMP API for thread and team related inquiries. */
|
||
|
||
int
|
||
omp_get_num_threads (void)
|
||
{
|
||
struct gomp_team *team = gomp_thread ()->ts.team;
|
||
return team ? team->nthreads : 1;
|
||
}
|
||
|
||
int
|
||
omp_get_thread_num (void)
|
||
{
|
||
return gomp_thread ()->ts.team_id;
|
||
}
|
||
|
||
/* This wasn't right for OpenMP 2.5. Active region used to be non-zero
|
||
when the IF clause doesn't evaluate to false, starting with OpenMP 3.0
|
||
it is non-zero with more than one thread in the team. */
|
||
|
||
int
|
||
omp_in_parallel (void)
|
||
{
|
||
return gomp_thread ()->ts.active_level > 0;
|
||
}
|
||
|
||
int
|
||
omp_get_level (void)
|
||
{
|
||
return gomp_thread ()->ts.level;
|
||
}
|
||
|
||
int
|
||
omp_get_ancestor_thread_num (int level)
|
||
{
|
||
struct gomp_team_state *ts = &gomp_thread ()->ts;
|
||
if (level < 0 || level > ts->level)
|
||
return -1;
|
||
for (level = ts->level - level; level > 0; --level)
|
||
ts = &ts->team->prev_ts;
|
||
return ts->team_id;
|
||
}
|
||
|
||
int
|
||
omp_get_team_size (int level)
|
||
{
|
||
struct gomp_team_state *ts = &gomp_thread ()->ts;
|
||
if (level < 0 || level > ts->level)
|
||
return -1;
|
||
for (level = ts->level - level; level > 0; --level)
|
||
ts = &ts->team->prev_ts;
|
||
if (ts->team == NULL)
|
||
return 1;
|
||
else
|
||
return ts->team->nthreads;
|
||
}
|
||
|
||
int
|
||
omp_get_active_level (void)
|
||
{
|
||
return gomp_thread ()->ts.active_level;
|
||
}
|
||
|
||
ialias (omp_get_num_threads)
|
||
ialias (omp_get_thread_num)
|
||
ialias (omp_in_parallel)
|
||
ialias (omp_get_level)
|
||
ialias (omp_get_ancestor_thread_num)
|
||
ialias (omp_get_team_size)
|
||
ialias (omp_get_active_level)
|