[gomp] Recycle last non-nested team if possible

libgomp/ChangeLog
2015-07-15  Sebastian Huber  <sebastian.huber@embedded-brains.de>

	* team.c (get_last_team): New.
	(gomp_new_team): Recycle last non-nested team if possible.
	(gomp_team_end): Move team work share list free lock destruction
	to ...
	(free_team): ... here.

From-SVN: r225811
This commit is contained in:
Sebastian Huber 2015-07-15 09:11:11 +00:00 committed by Sebastian Huber
parent 290f6359d2
commit 6dba011330
2 changed files with 45 additions and 13 deletions

View File

@ -1,3 +1,11 @@
2015-07-15 Sebastian Huber <sebastian.huber@embedded-brains.de>
* team.c (get_last_team): New.
(gomp_new_team): Recycle last non-nested team if possible.
(gomp_team_end): Move team work share list free lock destruction
to ...
(free_team): ... here.
2015-07-14 Maxim Blumenthal <maxim.blumenthal@intel.com>
* testsuite/libgomp.c/examples-4/simd-3.c: (main): Change type of res

View File

@ -134,6 +134,25 @@ gomp_thread_start (void *xdata)
return NULL;
}
/* If the current thread is not inside a nested team, try to detach
   the most recently released team from its thread pool.  The cached
   team is reused only when its thread count matches NTHREADS exactly;
   otherwise (or when there is no pool / no cached team) return NULL
   so the caller allocates a fresh team.  */
static inline struct gomp_team *
get_last_team (unsigned nthreads)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_thread_pool *pool;
  struct gomp_team *last_team;

  /* A non-NULL team in the task state means we are nested; never
     recycle in that case.  */
  if (thr->ts.team != NULL)
    return NULL;

  pool = thr->thread_pool;
  if (pool == NULL)
    return NULL;

  last_team = pool->last_team;
  if (last_team == NULL || last_team->nthreads != nthreads)
    return NULL;

  /* Claim the cached team for the caller.  */
  pool->last_team = NULL;
  return last_team;
}
/* Create a new team data structure. */
@ -141,18 +160,27 @@ struct gomp_team *
gomp_new_team (unsigned nthreads)
{
struct gomp_team *team;
size_t size;
int i;
size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
+ sizeof (team->implicit_task[0]));
team = gomp_malloc (size);
team = get_last_team (nthreads);
if (team == NULL)
{
size_t extra = sizeof (team->ordered_release[0])
+ sizeof (team->implicit_task[0]);
team = gomp_malloc (sizeof (*team) + nthreads * extra);
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_init (&team->work_share_list_free_lock);
#endif
gomp_barrier_init (&team->barrier, nthreads);
gomp_mutex_init (&team->task_lock);
team->nthreads = nthreads;
}
team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
team->single_count = 0;
#else
gomp_mutex_init (&team->work_share_list_free_lock);
#endif
team->work_shares_to_free = &team->work_shares[0];
gomp_init_work_share (&team->work_shares[0], false, nthreads);
@ -163,14 +191,10 @@ gomp_new_team (unsigned nthreads)
team->work_shares[i].next_free = &team->work_shares[i + 1];
team->work_shares[i].next_free = NULL;
team->nthreads = nthreads;
gomp_barrier_init (&team->barrier, nthreads);
gomp_sem_init (&team->master_release, 0);
team->ordered_release = (void *) &team->implicit_task[nthreads];
team->ordered_release[0] = &team->master_release;
gomp_mutex_init (&team->task_lock);
team->task_queue = NULL;
team->task_count = 0;
team->task_queued_count = 0;
@ -187,6 +211,9 @@ gomp_new_team (unsigned nthreads)
static void
free_team (struct gomp_team *team)
{
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif
gomp_barrier_destroy (&team->barrier);
gomp_mutex_destroy (&team->task_lock);
free (team);
@ -895,9 +922,6 @@ gomp_team_end (void)
while (ws != NULL);
}
gomp_sem_destroy (&team->master_release);
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif
if (__builtin_expect (thr->ts.team != NULL, 0)
|| __builtin_expect (team->nthreads == 1, 0))