libgomp: Reduce copy and paste for RTEMS
libgomp/ * config/rtems/bar.c: Include "../linux/bar.c" and delete copy and paste code. From-SVN: r267752
This commit is contained in:
parent
30b4d0d0b9
commit
cb87fec331
@ -1,3 +1,8 @@
2019-01-09  Sebastian Huber  <sebastian.huber@embedded-brains.de>

	* config/rtems/bar.c: Include "../linux/bar.c" and delete copy
	and paste code.

2019-01-09  Sebastian Huber  <sebastian.huber@embedded-brains.de>

	* config/rtems/affinity-fmt.c: New file.  Include affinity-fmt.c,
@ -72,184 +72,5 @@ do_wait (int *addr, int val)
|
||||
futex_wait (addr, val);
|
||||
}
|
||||
|
||||
/* Everything below this point should be identical to the Linux
   implementation.  */
|
||||
|
||||
/* Second half of a simple (non-task) barrier.  STATE is the value this
   thread got from gomp_barrier_wait_start.  If this thread was the last
   to arrive (BAR_WAS_LAST set), advance the barrier generation and wake
   every sleeping waiter; otherwise sleep until the generation moves past
   the one we arrived in.  */

void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      bar->awaited = bar->total;
      /* RELEASE store pairs with the ACQUIRE load in the waiter branch
	 below, publishing all pre-barrier work to the woken threads.  */
      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
			MEMMODEL_RELEASE);
      futex_wake ((int *) &bar->generation, INT_MAX);
    }
  else
    {
      /* Re-check the generation after every wakeup: futex wakeups can
	 be spurious, so loop until it actually changed.  */
      do
	do_wait ((int *) &bar->generation, state);
      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) == state);
    }
}
|
||||
|
||||
void
|
||||
gomp_barrier_wait (gomp_barrier_t *bar)
|
||||
{
|
||||
gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
|
||||
}
|
||||
|
||||
/* Like gomp_barrier_wait, except that if the encountering thread
|
||||
is not the last one to hit the barrier, it returns immediately.
|
||||
The intended usage is that a thread which intends to gomp_barrier_destroy
|
||||
this barrier calls gomp_barrier_wait, while all other threads
|
||||
call gomp_barrier_wait_last. When gomp_barrier_wait returns,
|
||||
the barrier can be safely destroyed. */
|
||||
|
||||
void
|
||||
gomp_barrier_wait_last (gomp_barrier_t *bar)
|
||||
{
|
||||
gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
|
||||
if (state & BAR_WAS_LAST)
|
||||
gomp_barrier_wait_end (bar, state);
|
||||
}
|
||||
|
||||
void
|
||||
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
|
||||
{
|
||||
futex_wake ((int *) &bar->generation, count == 0 ? INT_MAX : count);
|
||||
}
|
||||
|
||||
/* Second half of a team barrier with task scheduling: while waiting,
   threads execute pending tasks (BAR_TASK_PENDING) instead of merely
   sleeping.  STATE is the value this thread got from
   gomp_barrier_wait_start.  */

void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
	{
	  /* Outstanding tasks must complete before the barrier can be
	     released; drop BAR_WAS_LAST and fall into the wait loop so
	     this thread helps run them.  */
	  gomp_barrier_handle_tasks (state);
	  state &= ~BAR_WAS_LAST;
	}
      else
	{
	  /* No tasks: advance the generation (also clearing any stale
	     BAR_CANCELLED bit) and release all waiters.  RELEASE store
	     pairs with the ACQUIRE loads in the loop below.  */
	  state &= ~BAR_CANCELLED;
	  state += BAR_INCR - BAR_WAS_LAST;
	  __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
	  futex_wake ((int *) &bar->generation, INT_MAX);
	  return;
	}
    }

  generation = state;
  state &= ~BAR_CANCELLED;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
	{
	  /* Help execute queued tasks, then re-read the generation in
	     case the barrier completed while we were working.  */
	  gomp_barrier_handle_tasks (state);
	  gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	}
      /* Remember BAR_WAITING_FOR_TASK so do_wait compares against the
	 value currently stored in the generation word.  */
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);
}
|
||||
|
||||
void
|
||||
gomp_team_barrier_wait (gomp_barrier_t *bar)
|
||||
{
|
||||
gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
|
||||
}
|
||||
|
||||
void
|
||||
gomp_team_barrier_wait_final (gomp_barrier_t *bar)
|
||||
{
|
||||
gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
|
||||
if (__builtin_expect (state & BAR_WAS_LAST, 0))
|
||||
bar->awaited_final = bar->total;
|
||||
gomp_team_barrier_wait_end (bar, state);
|
||||
}
|
||||
|
||||
/* Cancellable variant of gomp_team_barrier_wait_end.  Returns true if
   the barrier was cancelled (BAR_CANCELLED observed), false when the
   barrier completed normally.  STATE is the value this thread got from
   gomp_barrier_wait_start.  */

bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
				   gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      /* BAR_CANCELLED should never be set in state here, because
	 cancellation means that at least one of the threads has been
	 cancelled, thus on a cancellable barrier we should never see
	 all threads to arrive.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
	{
	  /* Outstanding tasks must run first; drop BAR_WAS_LAST and
	     fall through to the wait loop to help execute them.  */
	  gomp_barrier_handle_tasks (state);
	  state &= ~BAR_WAS_LAST;
	}
      else
	{
	  /* No tasks: advance the generation and release all waiters.
	     RELEASE store pairs with the ACQUIRE loads below.  */
	  state += BAR_INCR - BAR_WAS_LAST;
	  __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
	  futex_wake ((int *) &bar->generation, INT_MAX);
	  return false;
	}
    }

  /* Arrived at an already-cancelled barrier: report cancellation
     without waiting.  */
  if (__builtin_expect (state & BAR_CANCELLED, 0))
    return true;

  generation = state;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      /* Cancellation may be flagged while we sleep; bail out as soon
	 as it is observed.  */
      if (__builtin_expect (gen & BAR_CANCELLED, 0))
	return true;
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
	{
	  /* Help execute queued tasks, then re-read the generation.  */
	  gomp_barrier_handle_tasks (state);
	  gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	}
      /* Track BAR_WAITING_FOR_TASK so do_wait compares against the
	 value currently stored in the generation word.  */
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);

  return false;
}
|
||||
|
||||
bool
|
||||
gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
|
||||
{
|
||||
return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
|
||||
}
|
||||
|
||||
/* Mark TEAM's barrier as cancelled and wake every thread sleeping on
   it.  Idempotent: if BAR_CANCELLED is already set, return without
   waking anyone.  TEAM->task_lock serializes the generation update.  */

void
gomp_team_barrier_cancel (struct gomp_team *team)
{
  gomp_mutex_lock (&team->task_lock);
  if (team->barrier.generation & BAR_CANCELLED)
    {
      /* Someone else already cancelled; nothing to do.  */
      gomp_mutex_unlock (&team->task_lock);
      return;
    }
  team->barrier.generation |= BAR_CANCELLED;
  gomp_mutex_unlock (&team->task_lock);
  /* Wake after dropping the lock; woken waiters re-read the generation
     word and see BAR_CANCELLED.  */
  futex_wake ((int *) &team->barrier.generation, INT_MAX);
}
|
||||
/* Reuse the Linux barrier implementation for the code above this
   point's shared logic.  NOTE(review): defining GOMP_WAIT_H first
   presumably matches the include guard of config/linux/wait.h, so the
   RTEMS futex/do_wait definitions in this file are used instead of the
   Linux ones — verify against config/linux/wait.h.  */
#define GOMP_WAIT_H 1
#include "../linux/bar.c"
|
||||
|
Loading…
Reference in New Issue
Block a user