* sysdeps/powerpc/bits/atomic.h (atomic_increment): Define.
	(atomic_decrement): Define.

	* sysdeps/powerpc/bits/atomic.h: Implement atomic_increment_val and
	atomic_decrement_val.
	* sysdeps/powerpc/powerpc32/bits/atomic.h: Likewise.
	* sysdeps/powerpc/powerpc64/bits/atomic.h: Likewise.

	* csu/tst-atomic.c (do_test): Add tests of atomic_increment_val
	and atomic_decrement_val.
Ulrich Drepper 2004-09-08 06:09:02 +00:00
parent f510d815be
commit 7ba0e52c39
9 changed files with 129 additions and 26 deletions

ChangeLog

@@ -1,5 +1,16 @@
2004-09-07 Ulrich Drepper <drepper@redhat.com>
+ * sysdeps/powerpc/bits/atomic.h (atomic_increment): Define.
+ (atomic_decrement): Define.
+ * sysdeps/powerpc/bits/atomic.h: Implement atomic_increment_val and
+ atomic_decrement_val.
+ * sysdeps/powerpc/powerpc32/bits/atomic.h: Likewise.
+ * sysdeps/powerpc/powerpc64/bits/atomic.h: Likewise.
+ * csu/tst-atomic.c (do_test): Add tests of atomic_increment_val
+ and atomic_decrement_val.
+ * include/atomic.h: Define atomic_increment_val, atomic_decrement_val,
+ and atomic_delay if not already defined.
+ * sysdeps/i386/i486/bits/atomic.h: Define atomic_delay.

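The include/atomic.h hunk itself is not shown in this view. The new macros return the value after the operation (the tests below expect 1 after incrementing a zeroed counter). As a hedged sketch, generic fallbacks for architectures without their own definitions could be built on the existing atomic_exchange_and_add primitive, which returns the value before the addition; the exact glibc hunk may differ:

/* Sketch of possible generic fallbacks, not the actual include/atomic.h hunk:
   atomic_exchange_and_add returns the old value, so the new value is
   old + 1 (or old - 1).  */
#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif
#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif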
csu/tst-atomic.c

@@ -1,5 +1,5 @@
/* Tests for atomic.h macros.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -130,6 +130,12 @@ do_test (void)
ret = 1;
}
+ if (atomic_increment_val (&mem) != 1)
+ {
+ puts ("atomic_increment_val test failed");
+ ret = 1;
+ }
mem = 0;
if (atomic_increment_and_test (&mem)
|| mem != 1)
@@ -162,6 +168,12 @@ do_test (void)
ret = 1;
}
+ if (atomic_decrement_val (&mem) != 15)
+ {
+ puts ("atomic_decrement_val test failed");
+ ret = 1;
+ }
mem = 0;
if (atomic_decrement_and_test (&mem)
|| mem != -1)

nptl/allocatestack.c

@@ -445,8 +445,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
#if COLORING_INCREMENT != 0
/* Atomically increment NCREATED. */
- unsigned int ncreated = (atomic_exchange_and_add (&nptl_ncreated, 1)
- + 1);
+ unsigned int ncreated = atomic_increment_val (&nptl_ncreated);
/* We chose the offset for coloring by incrementing it for
every new thread by a fixed amount. The offset used

nptl/sysdeps/unix/sysv/linux/pthread_barrier_wait.c

@@ -69,7 +69,7 @@ pthread_barrier_wait (barrier)
unsigned int init_count = ibarrier->init_count;
/* If this was the last woken thread, unlock. */
- if (atomic_exchange_and_add (&ibarrier->left, 1) == init_count - 1)
+ if (atomic_increment_val (&ibarrier->left) == init_count)
/* We are done. */
lll_unlock (ibarrier->lock);

nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c

@@ -1,5 +1,5 @@
/* sem_post -- post to a POSIX semaphore. Powerpc version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -30,11 +30,10 @@ int
__new_sem_post (sem_t *sem)
{
int *futex = (int *) sem;
- int err, nr;
__asm __volatile (__lll_rel_instr ::: "memory");
- nr = atomic_exchange_and_add (futex, 1);
- err = lll_futex_wake (futex, nr + 1);
+ int nr = atomic_increment_val (futex);
+ int err = lll_futex_wake (futex, nr);
if (__builtin_expect (err, 0) < 0)
{
__set_errno (-err);

nptl/sysdeps/unix/sysv/linux/sem_post.c

@@ -1,5 +1,5 @@
/* sem_post -- post to a POSIX semaphore. Generic futex-using version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -30,10 +30,9 @@ int
__new_sem_post (sem_t *sem)
{
int *futex = (int *) sem;
- int err, nr;
- nr = atomic_exchange_and_add (futex, 1);
- err = lll_futex_wake (futex, nr + 1);
+ int nr = atomic_increment_val (futex);
+ int err = lll_futex_wake (futex, nr);
if (__builtin_expect (err, 0) < 0)
{
__set_errno (-err);

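All four NPTL call sites above (allocate_stack, pthread_barrier_wait, and the two __new_sem_post variants) make the same substitution: atomic_exchange_and_add (p, 1) + 1 becomes atomic_increment_val (p). Both expressions evaluate to the value after the increment, so sem_post still wakes nr waiters for the new semaphore value nr. A standalone illustration of that equivalence, using GCC __atomic builtins rather than glibc's internal macros:

/* Illustration only: fetch-then-add plus one equals add-then-fetch, which is
   why the "+ 1" disappears at the updated call sites.  */
#include <assert.h>

int
main (void)
{
  int counter = 5;
  int a = __atomic_fetch_add (&counter, 1, __ATOMIC_SEQ_CST) + 1;  /* old value + 1 == 6 */
  int b = __atomic_add_fetch (&counter, 1, __ATOMIC_SEQ_CST);      /* new value == 7 */
  assert (a == 6 && b == 7);
  return 0;
}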
sysdeps/powerpc/bits/atomic.h

@@ -147,6 +147,32 @@ typedef uintmax_t uatomic_max_t;
__val; \
})
+ #define __arch_atomic_increment_val_32(mem) \
+ ({ \
+ __typeof (*(mem)) __val; \
+ __asm __volatile ("1: lwarx %0,0,%2\n" \
+ " addi %0,%0,1\n" \
+ " stwcx. %0,0,%2\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=m" (*mem) \
+ : "b" (mem), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+ #define __arch_atomic_decrement_val_32(mem) \
+ ({ \
+ __typeof (*(mem)) __val; \
+ __asm __volatile ("1: lwarx %0,0,%2\n" \
+ " subi %0,%0,1\n" \
+ " stwcx. %0,0,%2\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=m" (*mem) \
+ : "b" (mem), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
#define __arch_atomic_decrement_if_positive_32(mem) \
({ int __val, __tmp; \
__asm __volatile ("1: lwarx %0,0,%3\n" \
@@ -222,6 +248,34 @@ typedef uintmax_t uatomic_max_t;
__result; \
})
+ #define atomic_increment_val(mem) \
+ ({ \
+ __typeof (*(mem)) __result; \
+ if (sizeof (*(mem)) == 4) \
+ __result = __arch_atomic_increment_val_32 (mem); \
+ else if (sizeof (*(mem)) == 8) \
+ __result = __arch_atomic_increment_val_64 (mem); \
+ else \
+ abort (); \
+ __result; \
+ })
+ #define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })
+ #define atomic_decrement_val(mem) \
+ ({ \
+ __typeof (*(mem)) __result; \
+ if (sizeof (*(mem)) == 4) \
+ __result = __arch_atomic_decrement_val_32 (mem); \
+ else if (sizeof (*(mem)) == 8) \
+ __result = __arch_atomic_decrement_val_64 (mem); \
+ else \
+ abort (); \
+ __result; \
+ })
+ #define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
/* Decrement *MEM if it is > 0, and return the old value. */
#define atomic_decrement_if_positive(mem) \

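The new 32-bit variants are load-and-reserve/store-conditional loops: lwarx loads and reserves the word, addi or subi adjusts it, and stwcx. stores only if the reservation still holds, branching back to retry on failure, so __val ends up holding the value after the operation. atomic_increment_val and atomic_decrement_val then pick the 32- or 64-bit variant from sizeof (*mem) at compile time. A rough portable restatement of the retry loop, for illustration only (the helper name is made up, and the real macros rely on the hardware reservation rather than compare-and-swap):

/* Illustrative restatement of the lwarx/addi/stwcx. loop; not glibc code.  */
static inline int
increment_val_32 (int *mem)
{
  int oldval, newval;
  do
    {
      oldval = __atomic_load_n (mem, __ATOMIC_RELAXED);   /* plays the role of lwarx  */
      newval = oldval + 1;                                 /* plays the role of addi   */
    }
  while (!__atomic_compare_exchange_n (mem, &oldval, newval, 1,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED));
  return newval;                                           /* value after the increment */
}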
sysdeps/powerpc/powerpc32/bits/atomic.h

@@ -1,5 +1,5 @@
/* Atomic operations. PowerPC32 version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -56,17 +56,15 @@
__tmp != 0; \
})
- /*
- * Powerpc32 processors don't implement the 64-bit (doubleword) forms of
- * load and reserve (ldarx) and store conditional (stdcx.) instructions.
- * So for powerpc32 we stub out the 64-bit forms.
- */
+ /* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
+ load and reserve (ldarx) and store conditional (stdcx.) instructions.
+ So for powerpc32 we stub out the 64-bit forms. */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
(abort (), 0)
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
(abort (), (__typeof (*mem)) 0)
# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
(abort (), 0)
@@ -82,19 +80,24 @@
# define __arch_atomic_exchange_and_add_64(mem, value) \
({ abort (); (*mem) = (value); })
+ # define __arch_atomic_increment_val_64(mem) \
+ ({ abort (); (*mem)++; })
+ # define __arch_atomic_decrement_val_64(mem) \
+ ({ abort (); (*mem)--; })
# define __arch_atomic_decrement_if_positive_64(mem) \
({ abort (); (*mem)--; })
- /*
- * Older powerpc32 processors don't support the new "light weight"
- * sync (lwsync). So the only safe option is to use normal sync
- * for all powerpc32 applications.
+ /*
+ * Older powerpc32 processors don't support the new "light weight"
+ * sync (lwsync). So the only safe option is to use normal sync
+ * for all powerpc32 applications.
+ */
# define atomic_read_barrier() __asm ("sync" ::: "memory")
/*
* Include the rest of the atomic ops macros which are common to both
- * powerpc32 and powerpc64.
+ * powerpc32 and powerpc64.
*/
#include_next <bits/atomic.h>

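On powerpc32 the 64-bit variants simply abort (): ldarx/stdcx. are unavailable, but the stubs keep atomic_increment_val and atomic_decrement_val type-checking, and because the sizeof dispatch is a compile-time constant the aborting branch is dead code for 4-byte operands. A small self-contained analogue (hypothetical macro name, with a deliberately non-atomic stand-in for the real operation, since only the dispatch is being shown):

/* demo_increment_val is hypothetical; the sizeof test folds at compile time,
   so the aborting "64-bit" branch is never reached for a 4-byte counter,
   mirroring the powerpc32 stubs above.  */
#include <stdio.h>
#include <stdlib.h>

#define demo_increment_val(mem)                                         \
  ({ __typeof (*(mem)) __result;                                        \
     if (sizeof (*(mem)) == 4)                                          \
       __result = ++*(mem);         /* stand-in for the _32 variant */  \
     else                                                               \
       abort ();                    /* stand-in for the _64 stub */     \
     __result; })

int
main (void)
{
  unsigned int counter = 0;
  printf ("%u\n", demo_increment_val (&counter));   /* prints 1; abort () is never reached */
  return 0;
}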
sysdeps/powerpc/powerpc64/bits/atomic.h

@@ -168,6 +168,32 @@
__val; \
})
+ # define __arch_atomic_increment_val_64(mem) \
+ ({ \
+ __typeof (*(mem)) __val; \
+ __asm __volatile ("1: ldarx %0,0,%2\n" \
+ " addi %0,%0,1\n" \
+ " stdcx. %0,0,%2\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=m" (*mem) \
+ : "b" (mem), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+ # define __arch_atomic_decrement_val_64(mem) \
+ ({ \
+ __typeof (*(mem)) __val; \
+ __asm __volatile ("1: ldarx %0,0,%2\n" \
+ " subi %0,%0,1\n" \
+ " stdcx. %0,0,%2\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=m" (*mem) \
+ : "b" (mem), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
# define __arch_atomic_decrement_if_positive_64(mem) \
({ int __val, __tmp; \
__asm __volatile ("1: ldarx %0,0,%3\n" \