2003-03-25  Roland McGrath  <roland@redhat.com>

	* sysdeps/powerpc/bits/atomic.h (__arch_atomic_exchange_32): New macro.
	(__arch_atomic_exchange_64): New macro.
	(atomic_exchange): Use them.
	(__arch_atomic_exchange_and_add_32): New macro.
	(__arch_atomic_exchange_and_add_64): New macro.
	(atomic_exchange_and_add): Use them.
	Original patch from Steven Munroe <sjmunroe@us.ibm.com>.
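
Both new macro pairs spell out the PowerPC load-reserve/store-conditional retry loop (lwarx/stwcx. for 32-bit operands, ldarx/stdcx. for 64-bit): load the old value with a reservation, attempt a conditional store, and retry if the reservation was lost. A rough C11-atomics sketch of the same exchange-and-add contract — purely illustrative, not part of the patch:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative analogue of atomic_exchange_and_add: return the old
   value of *mem and store old + addend, retrying until the update
   sticks -- the contract the ldarx/stdcx. loop implements directly.  */
static uint64_t
exchange_and_add_sketch (_Atomic uint64_t *mem, uint64_t addend)
{
  uint64_t old = atomic_load_explicit (mem, memory_order_relaxed);
  /* Like stdcx., compare_exchange_weak may fail spuriously; on
     failure it reloads 'old' for us, so we simply retry.  */
  while (!atomic_compare_exchange_weak_explicit (mem, &old, old + addend,
                                                 memory_order_acq_rel,
                                                 memory_order_relaxed))
    ;
  return old;
}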
commit 3e195d9371
parent bacb02966f
Roland McGrath <roland@redhat.com>, 2003-03-25 22:40:21 +00:00
1 changed file with 76 additions and 25 deletions

--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -102,38 +102,90 @@ typedef uintmax_t uatomic_max_t;
      __tmp != 0; \
   })
-#else /* powerpc32 */
-# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
-  (abort (), 0)
-#endif
-#define atomic_exchange(mem, value) \
-  ({ if (sizeof (*mem) != 4) \
-       abort (); \
-     int __val; \
+# define __arch_atomic_exchange_64(mem, value) \
+    ({ \
+      __typeof (*mem) __val; \
      __asm __volatile (__ARCH_REL_INSTR "\n" \
-                       "1: lwarx %0,0,%2\n" \
-                       " stwcx. %3,0,%2\n" \
+                       "1: ldarx %0,0,%2\n" \
+                       " stdcx. %3,0,%2\n" \
                        " bne- 1b" \
                        : "=&r" (__val), "=m" (*mem) \
                        : "r" (mem), "r" (value), "1" (*mem) \
                        : "cr0"); \
-     __val; })
+      __val; \
+    })
+
+# define __arch_atomic_exchange_and_add_64(mem, value) \
+    ({ \
+      __typeof (*mem) __val, __tmp; \
+      __asm __volatile ("1: ldarx %0,0,%3\n" \
+                        " addi %1,%0,%4\n" \
+                        " stdcx. %1,0,%3\n" \
+                        " bne- 1b" \
+                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+                        : "r" (mem), "I" (value), "2" (*mem) \
+                        : "cr0"); \
+      __val; \
+    })
 
-#define atomic_exchange_and_add(mem, value) \
-  ({ if (sizeof (*mem) != 4) \
+#else /* powerpc32 */
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+  (abort (), 0)
+
+# define __arch_atomic_exchange_64(mem, value) \
+    ({ abort (); (*mem) = (value); })
+
+# define __arch_atomic_exchange_and_add_64(mem, value) \
+    ({ abort (); (*mem) = (value); })
+#endif
+
+#define __arch_atomic_exchange_32(mem, value) \
+  ({ \
+    __typeof (*mem) __val; \
+    __asm __volatile (__ARCH_REL_INSTR "\n" \
+                      "1: lwarx %0,0,%2\n" \
+                      " stwcx. %3,0,%2\n" \
+                      " bne- 1b" \
+                      : "=&r" (__val), "=m" (*mem) \
+                      : "r" (mem), "r" (value), "1" (*mem) \
+                      : "cr0"); \
+    __val; \
+  })
+
+#define __arch_atomic_exchange_and_add_32(mem, value) \
+  ({ \
+    __typeof (*mem) __val, __tmp; \
+    __asm __volatile ("1: lwarx %0,0,%3\n" \
+                      " addi %1,%0,%4\n" \
+                      " stwcx. %1,0,%3\n" \
+                      " bne- 1b" \
+                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+                      : "r" (mem), "I" (value), "2" (*mem) \
+                      : "cr0"); \
+    __val; \
+  })
+
+#define atomic_exchange(mem, value) \
+  ({ \
+    __typeof (*(mem)) __result; \
+    if (sizeof (*mem) == 4) \
+      __result = __arch_atomic_exchange_32 ((mem), (value)); \
+    else if (sizeof (*mem) == 8) \
+      __result = __arch_atomic_exchange_64 ((mem), (value)); \
+    else \
       abort (); \
-     int __val, __tmp; \
-     __asm __volatile ("1: lwarx %0,0,%3\n" \
-                       " addi %1,%0,%4\n" \
-                       " stwcx. %1,0,%3\n" \
-                       " bne- 1b" \
-                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
-                       : "r" (mem), "I" (value), "2" (*mem) \
-                       : "cr0"); \
-     __val; })
+    __result; \
+  })
+
+#define atomic_exchange_and_add(mem, value) \
+  ({ \
+    __typeof (*(mem)) __result; \
+    if (sizeof (*mem) == 4) \
+      __result = __arch_atomic_exchange_and_add_32 ((mem), (value)); \
+    else if (sizeof (*mem) == 8) \
+      __result = __arch_atomic_exchange_and_add_64 ((mem), (value)); \
+    else \
+      abort (); \
+    __result; \
+  })
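
With the dispatch on sizeof in place, callers no longer need to special-case 8-byte operands themselves. A hypothetical use — not from this commit, and assuming the glibc-internal atomic.h is in scope:

/* 32-bit and 64-bit operands now take the matching path;
   any other operand size still aborts at runtime.  */
static int counter32;
static long counter64;          /* 8 bytes on powerpc64 */

static void
bump_counters (void)
{
  int old32 = atomic_exchange_and_add (&counter32, 1);   /* _32 path */
  long old64 = atomic_exchange_and_add (&counter64, 1);  /* _64 path */
  (void) old32;
  (void) old64;
}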
@@ -156,7 +208,6 @@ typedef uintmax_t uatomic_max_t;
   })
 
 #define atomic_full_barrier() __asm ("sync" ::: "memory")
 #ifdef __powerpc64__
 # define atomic_read_barrier() __asm ("lwsync" ::: "memory")
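
The second hunk shows the barrier definitions: atomic_full_barrier is a full sync, while on powerpc64 atomic_read_barrier can use the cheaper lwsync. A sketch of the usual producer/consumer pairing for these barriers (illustrative only, not code from this file):

static int payload;
static volatile int ready;

static void
publish (int v)
{
  payload = v;
  atomic_full_barrier ();   /* sync: payload store ordered before the flag */
  ready = 1;
}

static int
wait_and_read (void)
{
  while (ready == 0)
    ;
  atomic_read_barrier ();   /* lwsync on powerpc64: later loads cannot
                               be satisfied ahead of the flag load */
  return payload;
}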