re PR libstdc++/3584 (arm-specific atomic operations not atomic)

PR libstdc++/3584
* config/cpu/arm/atomicity.h (__exchange_and_add): Don't try to use
ASM sequences that don't give us atomic addition operations.  Instead
just add a comment explaining why it doesn't work.
(__atomic_add): Just use __exchange_and_add.
(__test_and_set, __always_swap): Delete.

From-SVN: r57778
Richard Earnshaw <rearnsha@arm.com>, 2002-10-03 15:58:29 +00:00 (committed by Richard Earnshaw)
parent e37383993d
commit bfc1eae300
2 changed files with 23 additions and 173 deletions
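
For background (not part of the commit itself): the sequences removed below try to emulate an atomic add with a retry/undo protocol built around the SWP instruction. A rough C rendition of that protocol, modelling SWP with a generic atomic exchange, makes the failure mode visible; the names swp_model and old_style_exchange_and_add are invented for this sketch.

/* Sketch only -- not part of this commit.  swp_model () stands in for
   the ARM SWP instruction: atomically store NEWVAL into *P and return
   the previous contents.  */
static inline int
swp_model (volatile int *p, int newval)
{
  return __atomic_exchange_n (p, newval, __ATOMIC_SEQ_CST);
}

/* Rough C equivalent of the asm loops this patch deletes.  */
static inline int
old_style_exchange_and_add (volatile int *mem, int val)
{
  int old, seen;
  do
    {
      old  = *mem;                        /* read the current value        */
      seen = swp_model (mem, old + val);  /* publish a speculative result  */
      if (seen != old)                    /* lost a race since the read... */
        swp_model (mem, seen);            /* ...so store the old value back */
    }
  while (seen != old);
  return old;
}

Between the first swap and the undo, the speculative value is visible to every other CPU or thread; a concurrent updater can read it, apply its own increment, and then have that increment wiped out by the undo. That lost-update window is why the sequences "don't give us atomic addition operations" and why the patch replaces them with a plain, documented, non-atomic version.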

libstdc++-v3/ChangeLog

@@ -1,3 +1,12 @@
+2002-10-03  Richard Earnshaw  <rearnsha@arm.com>
+
+	PR libstdc++/3584
+	* config/cpu/arm/atomicity.h (__exchange_and_add): Don't try to use
+	ASM sequences that don't give us atomic addition operations.  Instead
+	just add a comment explaining why it doesn't work.
+	(__atomic_add): Just use __exchange_and_add.
+	(__test_and_set, __always_swap): Delete.
+
 2002-10-02  Andreas Jaeger  <aj@suse.de>
 
 	* config/abi/x86_64-unknown-linux-gnu/baseline_symbols.txt:

libstdc++-v3/config/cpu/arm/atomicity.h

@@ -32,51 +32,23 @@
 typedef int _Atomic_word;
 
+/* It isn't possible to write an atomic add instruction using the ARM
+   SWP instruction without using either a global guard variable or a
+   guard bit somewhere in the Atomic word.  However, even with a guard
+   bit we need to understand the thread model (if any) in order to
+   make co-operatively threaded applications work correctly.
+
+   The previous Thumb-based implementations were also completely
+   broken, since they failed to switch back into Thumb mode (Gas bug,
+   I think).  */
+
 static inline _Atomic_word
 __attribute__ ((__unused__))
 __exchange_and_add (volatile _Atomic_word* __mem, int __val)
 {
-  _Atomic_word __tmp, __tmp2, __result;
-#ifdef __thumb__
-  /* Since this function is inlined, we can't be sure of the alignment.  */
-  __asm__ __volatile__ (
-        "ldr     %0, 4f \n\t"
-        "bx      %0 \n\t"
-        ".align 0 \n"
-        "4:\t"
-        ".word   0f \n\t"
-        ".code 32 \n"
-        "0:\t"
-        "ldr     %0, [%3] \n\t"
-        "add     %1, %0, %4 \n\t"
-        "swp     %2, %1, [%3] \n\t"
-        "cmp     %0, %2 \n\t"
-        "swpne   %1, %2, [%3] \n\t"
-        "bne     0b \n\t"
-        "ldr     %1, 1f \n\t"
-        "bx      %1 \n"
-        "1:\t"
-        ".word   2f \n\t"
-        ".code 16 \n"
-        "2:\n"
-        : "=&l"(__result), "=&r"(__tmp), "=&r"(__tmp2)
-        : "r" (__mem), "r"(__val)
-        : "cc", "memory");
-#else
-  __asm__ __volatile__ (
-        "\n"
-        "0:\t"
-        "ldr     %0, [%3] \n\t"
-        "add     %1, %0, %4 \n\t"
-        "swp     %2, %1, [%3] \n\t"
-        "cmp     %0, %2 \n\t"
-        "swpne   %1, %2, [%3] \n\t"
-        "bne     0b \n\t"
-        ""
-        : "=&r"(__result), "=&r"(__tmp), "=&r"(__tmp2)
-        : "r" (__mem), "r"(__val)
-        : "cc", "memory");
-#endif
+  _Atomic_word __result = *__mem;
+  *__mem = __result + __val;
   return __result;
 }
@@ -84,138 +56,7 @@ static inline void
 __attribute__ ((__unused__))
 __atomic_add (volatile _Atomic_word *__mem, int __val)
 {
-  _Atomic_word __tmp, __tmp2, __tmp3;
-#ifdef __thumb__
-  /* Since this function is inlined, we can't be sure of the alignment.  */
-  __asm__ __volatile__ (
-        "ldr     %0, 4f \n\t"
-        "bx      %0 \n\t"
-        ".align 0\n"
-        "4:\t"
-        ".word   0f \n\t"
-        ".code 32 \n"
-        "0:\t"
-        "ldr     %0, [%3] \n\t"
-        "add     %1, %0, %4 \n\t"
-        "swp     %2, %1, [%3] \n\t"
-        "cmp     %0, %2 \n\t"
-        "swpne   %1, %2, [%3] \n\t"
-        "bne     0b \n\t"
-        "ldr     %1, 1f \n\t"
-        "bx      %1 \n"
-        "1:\t"
-        ".word   2f \n\t"
-        ".code 16 \n"
-        "2:\n"
-        : "=&l"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
-        : "r" (__mem), "r"(__val)
-        : "cc", "memory");
-#else
-  __asm__ __volatile__ (
-        "\n"
-        "0:\t"
-        "ldr     %0, [%3] \n\t"
-        "add     %1, %0, %4 \n\t"
-        "swp     %2, %1, [%3] \n\t"
-        "cmp     %0, %2 \n\t"
-        "swpne   %1, %2, [%3] \n\t"
-        "bne     0b \n\t"
-        ""
-        : "=&r"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
-        : "r" (__mem), "r"(__val)
-        : "cc", "memory");
-#endif
-}
-
-static inline long
-__attribute__ ((__unused__))
-__always_swap (volatile long *__p, long __newval)
-{
-  long __result;
-#ifdef __thumb__
-  long __tmp;
-  /* Since this function is inlined, we can't be sure of the alignment.  */
-  __asm__ __volatile__ (
-        "ldr     %0, 4f \n\t"
-        "bx      %0 \n\t"
-        ".align 0 \n"
-        "4:\t"
-        ".word   0f \n\t"
-        ".code 32\n"
-        "0:\t"
-        "swp     %0, %3, [%2] \n\t"
-        "ldr     %1, 1f \n\t"
-        "bx      %1 \n"
-        "1:\t"
-        ".word   2f \n\t"
-        ".code 16 \n"
-        "2:\n"
-        : "=&l"(__result), "=&r"(__tmp)
-        : "r"(__p), "r"(__newval)
-        : "memory");
-#else
-  __asm__ __volatile__ (
-        "\n\t"
-        "swp     %0, %2, [%1] \n\t"
-        ""
-        : "=&r"(__result)
-        : "r"(__p), "r"(__newval)
-        : "memory");
-#endif
-  return __result;
-}
-
-static inline int
-__attribute__ ((__unused__))
-__test_and_set (volatile long *__p, long __newval)
-{
-  int __result;
-  long __tmp;
-#ifdef __thumb__
-  /* Since this function is inlined, we can't be sure of the alignment.  */
-  __asm__ __volatile__ (
-        "ldr     %0, 4f \n\t"
-        "bx      %0 \n\t"
-        ".align 0 \n"
-        "4:\t"
-        ".word   0f \n\t"
-        ".code 32 \n"
-        "0:\t"
-        "ldr     %0, [%2] \n\t"
-        "cmp     %0, #0 \n\t"
-        "bne     1f \n\t"
-        "swp     %1, %3, [%2] \n\t"
-        "cmp     %0, %1 \n\t"
-        "swpne   %0, %1, [%2] \n\t"
-        "bne     0b \n"
-        "1:\t"
-        "ldr     %1, 2f \n\t"
-        "bx      %1 \n"
-        "2:\t"
-        ".word   3f \n\t"
-        ".code 16 \n"
-        "3:"
-        : "=&l"(__result), "=r" (__tmp)
-        : "r"(__p), "r"(__newval)
-        : "cc", "memory");
-#else
-  __asm__ __volatile__ (
-        "\n"
-        "0:\t"
-        "ldr     %0, [%2] \n\t"
-        "cmp     %0, #0 \n\t"
-        "bne     1f \n\t"
-        "swp     %1, %3, [%2] \n\t"
-        "cmp     %0, %1 \n\t"
-        "swpne   %0, %1, [%2] \n\t"
-        "bne     0b \n"
-        "1:\n\t"
-        ""
-        : "=&r"(__result), "=r" (__tmp)
-        : "r"(__p), "r"(__newval)
-        : "cc", "memory");
-#endif
-  return __result;
+  __exchange_and_add (__mem, __val);
 }
 
 #endif /* atomicity.h */
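
As the new comment says, a genuinely atomic add on this architecture would need a global guard variable (or a guard bit in the word) plus knowledge of the thread model. A minimal sketch of the guard-variable approach, assuming an ARM-state (non-Thumb) target and sidestepping the thread-model questions the comment raises; the guard word and function name here are invented for illustration and are not what the commit installs:

typedef int _Atomic_word;

/* One global word acts as a spin lock; SWP is used only to acquire it,
   and the add is an ordinary read-modify-write done while the guard is
   held.  */
static volatile int _Atomicity_guard = 0;     /* 0 = free, 1 = held */

static inline _Atomic_word
guarded_exchange_and_add (volatile _Atomic_word *mem, int val)
{
  _Atomic_word result;
  int held;

  do          /* spin until the swap returns 0, i.e. we took the guard */
    {
      int one = 1;
      __asm__ __volatile__ ("swp %0, %1, [%2]"
                            : "=&r" (held)
                            : "r" (one), "r" (&_Atomicity_guard)
                            : "memory");
    }
  while (held != 0);

  result = *mem;                /* serialised against other updaters */
  *mem = result + val;

  _Atomicity_guard = 0;         /* release the guard */
  return result;
}

Even this only works if every updater goes through the same guard, and on a co-operatively threaded system the spin can deadlock if the lock holder is never rescheduled -- which is exactly the thread-model concern the new comment points at.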