atomicity.h: Remove tricky .subsection bits.

2001-01-25  Richard Henderson  <rth@redhat.com>

	* config/cpu/alpha/bits/atomicity.h: Remove tricky .subsection
	bits. Fixes Tru64 build issues.

From-SVN: r39290
This commit is contained in:
Richard Henderson 2001-01-26 15:07:38 -08:00 committed by Benjamin Kosnik
parent f86f42a8ff
commit d9de4b22f6
2 changed files with 37 additions and 46 deletions

View File

@ -1,3 +1,8 @@
2001-01-25 Richard Henderson <rth@redhat.com>
* config/cpu/alpha/bits/atomicity.h: Remove tricky .subsection
bits. Fixes Tru64 build issues.
2001-01-25 Michael Sokolov <msokolov@ivan.Harhan.ORG>
* acinclude.m4 (GLIBCPP_CHECK_GNU_MAKE): Bourne shell portability bug fix.

View File

@ -1,5 +1,5 @@
/* Low-level functions for atomic operations. Alpha version.
Copyright (C) 1999 Free Software Foundation, Inc.
Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@ -20,6 +20,11 @@
#ifndef _BITS_ATOMICITY_H
#define _BITS_ATOMICITY_H 1
/* @@@ With gas we can play nice .subsection games to get the
non-predicted branch pointing forward. But Digital assemblers
don't understand those directives. This isn't a terribly
important issue, so just ignore it. */
typedef int _Atomic_word;
static inline _Atomic_word
@ -29,20 +34,14 @@ __exchange_and_add (volatile _Atomic_word* __mem, int __val)
register int __result, __tmp;
__asm__ __volatile__ (
"/* Inline exchange & add */\n"
"1:\t"
"ldl_l %0,%3\n\t"
"addl %0,%4,%1\n\t"
"stl_c %1,%2\n\t"
"beq %1,2f\n"
".subsection 1\n"
"2:\t"
"br 1b\n"
".previous\n\t"
"mb\n\t"
"/* End exchange & add */"
: "=&r"(__result), "=&r"(__tmp), "=m"(*__mem)
: "m" (*__mem), "r"(__val));
"\n$Lxadd_%=:\n\t"
"ldl_l %0,%3\n\t"
"addl %0,%4,%1\n\t"
"stl_c %1,%2\n\t"
"beq %1,$Lxadd_%=\n\t"
"mb"
: "=&r"(__result), "=&r"(__tmp), "=m"(*__mem)
: "m" (*__mem), "r"(__val));
return __result;
}
@ -54,20 +53,14 @@ __atomic_add (volatile _Atomic_word* __mem, int __val)
register _Atomic_word __result;
__asm__ __volatile__ (
"/* Inline exchange & add */\n"
"1:\t"
"ldl_l %0,%2\n\t"
"addl %0,%3,%0\n\t"
"stl_c %0,%1\n\t"
"beq %0,2f\n\t"
".subsection 1\n"
"2:\t"
"br 1b\n"
".previous\n\t"
"mb\n\t"
"/* End exchange & add */"
: "=&r"(__result), "=m"(*__mem)
: "m" (*__mem), "r"(__val));
"\n$Ladd_%=:\n\t"
"ldl_l %0,%2\n\t"
"addl %0,%3,%0\n\t"
"stl_c %0,%1\n\t"
"beq %0,$Ladd_%=\n\t"
"mb"
: "=&r"(__result), "=m"(*__mem)
: "m" (*__mem), "r"(__val));
}
static inline int
@ -77,23 +70,16 @@ __compare_and_swap (volatile long *__p, long __oldval, long __newval)
int __ret;
__asm__ __volatile__ (
"/* Inline compare & swap */\n"
"1:\t"
"ldq_l %0,%4\n\t"
"cmpeq %0,%2,%0\n\t"
"beq %0,3f\n\t"
"mov %3,%0\n\t"
"stq_c %0,%1\n\t"
"beq %0,2f\n\t"
".subsection 1\n"
"2:\t"
"br 1b\n"
".previous\n\t"
"3:\t"
"mb\n\t"
"/* End compare & swap */"
: "=&r"(__ret), "=m"(*__p)
: "r"(__oldval), "r"(__newval), "m"(*__p));
"\n$Lcas_%=:\n\t"
"ldq_l %0,%4\n\t"
"cmpeq %0,%2,%0\n\t"
"beq %0,3f\n\t"
"mov %3,%0\n\t"
"stq_c %0,%1\n\t"
"beq %0,$Lcas_%=\n\t"
"mb"
: "=&r"(__ret), "=m"(*__p)
: "r"(__oldval), "r"(__newval), "m"(*__p));
return __ret;
}