linux-atomic.c: New file.

* pa/linux-atomic.c: New file.
	* pa/t-linux (LIB2FUNCS_STATIC_EXTRA): Define.
	* pa/t-linux64 (LIB2FUNCS_STATIC_EXTRA): Define.

From-SVN: r140091
This commit is contained in:
Helge Deller 2008-09-07 16:53:31 +00:00 committed by John David Anglin
parent a834e097ea
commit d6ab7b032f
4 changed files with 309 additions and 0 deletions

View File

@ -1,3 +1,9 @@
2008-09-07 Helge Deller <deller@gmx.de>
* pa/linux-atomic.c: New file.
* pa/t-linux (LIB2FUNCS_STATIC_EXTRA): Define.
* pa/t-linux64 (LIB2FUNCS_STATIC_EXTRA): Define.
2008-09-07 Richard Guenther <rguenther@suse.de>
Ira Rosen <irar@il.ibm.com>

View File

@ -0,0 +1,300 @@
/* Linux-specific atomic operations for PA Linux.
Copyright (C) 2008 Free Software Foundation, Inc.
Based on code contributed by CodeSourcery for ARM EABI Linux.
Modifications for PA Linux by Helge Deller <deller@gmx.de>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#include <errno.h>
/* All PA-RISC implementations supported by linux have strongly
ordered loads and stores. Only cache flushes and purges can be
delayed. The data cache implementations are all globally
coherent. Thus, there is no need to synchronize memory accesses.
GCC automatically issues an asm memory barrier when it encounters
a __sync_synchronize builtin. Thus, we do not need to define this
builtin.
We implement byte, short and int versions of each atomic operation
using the kernel helper defined below. There is no support for
64-bit operations yet. */
/* A privileged instruction to crash a userspace program with SIGILL. */
#define ABORT_INSTRUCTION asm ("iitlbp %r0,(%sr0, %r0)")
/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace). */
#define LWS_CAS (sizeof(unsigned long) == 4 ? 0 : 1)
/* Kernel helper for compare-and-exchange a 32-bit value. */
/* Atomically replaces *MEM with NEWVAL if *MEM equals OLDVAL, via the
   kernel's light-weight syscall (LWS) gateway.  Returns the kernel's
   status code in r21; callers in this file treat zero as "swap
   committed" and retry on any non-zero value.  Crashes the process
   with SIGILL when the kernel reports EFAULT or ENOSYS, since there
   is no way to recover in userspace.  */
static inline long
__kernel_cmpxchg (int oldval, int newval, int *mem)
{
/* The LWS ABI uses fixed registers: r26 = address, r25 = expected old
   value, r24 = new value; results come back in r28 (return value) and
   r21 (error code).  */
register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
register long lws_ret asm("r28");  /* bound but not examined here */
register long lws_errno asm("r21");
register int lws_old asm("r25") = oldval;
register int lws_new asm("r24") = newval;
/* Branch-and-link to offset 0xb0 in the gateway page (space sr2); the
   delay slot loads the LWS function index (LWS_CAS) into r20.  */
asm volatile ( "ble 0xb0(%%sr2, %%r0) \n\t"
"ldi %5, %%r20 \n\t"
: "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
"=r" (lws_old), "=r" (lws_new)
: "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
);
/* EFAULT: bad address.  ENOSYS: kernel lacks the LWS entry point.
   Both are unrecoverable; raise SIGILL.  */
if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
ABORT_INSTRUCTION;
return lws_errno;
}
/* All entry points get hidden visibility so references from within
   libgcc bind locally and are not exported from shared objects.  */
#define HIDDEN __attribute__ ((visibility ("hidden")))
/* Big endian masks */
/* PA-RISC is big-endian: byte 0 is the most significant byte of a
   word.  The byte offset within a word is converted to a shift by
   XOR-ing with these constants (24 for bytes, 16 for halfwords).  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16
/* Unshifted value masks for 1- and 2-byte quantities.  */
#define MASK_1 0xffu
#define MASK_2 0xffffu
/* Generate the word-sized __sync_fetch_and_<OP>_4 entry point:
   atomically apply OP to *PTR and return the value *PTR held
   beforehand.  The update is retried through the kernel
   compare-and-swap helper until it commits.  */
#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_fetch_and_##OP##_4 (int *ptr, int val)				\
  {									\
    int prev, failure;							\
									\
    for (;;)								\
      {									\
	prev = *ptr;							\
	failure = __kernel_cmpxchg (prev, PFX_OP prev INF_OP val, ptr);	\
	if (failure == 0)						\
	  return prev;							\
      }									\
  }

FETCH_AND_OP_WORD (add,   , +)
FETCH_AND_OP_WORD (sub,   , -)
FETCH_AND_OP_WORD (or,    , |)
FETCH_AND_OP_WORD (and,   , &)
FETCH_AND_OP_WORD (xor,   , ^)
FETCH_AND_OP_WORD (nand, ~, &)
/* Token-pasting helpers: the RETURN argument of SUBWORD_SYNC_OP
   selects which flavor is generated.  NAME_oldval yields
   __sync_fetch_and_<op> (returns the prior value); NAME_newval yields
   __sync_<op>_and_fetch (returns the updated value).  */
#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
subword-sized quantities. */
/* The kernel helper only swaps aligned 32-bit words, so subword
   operations work on the containing word: extract the subword, apply
   the operation, splice the result back, and CAS the whole word.  */
#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
TYPE HIDDEN \
NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \
{ \
/* Aligned word containing the addressed subword. */ \
int *wordptr = (int *) ((unsigned long) ptr & ~3); \
unsigned int mask, shift, oldval, newval; \
int failure; \
\
/* Bit offset of the subword, corrected for big-endian layout. */ \
shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
mask = MASK_##WIDTH << shift; \
\
do { \
oldval = *wordptr; \
/* Operate on the extracted subword, reposition the result, and \
   preserve the untouched bytes of the word. */ \
newval = ((PFX_OP ((oldval & mask) >> shift) \
INF_OP (unsigned int) val) << shift) & mask; \
newval |= oldval & ~mask; \
failure = __kernel_cmpxchg (oldval, newval, wordptr); \
} while (failure != 0); \
\
/* RETURN names either `oldval' or `newval'; extract the subword. */ \
return (RETURN & mask) >> shift; \
}
SUBWORD_SYNC_OP (add, , +, short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, oldval)
SUBWORD_SYNC_OP (add, , +, char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, char, 1, oldval)
/* Generate the word-sized __sync_<OP>_and_fetch_4 entry point:
   atomically apply OP to *PTR and return the freshly computed
   value.  */
#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_##OP##_and_fetch_4 (int *ptr, int val)				\
  {									\
    int prev, failure;							\
									\
    for (;;)								\
      {									\
	prev = *ptr;							\
	failure = __kernel_cmpxchg (prev, PFX_OP prev INF_OP val, ptr);	\
	if (failure == 0)						\
	  return PFX_OP prev INF_OP val;				\
      }									\
  }

OP_AND_FETCH_WORD (add,   , +)
OP_AND_FETCH_WORD (sub,   , -)
OP_AND_FETCH_WORD (or,    , |)
OP_AND_FETCH_WORD (and,   , &)
OP_AND_FETCH_WORD (xor,   , ^)
OP_AND_FETCH_WORD (nand, ~, &)

/* Subword __sync_<op>_and_fetch variants (RETURN = newval).  */
SUBWORD_SYNC_OP (add,   , +, short, 2, newval)
SUBWORD_SYNC_OP (sub,   , -, short, 2, newval)
SUBWORD_SYNC_OP (or,    , |, short, 2, newval)
SUBWORD_SYNC_OP (and,   , &, short, 2, newval)
SUBWORD_SYNC_OP (xor,   , ^, short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, newval)
SUBWORD_SYNC_OP (add,   , +, char, 1, newval)
SUBWORD_SYNC_OP (sub,   , -, char, 1, newval)
SUBWORD_SYNC_OP (or,    , |, char, 1, newval)
SUBWORD_SYNC_OP (and,   , &, char, 1, newval)
SUBWORD_SYNC_OP (xor,   , ^, char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, char, 1, newval)
/* Atomically store NEWVAL into *PTR when *PTR equals OLDVAL.  Return
   the value *PTR was observed to hold: OLDVAL on success, the
   differing value otherwise.  */
int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int observed, failure;

  for (;;)
    {
      observed = *ptr;
      /* Bail out early once the comparison cannot succeed.  */
      if (observed != oldval)
	return observed;
      failure = __kernel_cmpxchg (observed, newval, ptr);
      if (failure == 0)
	return oldval;
      /* The kernel helper did not commit; re-read and retry.  */
    }
}
/* Value-returning compare-and-swap for 1- and 2-byte quantities,
   implemented by CAS-ing the aligned word that contains the subword.
   Returns OLDVAL on success, or the differing observed value.  */
#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
TYPE HIDDEN \
__sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
TYPE newval) \
{ \
/* Aligned word containing the subword. */ \
int *wordptr = (int *)((unsigned long) ptr & ~3), fail; \
unsigned int mask, shift, actual_oldval, actual_newval; \
\
/* Bit offset of the subword, corrected for big-endian layout. */ \
shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
mask = MASK_##WIDTH << shift; \
\
while (1) \
{ \
actual_oldval = *wordptr; \
\
/* The addressed subword differs from OLDVAL: fail the CAS and \
   report what was seen. */ \
if (((actual_oldval & mask) >> shift) != (unsigned int) oldval) \
return (actual_oldval & mask) >> shift; \
\
/* Splice NEWVAL into its position, keeping the other bytes. */ \
actual_newval = (actual_oldval & ~mask) \
| (((unsigned int) newval << shift) & mask); \
\
fail = __kernel_cmpxchg (actual_oldval, actual_newval, \
wordptr); \
\
if (!fail) \
return oldval; \
} \
}
SUBWORD_VAL_CAS (short, 2)
SUBWORD_VAL_CAS (char, 1)
/* Local boolean type; <stdbool.h> is not included by this file.  */
typedef unsigned char bool;

/* Atomically store NEWVAL into *PTR when *PTR equals OLDVAL; report
   whether the swap was committed.  */
bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  /* The kernel helper reports zero exactly when the exchange was
     performed.  */
  return __kernel_cmpxchg (oldval, newval, ptr) == 0;
}
/* Boolean compare-and-swap for 1- and 2-byte quantities, layered on
   the value-returning subword CAS defined above.  */
#define SUBWORD_BOOL_CAS(TYPE, WIDTH)					\
  bool HIDDEN								\
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
					TYPE newval)			\
  {									\
    TYPE prev = __sync_val_compare_and_swap_##WIDTH (ptr, oldval,	\
						     newval);		\
    /* The swap committed iff the observed value matched OLDVAL.  */	\
    return (prev == oldval);						\
  }

SUBWORD_BOOL_CAS (short, 2)
SUBWORD_BOOL_CAS (char,  1)
/* Atomically store VAL into *PTR and return the previous contents —
   an unconditional exchange built from the CAS helper.  */
int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int prev, failure;

  for (;;)
    {
      prev = *ptr;
      failure = __kernel_cmpxchg (prev, val, ptr);
      if (failure == 0)
	return prev;
    }
}
/* Unconditional atomic exchange for 1- and 2-byte quantities,
   implemented by CAS-ing the aligned containing word.  Returns the
   previous subword value.  */
#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \
TYPE HIDDEN \
__sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
{ \
int failure; \
unsigned int oldval, newval, shift, mask; \
/* Aligned word containing the subword. */ \
int *wordptr = (int *) ((unsigned long) ptr & ~3); \
\
/* Bit offset of the subword, corrected for big-endian layout. */ \
shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
mask = MASK_##WIDTH << shift; \
\
do { \
oldval = *wordptr; \
/* Replace only the addressed subword; keep the other bytes. */ \
newval = (oldval & ~mask) \
| (((unsigned int) val << shift) & mask); \
failure = __kernel_cmpxchg (oldval, newval, wordptr); \
} while (failure != 0); \
\
return (oldval & mask) >> shift; \
}
SUBWORD_TEST_AND_SET (short, 2)
SUBWORD_TEST_AND_SET (char, 1)
/* Lock release is a plain store of zero.  The file header states that
   PA-RISC loads and stores are strongly ordered, so no explicit
   barrier is issued.  NOTE(review): unlike every other operation in
   this file, this store bypasses the kernel LWS helper — verify it
   cannot race with an in-flight LWS compare-and-swap on the same
   word.  */
#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
void HIDDEN \
__sync_lock_release_##WIDTH (TYPE *ptr) \
{ \
*ptr = 0; \
}
SYNC_LOCK_RELEASE (int, 4)
SYNC_LOCK_RELEASE (short, 2)
SYNC_LOCK_RELEASE (char, 1)

View File

@ -9,6 +9,7 @@ LIB1ASMSRC = pa/milli64.S
# Build libgcc2 objects as PIC for the shared libgcc, with Linux/ELF
# target defines.
TARGET_LIBGCC2_CFLAGS = -fPIC -DELF=1 -DLINUX=1
# fptr.c is compiled into libgcc; presumably PA function-pointer
# support — see config/pa/fptr.c for details.
LIB2FUNCS_EXTRA=fptr.c
# Linux-specific __sync_* atomic operations, added to the static
# portion of libgcc.
LIB2FUNCS_STATIC_EXTRA = $(srcdir)/config/pa/linux-atomic.c
# Copy rule: rebuild the local fptr.c from the config directory.
fptr.c: $(srcdir)/config/pa/fptr.c
rm -f fptr.c

View File

@ -8,5 +8,7 @@ LIB1ASMSRC = pa/milli64.S
# Actually, hppa64 is always PIC but adding -fPIC does no harm.
CRTSTUFF_T_CFLAGS_S = -fPIC
# Linux-specific __sync_* atomic operations, added to the static
# portion of libgcc.
LIB2FUNCS_STATIC_EXTRA = $(srcdir)/config/pa/linux-atomic.c
# Compile libgcc2.a as PIC.
TARGET_LIBGCC2_CFLAGS = -fPIC -Dpa64=1 -DELF=1