FRV: Add support for emulation of userspace atomic ops [try #2]

Use traps 120-126 to emulate atomic cmpxchg32, xchg32, and XOR-, OR-, AND-, SUB-
and ADD-to-memory operations for userspace.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    David Howells <dhowells@redhat.com>
Date:      2008-04-10 16:10:55 +01:00
Committer: Linus Torvalds
commit e31c243f98
parent 0c93d8e4d3
4 changed files with 268 additions and 1 deletion
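For orientation before the diffs: userspace reaches this emulation by loading its operands into gr8-gr10 and executing the corresponding TIRA instruction; atomic_operation() below hands the original memory value back in gr5 and the replacement value in gr9. Below is a hedged sketch of what a user-side cmpxchg32 wrapper might look like, assuming GCC's FRV register-variable syntax; the function name frv_user_cmpxchg32 is hypothetical and not part of this commit.

    /* Hypothetical userspace wrapper for trap 120 (cmpxchg32 emulation).
     * Register convention taken from atomic_operation() below:
     * gr8 = pointer, gr9 = expected value, gr10 = new value; the kernel
     * returns the original memory value in gr5 (and the replacement
     * value in gr9).  Illustrative only -- not part of the commit.
     */
    static inline unsigned int
    frv_user_cmpxchg32(unsigned int *ptr, unsigned int test, unsigned int new)
    {
            register unsigned int *r_ptr  asm("gr8")  = ptr;
            register unsigned int  r_test asm("gr9")  = test;
            register unsigned int  r_new  asm("gr10") = new;
            register unsigned int  r_old  asm("gr5");

            asm volatile("tira gr0,#120"
                         : "=r"(r_old), "+r"(r_test)
                         : "r"(r_ptr), "r"(r_new)
                         : "memory");
            return r_old;   /* equals 'test' iff the store happened */
    }

If the swap succeeds, gr5 comes back equal to the test value; otherwise it holds the conflicting value currently in memory.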

diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S

@@ -316,8 +316,14 @@ __trap_fixup_kernel_data_tlb_miss:
 	.section	.trap.vector
 	.org		TBR_TT_TRAP0 >> 2
 	.long		system_call
-	.rept		126
+	.rept		119
 	.long		__entry_unsupported_trap
 	.endr
+
+	# userspace atomic op emulation, traps 120-126
+	.rept		7
+	.long		__entry_atomic_op
+	.endr
+
 	.org		TBR_TT_BREAK >> 2
 	.long		__entry_debug_exception
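The arithmetic behind the .rept change above: slot n of .trap.vector sits at byte offset ((0x80 + n) << 4) >> 2, i.e. one 4-byte .long per trap, so after the system_call entry there are exactly 119 unsupported slots (traps 1-119) before the seven __entry_atomic_op slots (traps 120-126). A standalone sketch that checks this; OFF() is an illustrative helper, not kernel code:

    /* Check the .trap.vector layout implied by the .rept counts above. */
    #include <assert.h>

    #define OFF(n) (((0x80 + (n)) << 4) >> 2)   /* byte offset of trap n's slot */

    int main(void)
    {
            assert(OFF(1) == OFF(0) + 4);                /* one .long per trap */
            assert(OFF(120) == OFF(0) + (1 + 119) * 4);  /* system_call + 119 unsupported */
            assert(OFF(127) == OFF(120) + 7 * 4);        /* 7 atomic slots, then TBR_TT_BREAK */
            return 0;
    }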

diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S

@@ -654,6 +654,26 @@ __entry_debug_exception:
 	movgs		gr4,psr
 	jmpl		@(gr5,gr0)	; call ill_insn(esfr1,epcr0,esr0)
 
+###############################################################################
+#
+# handle atomic operation emulation for userspace
+#
+###############################################################################
+	.globl		__entry_atomic_op
+__entry_atomic_op:
+	LEDS		0x6012
+	sethi.p		%hi(atomic_operation),gr5
+	setlo		%lo(atomic_operation),gr5
+	movsg		esfr1,gr8
+	movsg		epcr0,gr9
+	movsg		esr0,gr10
+
+	# now that we've accessed the exception regs, we can enable exceptions
+	movsg		psr,gr4
+	ori		gr4,#PSR_ET,gr4
+	movgs		gr4,psr
+	jmpl		@(gr5,gr0)	; call atomic_operation(esfr1,epcr0,esr0)
+
 ###############################################################################
 #
 # handle media exception

diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c

@@ -100,6 +100,233 @@ asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, unsigned long esr0)
 	force_sig_info(info.si_signo, &info, current);
 } /* end illegal_instruction() */
 
+/*****************************************************************************/
+/*
+ * handle atomic operations with errors
+ * - arguments in gr8, gr9, gr10
+ * - original memory value placed in gr5
+ * - replacement memory value placed in gr9
+ */
+asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
+				 unsigned long esr0)
+{
+	static DEFINE_SPINLOCK(atomic_op_lock);
+	unsigned long x, y, z, *p;
+	mm_segment_t oldfs;
+	siginfo_t info;
+	int ret;
+
+	y = 0;
+	z = 0;
+
+	oldfs = get_fs();
+	if (!user_mode(__frame))
+		set_fs(KERNEL_DS);
+
+	switch (__frame->tbr & TBR_TT) {
+		/* TIRA gr0,#120
+		 * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
+		 */
+	case TBR_TT_ATOMIC_CMPXCHG32:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+		y = __frame->gr10;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			if (z != x)
+				goto done;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (z != x)
+					goto done2;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#121
+		 * u32 __atomic_kernel_xchg32(void *v, u32 new)
+		 */
+	case TBR_TT_ATOMIC_XCHG32:
+		p = (unsigned long *) __frame->gr8;
+		y = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#122
+		 * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_XOR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x ^ z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#123
+		 * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_OR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x | z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#124
+		 * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_AND:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x & z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#125
+		 * int __atomic_user_sub_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_SUB:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z - x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#126
+		 * int __atomic_user_add_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_ADD:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z + x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+	default:
+		BUG();
+	}
+
+done2:
+	spin_unlock_irq(&atomic_op_lock);
+done:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->gr5 = z;
+	__frame->gr9 = y;
+	return;
+
+error2:
+	spin_unlock_irq(&atomic_op_lock);
+error:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->pc -= 4;
+
+	die_if_kernel("-- Atomic Op Error --\n");
+
+	info.si_signo = SIGSEGV;
+	info.si_code = SEGV_ACCERR;
+	info.si_errno = 0;
+	info.si_addr = (void *) __frame->pc;
+	force_sig_info(info.si_signo, &info, current);
+} /* end atomic_operation() */
+
 /*****************************************************************************/
 /*
  *
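(A note on the shape of the code above, not part of the commit: each case reads the target word twice. The first get_user() runs with exceptions enabled, so it may fault and block while the page is brought in; the second, __get_user(), is done under atomic_op_lock with IRQs off, where faulting cannot be tolerated, so a nonzero return there simply drops the lock and retries from the top, faulting the page back in. A single static spinlock serializes every emulated operation, trading scalability for simplicity.)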

diff --git a/include/asm-frv/spr-regs.h b/include/asm-frv/spr-regs.h

@@ -99,9 +99,23 @@
 #define TBR_TT_TRAP1		(0x81 << 4)
 #define TBR_TT_TRAP2		(0x82 << 4)
 #define TBR_TT_TRAP3		(0x83 << 4)
+#define TBR_TT_TRAP120		(0xf8 << 4)
+#define TBR_TT_TRAP121		(0xf9 << 4)
+#define TBR_TT_TRAP122		(0xfa << 4)
+#define TBR_TT_TRAP123		(0xfb << 4)
+#define TBR_TT_TRAP124		(0xfc << 4)
+#define TBR_TT_TRAP125		(0xfd << 4)
+#define TBR_TT_TRAP126		(0xfe << 4)
 #define TBR_TT_BREAK		(0xff << 4)
 
+#define TBR_TT_ATOMIC_CMPXCHG32	TBR_TT_TRAP120
+#define TBR_TT_ATOMIC_XCHG32	TBR_TT_TRAP121
+#define TBR_TT_ATOMIC_XOR	TBR_TT_TRAP122
+#define TBR_TT_ATOMIC_OR	TBR_TT_TRAP123
+#define TBR_TT_ATOMIC_AND	TBR_TT_TRAP124
+#define TBR_TT_ATOMIC_SUB	TBR_TT_TRAP125
+#define TBR_TT_ATOMIC_ADD	TBR_TT_TRAP126
 #define __get_TBR()	({ unsigned long x; asm volatile("movsg tbr,%0" : "=r"(x)); x; })
 
 /*
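As a closing cross-check of the encoding above: the TT field stores (0x80 + n) << 4 for trap n, which is why traps 120-126 occupy type codes 0xf8-0xfe and butt up directly against TBR_TT_BREAK at 0xff. A standalone sketch; TBR_TT_TRAPN() is an illustrative helper, not part of the header:

    /* Verify the TBR_TT trap-number encoding used by the defines above. */
    #include <assert.h>

    #define TBR_TT_TRAPN(n) ((0x80 + (n)) << 4)

    int main(void)
    {
            assert(TBR_TT_TRAPN(1)   == (0x81 << 4));   /* TBR_TT_TRAP1 */
            assert(TBR_TT_TRAPN(120) == (0xf8 << 4));   /* TBR_TT_TRAP120 */
            assert(TBR_TT_TRAPN(126) == (0xfe << 4));   /* TBR_TT_TRAP126 */
            assert(TBR_TT_TRAPN(127) == (0xff << 4));   /* == TBR_TT_BREAK */
            return 0;
    }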