openrisc: add optimized atomic operations

Implement the atomic operations using the l.lwa and l.swa instruction
pair. Most OpenRISC processor cores now provide these instructions;
where they are not available, emulation is provided.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
[shorne@gmail.com: remove OPENRISC_HAVE_INST_LWA_SWA config suggested by
Alan Cox https://lkml.org/lkml/2014/7/23/666]
[shorne@gmail.com: expand to implement all ops suggested by Peter
Zijlstra https://lkml.org/lkml/2017/2/20/317]
Signed-off-by: Stafford Horne <shorne@gmail.com>
Author: Stefan Kristiansson, 2014-05-13 22:30:56 +03:00 (committed by Stafford Horne)
parent 1159517253
commit bc19598f1d
3 changed files with 128 additions and 1 deletion
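The l.lwa/l.swa pair works like load-linked/store-conditional: l.swa only succeeds if nothing else has written the reserved location since the matching l.lwa, so every operation in this patch is a read-modify-write loop that retries until the store lands. As a rough C-level sketch of what atomic_add_return() amounts to (illustrative only, with a C11 weak compare-exchange standing in for the hardware reservation; not kernel code):

#include <stdatomic.h>

/* Illustrative sketch: models the l.lwa/l.swa retry loop with a C11
 * weak compare-exchange standing in for the store-conditional. */
static int sketch_add_return(atomic_int *counter, int i)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);
	int new;

	do {
		new = old + i;
		/* on failure, old is reloaded and the loop retries,
		 * just as l.bnf branches back to the l.lwa above */
	} while (!atomic_compare_exchange_weak(counter, &old, new));

	return new;
}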

@@ -1,7 +1,6 @@
header-y += ucontext.h
generic-y += atomic.h
generic-y += auxvec.h
generic-y += barrier.h
generic-y += bitsperlong.h

@@ -0,0 +1,126 @@
/*
* Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
__asm__ __volatile__( \
"1: l.lwa %0,0(%1) \n" \
" l." #op " %0,%0,%2 \n" \
" l.swa 0(%1),%0 \n" \
" l.bnf 1b \n" \
" l.nop \n" \
: "=&r"(tmp) \
: "r"(&v->counter), "r"(i) \
: "cc", "memory"); \
}

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
__asm__ __volatile__( \
"1: l.lwa %0,0(%1) \n" \
" l." #op " %0,%0,%2 \n" \
" l.swa 0(%1),%0 \n" \
" l.bnf 1b \n" \
" l.nop \n" \
: "=&r"(tmp) \
: "r"(&v->counter), "r"(i) \
: "cc", "memory"); \
\
return tmp; \
}

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int tmp, old; \
\
__asm__ __volatile__( \
"1: l.lwa %0,0(%2) \n" \
" l." #op " %1,%0,%3 \n" \
" l.swa 0(%2),%1 \n" \
" l.bnf 1b \n" \
" l.nop \n" \
: "=&r"(old), "=&r"(tmp) \
: "r"(&v->counter), "r"(i) \
: "cc", "memory"); \
\
return old; \
}

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_return atomic_add_return
#define atomic_sub_return atomic_sub_return
#define atomic_fetch_add atomic_fetch_add
#define atomic_fetch_sub atomic_fetch_sub
#define atomic_fetch_and atomic_fetch_and
#define atomic_fetch_or atomic_fetch_or
#define atomic_fetch_xor atomic_fetch_xor
#define atomic_and atomic_and
#define atomic_or atomic_or
#define atomic_xor atomic_xor

/*
* Atomically add a to v->counter as long as v is not already u.
* Returns the original value at v->counter.
*
* This is often used through atomic_inc_not_zero()
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;
__asm__ __volatile__(
"1: l.lwa %0, 0(%2) \n"
" l.sfeq %0, %4 \n"
" l.bf 2f \n"
" l.add %1, %0, %3 \n"
" l.swa 0(%2), %1 \n"
" l.bnf 1b \n"
" l.nop \n"
"2: \n"
: "=&r"(old), "=&r" (tmp)
: "r"(&v->counter), "r"(a), "r"(u)
: "cc", "memory");
return old;
}
#define __atomic_add_unless __atomic_add_unless

#include <asm-generic/atomic.h>

#endif /* __ASM_OPENRISC_ATOMIC_H */
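As the comment above notes, __atomic_add_unless() is usually reached through atomic_inc_not_zero(): the add only happens while the counter still differs from u, and the old value is returned so the caller can tell whether it happened. A hedged usage sketch, where try_get_ref() is a hypothetical helper and not part of this commit:

/* Hypothetical caller, for illustration only: take a reference only
 * while the object's refcount has not already dropped to zero. */
static inline bool try_get_ref(atomic_t *refcount)
{
	/* returns the previous value; the increment was performed
	 * only if that value was not 0 */
	return __atomic_add_unless(refcount, 1, 0) != 0;
}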

@@ -223,6 +223,7 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))

#ifndef __atomic_add_unless
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
@@ -231,5 +232,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
c = old;
return c;
}
#endif

#endif /* __ASM_GENERIC_ATOMIC_H */
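The added #ifndef/#endif pair is what lets an architecture-specific __atomic_add_unless(), such as the OpenRISC one above, take precedence: asm-generic now only supplies its own version when the architecture has not defined the macro. For context, the generic fallback being guarded is a cmpxchg() retry loop roughly along these lines (paraphrased sketch, not quoted verbatim from the tree):

/* Paraphrased sketch of the asm-generic fallback: retry cmpxchg()
 * until either the add lands or the counter is seen to equal u. */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}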