locking/atomic, riscv: Use s64 for atomic64
As a step towards making the atomic64 API use consistent types treewide,
let's have the RISC-V atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long on 64-bit. This will be converted in a subsequent
patch.

Otherwise, there should be no functional change as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-13-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
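For reference, a minimal userspace sketch of the shape of the change: an
atomic64_t whose counter is s64, with the accessors taking and returning s64
as in this patch. The typedef and the __atomic builtins below are stand-ins
for the kernel's fixed-width types and READ_ONCE()/WRITE_ONCE(); this is not
the kernel's actual header, only an illustration.

  #include <stdint.h>

  typedef int64_t s64;          /* stand-in for the kernel's s64 */

  typedef struct {
          s64 counter;
  } atomic64_t;

  /* Accessors use s64 end to end, mirroring the RISC-V change below. */
  static inline s64 atomic64_read(const atomic64_t *v)
  {
          /* __atomic_load_n stands in for the kernel's READ_ONCE() */
          return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
  }

  static inline void atomic64_set(atomic64_t *v, s64 i)
  {
          /* __atomic_store_n stands in for the kernel's WRITE_ONCE() */
          __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
  }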
commit 0754211847
parent 33e42ef571
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -38,11 +38,11 @@ static __always_inline void atomic_set(atomic_t *v, int i)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC64_INIT(i) { (i) }
-static __always_inline long atomic64_read(const atomic64_t *v)
+static __always_inline s64 atomic64_read(const atomic64_t *v)
 {
 	return READ_ONCE(v->counter);
 }
-static __always_inline void atomic64_set(atomic64_t *v, long i)
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
 {
 	WRITE_ONCE(v->counter, i);
 }
@@ -66,11 +66,11 @@ void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, I) \
-	ATOMIC_OP (op, asm_op, I, w, int, )
+	ATOMIC_OP (op, asm_op, I, w, int, )
 #else
 #define ATOMIC_OPS(op, asm_op, I) \
-	ATOMIC_OP (op, asm_op, I, w, int, ) \
-	ATOMIC_OP (op, asm_op, I, d, long, 64)
+	ATOMIC_OP (op, asm_op, I, w, int, ) \
+	ATOMIC_OP (op, asm_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(add, add, i)
@@ -127,14 +127,14 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I) \
-	ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
-	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
+	ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I) \
-	ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
-	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
-	ATOMIC_FETCH_OP( op, asm_op, I, d, long, 64) \
-	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
+	ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
+	ATOMIC_FETCH_OP( op, asm_op, I, d, s64, 64) \
+	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(add, add, +, i)
@@ -166,11 +166,11 @@ ATOMIC_OPS(sub, add, +, -i)
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, I) \
-	ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
+	ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
 #else
 #define ATOMIC_OPS(op, asm_op, I) \
-	ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
-	ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
+	ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
+	ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
 #endif
 
 ATOMIC_OPS(and, and, i)
@@ -219,9 +219,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 #define atomic_fetch_add_unless atomic_fetch_add_unless
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long prev, rc;
+	s64 prev;
+	long rc;
 
 	__asm__ __volatile__ (
 		"0: lr.d %[p], %[c]\n"
@@ -290,11 +291,11 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS() \
-	ATOMIC_OP( int, , 4)
+	ATOMIC_OP(int, , 4)
 #else
 #define ATOMIC_OPS() \
-	ATOMIC_OP( int, , 4) \
-	ATOMIC_OP(long, 64, 8)
+	ATOMIC_OP(int, , 4) \
+	ATOMIC_OP(s64, 64, 8)
 #endif
 
 ATOMIC_OPS()
@@ -332,9 +333,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
 #define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long atomic64_sub_if_positive(atomic64_t *v, long offset)
+static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
 {
-	long prev, rc;
+	s64 prev;
+	long rc;
 
 	__asm__ __volatile__ (
 		"0: lr.d %[p], %[c]\n"