arch: Remove spin_unlock_wait() arch-specific definitions

There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair.  This commit therefore removes the underlying arch-specific
arch_spin_unlock_wait() definitions for all architectures providing them.
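
For illustration only (this example is not part of the patch), a caller
that previously waited for the lock to be released:

        spin_unlock_wait(&mylock);      /* wait until mylock is not held */

could instead acquire and immediately release the lock, which also waits
for (and orders against) any critical section already in progress:

        spin_lock(&mylock);
        spin_unlock(&mylock);

Here "mylock" is a made-up spinlock_t used purely for illustration, not
one of the callers converted earlier in this series.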

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Boqun Feng <boqun.feng@gmail.com>
commit 952111d7db (parent d3a024abbc)
Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Date:   2017-06-29 15:53:02 -0700

 21 files changed, 5 insertions(+), 241 deletions(-)

@@ -16,11 +16,6 @@
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_is_locked(x) ((x)->lock != 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, !VAL);
-}
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
         return lock.lock == 0;

@@ -16,11 +16,6 @@
 #define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->slock, !VAL);
-}
 #ifdef CONFIG_ARC_HAS_LLSC
 static inline void arch_spin_lock(arch_spinlock_t *lock)

@@ -52,22 +52,6 @@ static inline void dsb_sev(void)
  * memory.
  */
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        u16 owner = READ_ONCE(lock->tickets.owner);
-        for (;;) {
-                arch_spinlock_t tmp = READ_ONCE(*lock);
-                if (tmp.tickets.owner == tmp.tickets.next ||
-                    tmp.tickets.owner != owner)
-                        break;
-                wfe();
-        }
-        smp_acquire__after_ctrl_dep();
-}
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 static inline void arch_spin_lock(arch_spinlock_t *lock)

@@ -26,58 +26,6 @@
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
  */
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        unsigned int tmp;
-        arch_spinlock_t lockval;
-        u32 owner;
-        /*
-         * Ensure prior spin_lock operations to other locks have completed
-         * on this CPU before we test whether "lock" is locked.
-         */
-        smp_mb();
-        owner = READ_ONCE(lock->owner) << 16;
-        asm volatile(
-" sevl\n"
-"1: wfe\n"
-"2: ldaxr %w0, %2\n"
-        /* Is the lock free? */
-" eor %w1, %w0, %w0, ror #16\n"
-" cbz %w1, 3f\n"
-        /* Lock taken -- has there been a subsequent unlock->lock transition? */
-" eor %w1, %w3, %w0, lsl #16\n"
-" cbz %w1, 1b\n"
-        /*
-         * The owner has been updated, so there was an unlock->lock
-         * transition that we missed. That means we can rely on the
-         * store-release of the unlock operation paired with the
-         * load-acquire of the lock operation to publish any of our
-         * previous stores to the new lock owner and therefore don't
-         * need to bother with the writeback below.
-         */
-" b 4f\n"
-"3:\n"
-        /*
-         * Serialise against any concurrent lockers by writing back the
-         * unlocked lock value
-         */
-        ARM64_LSE_ATOMIC_INSN(
-        /* LL/SC */
-" stxr %w1, %w0, %2\n"
-        __nops(2),
-        /* LSE atomics */
-" mov %w1, %w0\n"
-" cas %w0, %w0, %2\n"
-" eor %w1, %w1, %w0\n")
-        /* Somebody else wrote to the lock, GOTO 10 and reload the value */
-" cbnz %w1, 2b\n"
-"4:"
-        : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-        : "r" (owner)
-        : "memory");
-}
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
@@ -176,7 +124,11 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-        smp_mb(); /* See arch_spin_unlock_wait */
+        /*
+         * Ensure prior spin_lock operations to other locks have completed
+         * on this CPU before we test whether "lock" is locked.
+         */
+        smp_mb(); /* ^^^ */
         return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }

@@ -48,11 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
         __raw_spin_unlock_asm(&lock->lock);
 }
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, !VAL);
-}
 static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
         return __raw_uncached_fetch_asm(&rw->lock) > 0;

@@ -179,11 +179,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  */
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, !VAL);
-}
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)

@@ -76,22 +76,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
         ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
-static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        int *p = (int *)&lock->lock, ticket;
-        ia64_invala();
-        for (;;) {
-                asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
-                if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
-                        return;
-                cpu_relax();
-        }
-        smp_acquire__after_ctrl_dep();
-}
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
         long tmp = ACCESS_ONCE(lock->lock);
@@ -143,11 +127,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
         arch_spin_lock(lock);
 }
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        __ticket_spin_unlock_wait(lock);
-}
 #define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
 #define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)

@@ -30,11 +30,6 @@
 #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->slock, VAL > 0);
-}
 /**
  * arch_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable

@@ -15,11 +15,6 @@
  * locked.
  */
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, !VAL);
-}
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)

@@ -26,11 +26,6 @@
 #define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->slock, !VAL);
-}
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         asm volatile(

@@ -14,13 +14,6 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)
 #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
-{
-        volatile unsigned int *a = __ldcw_align(x);
-        smp_cond_load_acquire(a, VAL);
-}
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                         unsigned long flags)
 {

@@ -170,39 +170,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
         lock->slock = 0;
 }
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        arch_spinlock_t lock_val;
-        smp_mb();
-        /*
-         * Atomically load and store back the lock value (unchanged). This
-         * ensures that our observation of the lock value is ordered with
-         * respect to other lock operations.
-         */
-        __asm__ __volatile__(
-"1: " PPC_LWARX(%0, 0, %2, 0) "\n"
-" stwcx. %0, 0, %2\n"
-" bne- 1b\n"
-        : "=&r" (lock_val), "+m" (*lock)
-        : "r" (lock)
-        : "cr0", "xer");
-        if (arch_spin_value_unlocked(lock_val))
-                goto out;
-        while (lock->slock) {
-                HMT_low();
-                if (SHARED_PROCESSOR)
-                        __spin_yield(lock);
-        }
-        HMT_medium();
-out:
-        smp_mb();
-}
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.

@@ -98,13 +98,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
                 : "cc", "memory");
 }
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        while (arch_spin_is_locked(lock))
-                arch_spin_relax(lock);
-        smp_acquire__after_ctrl_dep();
-}
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.

@@ -29,11 +29,6 @@ static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new
 #define arch_spin_is_locked(x) ((x)->lock <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, VAL > 0);
-}
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         while (!__sl_cas(&lock->lock, 1, 0));

@@ -21,11 +21,6 @@
 #define arch_spin_is_locked(x) ((x)->lock <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, VAL > 0);
-}
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.

@@ -14,11 +14,6 @@
 #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->lock, !VAL);
-}
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         __asm__ __volatile__(

@@ -64,8 +64,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
         lock->current_ticket = old_ticket + TICKET_QUANTUM;
 }
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.

@@ -58,8 +58,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
         __insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
 }
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
 void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
 /* Grab the "next" ticket number and bump it atomically.

@@ -62,29 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_trylock);
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        u32 iterations = 0;
-        int curr = READ_ONCE(lock->current_ticket);
-        int next = READ_ONCE(lock->next_ticket);
-        /* Return immediately if unlocked. */
-        if (next == curr)
-                return;
-        /* Wait until the current locker has released the lock. */
-        do {
-                delay_backoff(iterations++);
-        } while (READ_ONCE(lock->current_ticket) == curr);
-        /*
-         * The TILE architecture doesn't do read speculation; therefore
-         * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-         */
-        barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
 /*
  * The low byte is always reserved to be the marker for a "tns" operation
  * since the low bit is set to "1" by a tns. The next seven bits are

@@ -62,28 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_trylock);
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        u32 iterations = 0;
-        u32 val = READ_ONCE(lock->lock);
-        u32 curr = arch_spin_current(val);
-        /* Return immediately if unlocked. */
-        if (arch_spin_next(val) == curr)
-                return;
-        /* Wait until the current locker has released the lock. */
-        do {
-                delay_backoff(iterations++);
-        } while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
-        /*
-         * The TILE architecture doesn't do read speculation; therefore
-         * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-         */
-        barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
 /*
  * If the read lock fails due to a writer, we retry periodically

@@ -33,11 +33,6 @@
 #define arch_spin_is_locked(x) ((x)->slock != 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-        smp_cond_load_acquire(&lock->slock, !VAL);
-}
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 static inline void arch_spin_lock(arch_spinlock_t *lock)