c942fddf87
Based on 3 normalized pattern(s): this program is free software you can redistribute it and or modify it under the terms of the gnu general public license as published by the free software foundation either version 2 of the license or at your option any later version this program is distributed in the hope that it will be useful but without any warranty without even the implied warranty of merchantability or fitness for a particular purpose see the gnu general public license for more details this program is free software you can redistribute it and or modify it under the terms of the gnu general public license as published by the free software foundation either version 2 of the license or at your option any later version [author] [kishon] [vijay] [abraham] [i] [kishon]@[ti] [com] this program is distributed in the hope that it will be useful but without any warranty without even the implied warranty of merchantability or fitness for a particular purpose see the gnu general public license for more details this program is free software you can redistribute it and or modify it under the terms of the gnu general public license as published by the free software foundation either version 2 of the license or at your option any later version [author] [graeme] [gregory] [gg]@[slimlogic] [co] [uk] [author] [kishon] [vijay] [abraham] [i] [kishon]@[ti] [com] [based] [on] [twl6030]_[usb] [c] [author] [hema] [hk] [hemahk]@[ti] [com] this program is distributed in the hope that it will be useful but without any warranty without even the implied warranty of merchantability or fitness for a particular purpose see the gnu general public license for more details extracted by the scancode license scanner the SPDX license identifier GPL-2.0-or-later has been chosen to replace the boilerplate/reference in 1105 file(s). 
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Allison Randal <allison@lohutok.net> Reviewed-by: Richard Fontana <rfontana@redhat.com> Reviewed-by: Kate Stewart <kstewart@linuxfoundation.org> Cc: linux-spdx@vger.kernel.org Link: https://lkml.kernel.org/r/20190527070033.202006027@linutronix.de Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
117 lines
3.2 KiB
C
117 lines
3.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* Queued spinlock
|
|
*
|
|
* (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
|
|
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
|
|
*
|
|
* Authors: Waiman Long <waiman.long@hpe.com>
|
|
*/
|
|
#ifndef __ASM_GENERIC_QSPINLOCK_H
|
|
#define __ASM_GENERIC_QSPINLOCK_H
|
|
|
|
#include <asm-generic/qspinlock_types.h>
|
|
|
|
/**
|
|
* queued_spin_is_locked - is the spinlock locked?
|
|
* @lock: Pointer to queued spinlock structure
|
|
* Return: 1 if it is locked, 0 otherwise
|
|
*/
|
|
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
|
|
{
|
|
/*
|
|
* Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
|
|
* isn't immediately observable.
|
|
*/
|
|
return atomic_read(&lock->val);
|
|
}
|
|
|
|
/**
|
|
* queued_spin_value_unlocked - is the spinlock structure unlocked?
|
|
* @lock: queued spinlock structure
|
|
* Return: 1 if it is unlocked, 0 otherwise
|
|
*
|
|
* N.B. Whenever there are tasks waiting for the lock, it is considered
|
|
* locked wrt the lockref code to avoid lock stealing by the lockref
|
|
* code and change things underneath the lock. This also allows some
|
|
* optimizations to be applied without conflict with lockref.
|
|
*/
|
|
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
|
|
{
|
|
return !atomic_read(&lock.val);
|
|
}
|
|
|
|
/**
|
|
* queued_spin_is_contended - check if the lock is contended
|
|
* @lock : Pointer to queued spinlock structure
|
|
* Return: 1 if lock contended, 0 otherwise
|
|
*/
|
|
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
|
|
{
|
|
return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
|
|
}
|
|
/**
|
|
* queued_spin_trylock - try to acquire the queued spinlock
|
|
* @lock : Pointer to queued spinlock structure
|
|
* Return: 1 if lock acquired, 0 if failed
|
|
*/
|
|
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
|
|
{
|
|
u32 val = atomic_read(&lock->val);
|
|
|
|
if (unlikely(val))
|
|
return 0;
|
|
|
|
return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
|
|
}
|
|
|
|
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
|
|
|
|
/**
|
|
* queued_spin_lock - acquire a queued spinlock
|
|
* @lock: Pointer to queued spinlock structure
|
|
*/
|
|
static __always_inline void queued_spin_lock(struct qspinlock *lock)
|
|
{
|
|
u32 val = 0;
|
|
|
|
if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
|
|
return;
|
|
|
|
queued_spin_lock_slowpath(lock, val);
|
|
}
|
|
|
|
#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * Only the locked byte is cleared; any pending/tail bits set by
 * waiters are left intact for the slowpath to consume.
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 * the store must not be reordered before the critical section's
	 * accesses; pairs with the acquire cmpxchg on the lock side.
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
|
|
|
|
#ifndef virt_spin_lock
/*
 * Default (non-paravirt) hook: returning false tells the caller that no
 * virtualization-specific locking was performed, so the normal queued
 * spinlock path must be taken. Architectures may override this.
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
|
|
|
|
/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
|