stomp-machine: create lg_global_trylock_relax() primitive
Create lg_global_trylock_relax() for use by stopper thread when it cannot schedule, to deal with stop_cpus_lock, which is now an lglock. Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
This commit is contained in:
parent
121435cad6
commit
8be91122a8
|
@ -74,4 +74,10 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu);
|
|||
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);

#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * !RT: the trylock-relax variant simply takes the global lock.
 * NOTE(review): presumably safe because on !RT lg_global_lock()
 * does not sleep in the caller's context — confirm against the
 * !RT lglock implementation.
 */
#define lg_global_trylock_relax(name)	lg_global_lock(name)
#else
/*
 * RT: lg_global_lock() may block, so callers that cannot schedule
 * (e.g. the stopper thread, per the commit message) use this
 * spinning variant instead. Implemented in lglock.c.
 */
void lg_global_trylock_relax(struct lglock *lg);
#endif

#endif
|
||||
|
|
|
@ -35,6 +35,7 @@ extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
|
|||
*/
|
||||
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
/* Non-blocking trylock on the bare rt_mutex; used by lg_do_trylock_relax(). */
extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
|
||||
|
||||
#define spin_lock(lock) \
|
||||
do { \
|
||||
|
|
|
@ -105,3 +105,28 @@ void lg_global_unlock(struct lglock *lg)
|
|||
preempt_enable_nort();
|
||||
}
|
||||
EXPORT_SYMBOL(lg_global_unlock);
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT_FULL
/*
 * HACK: If you use this, you get to keep the pieces.
 * Used in queue_stop_cpus_work() when stop machinery
 * is called from inactive CPU, so we can't schedule.
 *
 * Busy-spin (cpu_relax) on the per-CPU rt_mutex instead of taking
 * it with a potentially sleeping lock operation, because the caller
 * is not allowed to schedule here.
 */
# define lg_do_trylock_relax(l)			\
	do {					\
		while (!__rt_spin_trylock(l))	\
			cpu_relax();		\
	} while (0)
|
||||
|
||||
void lg_global_trylock_relax(struct lglock *lg)
|
||||
{
|
||||
int i;
|
||||
|
||||
lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
|
||||
for_each_possible_cpu(i) {
|
||||
lg_lock_ptr *lock;
|
||||
lock = per_cpu_ptr(lg->lock, i);
|
||||
lg_do_trylock_relax(lock);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -1139,6 +1139,11 @@ void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
|
|||
}
|
||||
EXPORT_SYMBOL(rt_spin_unlock_wait);
|
||||
|
||||
/*
 * Attempt to take the bare rt_mutex without blocking.
 * Returns the rt_mutex_trylock() result (non-zero on success,
 * 0 on contention, per the rt_mutex convention).
 */
int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
{
	return rt_mutex_trylock(lock);
}
|
||||
|
||||
int __lockfunc rt_spin_trylock(spinlock_t *lock)
|
||||
{
|
||||
int ret = rt_mutex_trylock(&lock->lock);
|
||||
|
|
Loading…
Reference in New Issue