From 493d266dbd68832a7ab3f0cbd26e8c2d6dc37587 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Fri, 15 Nov 2013 15:46:50 +0100
Subject: [PATCH] rtmutex: use a trylock for waiter lock in trylock

Mike Galbraith captured the following:

| >#11 [ffff88017b243e90] _raw_spin_lock at ffffffff815d2596
| >#12 [ffff88017b243e90] rt_mutex_trylock at ffffffff815d15be
| >#13 [ffff88017b243eb0] get_next_timer_interrupt at ffffffff81063b42
| >#14 [ffff88017b243f00] tick_nohz_stop_sched_tick at ffffffff810bd1fd
| >#15 [ffff88017b243f70] tick_nohz_irq_exit at ffffffff810bd7d2
| >#16 [ffff88017b243f90] irq_exit at ffffffff8105b02d
| >#17 [ffff88017b243fb0] reschedule_interrupt at ffffffff815db3dd
| >--- <IRQ stack> ---
| >#18 [ffff88017a2a9bc8] reschedule_interrupt at ffffffff815db3dd
| >    [exception RIP: task_blocks_on_rt_mutex+51]
| >#19 [ffff88017a2a9ce0] rt_spin_lock_slowlock at ffffffff815d183c
| >#20 [ffff88017a2a9da0] lock_timer_base.isra.35 at ffffffff81061cbf
| >#21 [ffff88017a2a9dd0] schedule_timeout at ffffffff815cf1ce
| >#22 [ffff88017a2a9e50] rcu_gp_kthread at ffffffff810f9bbb
| >#23 [ffff88017a2a9ed0] kthread at ffffffff810796d5
| >#24 [ffff88017a2a9f50] ret_from_fork at ffffffff815da04c

lock_timer_base() does a try_lock() which deadlocks on the waiter lock,
not the lock itself.

This patch takes the waiter_lock with trylock so it should work from
interrupt context as well. If the fastpath fails and the waiter_lock
itself is already held, then the lock itself is most likely held as
well, so failing the trylock is the right answer.

This patch also adds "rt_spin_unlock_after_trylock_in_irq" to keep
lockdep happy. If we managed to take the wait_lock in the first place
we should also be able to take it in the unlock path.

Cc: stable-rt@vger.kernel.org
Reported-by: Mike Galbraith
Signed-off-by: Sebastian Andrzej Siewior
---
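Note (for context, not part of the change itself): the fastpath mentioned
above is the cmpxchg of lock->owner in rt_spin_lock_fastunlock(). Roughly,
and only as a sketch of what that dispatch looks like in this tree (the
helper is not touched by this patch, details may differ):

	static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
				void (*slowfn)(struct rt_mutex *lock))
	{
		/* No waiter ever queued: drop ownership, wait_lock untouched. */
		if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
			rt_mutex_deadlock_account_unlock(current);
		else
			/* Waiters present: the slow path must take wait_lock. */
			slowfn(lock);
	}

So rt_spin_lock_slowunlock_hirq() below only spins on raw_spin_trylock()
when a waiter is queued, i.e. after the wait_lock has already been taken
successfully once on this lock, which is why looping until the trylock
succeeds is safe here.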
 include/linux/spinlock_rt.h |  1 +
 kernel/locking/rtmutex.c    | 31 +++++++++++++++++++++++++++----
 kernel/timer.c              |  2 +-
 3 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 447c457950a4..ac6f08bde7e8 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -22,6 +22,7 @@ extern void __lockfunc rt_spin_lock(spinlock_t *lock);
 extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
 extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
 extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
 extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
 extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
 extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 14e3a8fba66c..8bd666b25a37 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1047,10 +1047,8 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 /*
  * Slow path to release a rt_mutex spin_lock style
  */
-static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
 {
-	raw_spin_lock(&lock->wait_lock);
-
 	debug_rt_mutex_unlock(lock);
 
 	rt_mutex_deadlock_account_unlock(current);
@@ -1069,6 +1067,23 @@ static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 	rt_mutex_adjust_prio(current);
 }
 
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+	raw_spin_lock(&lock->wait_lock);
+	__rt_spin_lock_slowunlock(lock);
+}
+
+static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
+{
+	int ret;
+
+	do {
+		ret = raw_spin_trylock(&lock->wait_lock);
+	} while (!ret);
+
+	__rt_spin_lock_slowunlock(lock);
+}
+
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
 	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1099,6 +1114,13 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(rt_spin_unlock);
 
+void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
+{
+	/* NOTE: we always pass in '1' for nested, for simplicity */
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
+}
+
 void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
 {
 	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
@@ -1443,7 +1465,8 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
 	int ret = 0;
 
-	raw_spin_lock(&lock->wait_lock);
+	if (!raw_spin_trylock(&lock->wait_lock))
+		return ret;
 
 	if (likely(rt_mutex_owner(lock) != current)) {
diff --git a/kernel/timer.c b/kernel/timer.c
index 2e4968e92607..d8026bbf389e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1400,7 +1400,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 		expires = base->next_timer;
 	}
 #ifdef CONFIG_PREEMPT_RT_FULL
-	rt_spin_unlock(&base->lock);
+	rt_spin_unlock_after_trylock_in_irq(&base->lock);
 #else
 	spin_unlock(&base->lock);
 #endif
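Note (for context, not part of this patch): the lock side in
get_next_timer_interrupt() already uses a trylock under PREEMPT_RT_FULL
and returns a pessimistic expiry value when the trylock fails, so only
the unlock side needed fixing above. Paraphrased from the same tree, the
matching lock side looks roughly like this:

#ifdef CONFIG_PREEMPT_RT_FULL
	/*
	 * On PREEMPT_RT we cannot sleep here. If the trylock does not
	 * succeed then we return the worst-case 'expires in 1 tick'
	 * value.
	 */
	if (!spin_do_trylock(&base->lock))
		return now + 1;
#else
	spin_lock(&base->lock);
#endif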