locking: ww_mutex: fix ww_mutex vs self-deadlock
If the caller already holds the mutex, task_blocks_on_rt_mutex() returns -EDEADLK and we proceed directly to rt_mutex_handle_deadlock(), where it's instant game over. Instead, let ww_mutexes return -EDEADLK/-EALREADY as they are designed to. Cc: stable-rt@vger.kernel.org Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
d12e92f871
commit
faa7968040
|
@ -1694,13 +1694,20 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
|
||||||
|
|
||||||
if (likely(!ret))
|
if (likely(!ret))
|
||||||
ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ww_ctx);
|
ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ww_ctx);
|
||||||
|
else if (ww_ctx) {
|
||||||
|
/* ww_mutex received EDEADLK, let it become EALREADY */
|
||||||
|
ret = __mutex_lock_check_stamp(lock, ww_ctx);
|
||||||
|
BUG_ON(!ret);
|
||||||
|
}
|
||||||
|
|
||||||
set_current_state(TASK_RUNNING);
|
set_current_state(TASK_RUNNING);
|
||||||
|
|
||||||
if (unlikely(ret)) {
|
if (unlikely(ret)) {
|
||||||
if (rt_mutex_has_waiters(lock))
|
if (rt_mutex_has_waiters(lock))
|
||||||
remove_waiter(lock, &waiter);
|
remove_waiter(lock, &waiter);
|
||||||
rt_mutex_handle_deadlock(ret, chwalk, &waiter);
|
/* ww_mutex want to report EDEADLK/EALREADY, let them */
|
||||||
|
if (!ww_ctx)
|
||||||
|
rt_mutex_handle_deadlock(ret, chwalk, &waiter);
|
||||||
} else if (ww_ctx) {
|
} else if (ww_ctx) {
|
||||||
ww_mutex_account_lock(lock, ww_ctx);
|
ww_mutex_account_lock(lock, ww_ctx);
|
||||||
}
|
}
|
||||||
|
@ -2239,8 +2246,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_c
|
||||||
might_sleep();
|
might_sleep();
|
||||||
|
|
||||||
mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
|
mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
|
||||||
ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL,
|
ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
|
||||||
RT_MUTEX_FULL_CHAINWALK, ww_ctx);
|
|
||||||
if (ret)
|
if (ret)
|
||||||
mutex_release(&lock->base.dep_map, 1, _RET_IP_);
|
mutex_release(&lock->base.dep_map, 1, _RET_IP_);
|
||||||
else if (!ret && ww_ctx->acquired > 1)
|
else if (!ret && ww_ctx->acquired > 1)
|
||||||
|
@ -2258,8 +2264,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
|
||||||
might_sleep();
|
might_sleep();
|
||||||
|
|
||||||
mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
|
mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
|
||||||
ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL,
|
ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
|
||||||
RT_MUTEX_FULL_CHAINWALK, ww_ctx);
|
|
||||||
if (ret)
|
if (ret)
|
||||||
mutex_release(&lock->base.dep_map, 1, _RET_IP_);
|
mutex_release(&lock->base.dep_map, 1, _RET_IP_);
|
||||||
else if (!ret && ww_ctx->acquired > 1)
|
else if (!ret && ww_ctx->acquired > 1)
|
||||||
|
@ -2271,11 +2276,13 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock);
|
||||||
|
|
||||||
void __sched ww_mutex_unlock(struct ww_mutex *lock)
|
void __sched ww_mutex_unlock(struct ww_mutex *lock)
|
||||||
{
|
{
|
||||||
|
int nest = !!lock->ctx;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The unlocking fastpath is the 0->1 transition from 'locked'
|
* The unlocking fastpath is the 0->1 transition from 'locked'
|
||||||
* into 'unlocked' state:
|
* into 'unlocked' state:
|
||||||
*/
|
*/
|
||||||
if (lock->ctx) {
|
if (nest) {
|
||||||
#ifdef CONFIG_DEBUG_MUTEXES
|
#ifdef CONFIG_DEBUG_MUTEXES
|
||||||
DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
|
DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
|
||||||
#endif
|
#endif
|
||||||
|
@ -2284,7 +2291,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
|
||||||
lock->ctx = NULL;
|
lock->ctx = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_release(&lock->base.dep_map, 1, _RET_IP_);
|
mutex_release(&lock->base.dep_map, nest, _RET_IP_);
|
||||||
rt_mutex_unlock(&lock->base.lock);
|
rt_mutex_unlock(&lock->base.lock);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(ww_mutex_unlock);
|
EXPORT_SYMBOL(ww_mutex_unlock);
|
||||||
|
|
Loading…
Reference in New Issue