rtmutex: Document pi chain walk

upstream commit: 3eb65aeadf

Add commentary to document the chain walk and the protection mechanisms
and their scope.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

@@ -358,6 +358,48 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
* @top_task: the current top waiter
*
* Returns 0 or -EDEADLK.
*
* Chain walk basics and protection scope
*
* [R] refcount on task
* [P] task->pi_lock held
* [L] rtmutex->wait_lock held
*
* Step  Description                             Protected by
*       function arguments:
*       @task                                   [R]
*       @orig_lock if != NULL @top_task is blocked on it
*       @next_lock                              Unprotected. Cannot be
*                                               dereferenced. Only used
*                                               for comparison.
*       @orig_waiter if != NULL @top_task is blocked on it
*       @top_task                               current, or in case of proxy
*                                               locking protected by calling
*                                               code
*       again:
*         loop_sanity_check();
*       retry:
* [1]     lock(task->pi_lock);                  [R] acquire [P]
* [2]     waiter = task->pi_blocked_on;         [P]
* [3]     check_exit_conditions_1();            [P]
* [4]     lock = waiter->lock;                  [P]
* [5]     if (!try_lock(lock->wait_lock)) {     [P] try to acquire [L]
*           unlock(task->pi_lock);              release [P]
*           goto retry;
*         }
* [6]     check_exit_conditions_2();            [P] + [L]
* [7]     requeue_lock_waiter(lock, waiter);    [P] + [L]
* [8]     unlock(task->pi_lock);                release [P]
*         put_task_struct(task);                release [R]
* [9]     check_exit_conditions_3();            [L]
* [10]    task = owner(lock);                   [L]
*         get_task_struct(task);                [L] acquire [R]
*         lock(task->pi_lock);                  [L] acquire [P]
* [11]    requeue_pi_waiter(tsk, waiters(lock)); [P] + [L]
* [12]    check_exit_conditions_4();            [P] + [L]
* [13]    unlock(task->pi_lock);                release [P]
*         unlock(lock->wait_lock);              release [L]
*         goto again;
*/
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
int deadlock_detect,
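A side note on the table: the [R]/[P] interplay can be condensed into a short sketch. The walk is fully preemptible between steps, so a task reference is the only thing pinning @task while all locks are dropped, and every iteration has to revalidate the blocked-on state after reacquiring task->pi_lock. A minimal sketch with a simplified bail-out, not the exact kernel code:

	get_task_struct(task);				/* acquire [R] */

	/* fully preemptible window: no locks held, anything may change */

	raw_spin_lock_irqsave(&task->pi_lock, flags);	/* acquire [P] */
	waiter = task->pi_blocked_on;			/* only valid under [P] */
	if (!waiter) {
		/* @task is not (or no longer) blocked: end of chain */
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		put_task_struct(task);			/* release [R] */
		return 0;
	}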
@@ -382,6 +424,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* carefully whether things change under us.
*/
again:
/*
* We limit the lock chain length for each invocation.
*/
if (++depth > max_lock_depth) {
static int prev_max;
@@ -399,13 +444,28 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
return -EDEADLK;
}
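For context: max_lock_depth bounds the walk; it defaults to 1024 and is tunable via /proc/sys/kernel/max_lock_depth. The collapsed body above follows a warn-once pattern roughly like this sketch (reconstructed for illustration, not guaranteed to match the elided lines verbatim):

	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Warn only once per limit value: if the admin raises
		 * max_lock_depth, warn again when the new limit is hit.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached: task %s (%d)\n",
			       max_lock_depth, top_task->comm,
			       task_pid_nr(top_task));
		}
		put_task_struct(task);
		return -EDEADLK;
	}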
/*
* We are fully preemptible here and only hold the refcount on
* @task. So everything can have changed under us since the
* caller or our own code below (goto retry/again) dropped all
* locks.
*/
retry:
/*
* [1] Task cannot go away as we did a get_task() before !
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
/*
* [2] Get the waiter on which @task is blocked.
*/
waiter = task->pi_blocked_on;
/*
* [3] check_exit_conditions_1() protected by task->pi_lock.
*/
/*
* Check whether the end of the boosting chain has been
* reached or the state of the chain has changed while we
@@ -456,7 +516,15 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
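/*
 * When deadlock detection is off, check whether any further priority
 * adjustment is needed at all: if the waiter already runs at @task's
 * priority, there is nothing to propagate.
 */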
if (!detect_deadlock && waiter->prio == task->prio)
goto out_unlock_pi;
/*
* [4] Get the next lock
*/
lock = waiter->lock;
/*
* [5] We need to trylock here as we are holding task->pi_lock,
* which is the reverse lock order versus the other rtmutex
* operations.
*/
if (!raw_spin_trylock(&lock->wait_lock)) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
cpu_relax();
@@ -464,6 +532,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
}
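The reverse order mentioned in [5] refers to paths such as the block path, which take the locks the other way around: lock->wait_lock first, with task->pi_lock nested inside. A simplified sketch of that opposite nesting, for illustration only:

	/* Elsewhere in rtmutex, e.g. when a task blocks on a lock: */
	raw_spin_lock(&lock->wait_lock);		/* [L] first */
	raw_spin_lock_irqsave(&task->pi_lock, flags);	/* then [P] */
	/* ... enqueue the waiter, set task->pi_blocked_on ... */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

The chain walk holds [P] and then needs [L]; blocking on wait_lock here could ABBA-deadlock against such a path, hence the trylock plus a full retry.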
/*
* [6] check_exit_conditions_2() protected by task->pi_lock and
* lock->wait_lock.
*
* Deadlock detection. If the lock is the same as the original
* lock which caused us to walk the lock chain or if the
* current lock is owned by the task which initiated the chain
@@ -483,16 +554,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
*/
prerequeue_top_waiter = rt_mutex_top_waiter(lock);
/* [7] Requeue the waiter in the lock waiter list. */
rt_mutex_dequeue(lock, waiter);
waiter->prio = task->prio;
rt_mutex_enqueue(lock, waiter);
/* [8] Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
put_task_struct(task);
/*
* [9] check_exit_conditions_3 protected by lock->wait_lock.
*
* We must abort the chain walk if there is no lock owner even
* in the deadlock detection case, as we have nothing to
* follow here. This is the end of the chain we are walking.
@@ -501,8 +574,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
struct rt_mutex_waiter *lock_top_waiter;
/*
* If the requeue [7] above changed the top waiter,
* then we need to wake the new top waiter up to try
* to get the lock.
*/
lock_top_waiter = rt_mutex_top_waiter(lock);
if (prerequeue_top_waiter != lock_top_waiter)
@@ -511,11 +585,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
return 0;
}
/* [10] Grab the next task, i.e. the owner of @lock */
task = rt_mutex_owner(lock);
get_task_struct(task);
raw_spin_lock_irqsave(&task->pi_lock, flags);
/* [11] requeue the pi waiters if necessary */
if (waiter == rt_mutex_top_waiter(lock)) {
/*
* The waiter became the new top (highest priority)
@@ -550,23 +625,30 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
}
/*
* [12] check_exit_conditions_4() protected by task->pi_lock
* and lock->wait_lock. The actual decisions are made after we
* dropped the locks.
*
* Check whether the task which owns the current lock is pi
* blocked itself. If yes we store a pointer to the lock for
* the lock chain change detection above. After we dropped
* task->pi_lock next_lock cannot be dereferenced anymore.
*/
next_lock = task_blocked_on_lock(task);
/*
* Store the top waiter of @lock for the end of chain walk
* decision below.
*/
top_waiter = rt_mutex_top_waiter(lock);
/* [13] Drop the locks */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock(&lock->wait_lock);
/*
* Make the actual exit decisions [12], based on the stored
* values.
*
* We reached the end of the lock chain. Stop right here. No
* point to go back just to figure that out.
*/
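To complete the picture: the decisions of [12] are then taken with no locks held, purely on the stored next_lock and top_waiter values. Condensed, the logic following the comment above looks roughly like this (a simplified sketch using the surrounding function's names, not the verbatim continuation):

	/* All locks dropped: act only on the snapshots taken under [P] + [L]. */
	if (!next_lock)
		goto out_put_task;	/* the owner is not blocked: chain ends */

	/*
	 * Without full deadlock detection the walk can also stop once the
	 * requeued waiter is no longer the top waiter of the lock: nothing
	 * further up the chain changes in that case.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;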