rtmutex-futex-prepare-rt.patch

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner 2011-06-10 11:04:15 +02:00 committed by Alibek Omarov
parent 31f0810d91
commit 1e4861eaa4
3 changed files with 91 additions and 19 deletions

--- a/kernel/futex.c
+++ b/kernel/futex.c

@@ -1712,6 +1712,16 @@ retry_private:
 				requeue_pi_wake_futex(this, &key2, hb2);
 				drop_count++;
 				continue;
+			} else if (ret == -EAGAIN) {
+				/*
+				 * Waiter was woken by timeout or
+				 * signal and has set pi_blocked_on to
+				 * PI_WAKEUP_INPROGRESS before we
+				 * tried to enqueue it on the rtmutex.
+				 */
+				this->pi_state = NULL;
+				free_pi_state(pi_state);
+				continue;
 			} else if (ret) {
 				/* -EDEADLK */
 				this->pi_state = NULL;
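
The new -EAGAIN arm lets the requeue loop skip a waiter that woke up (via signal or timeout) between the futex wakeup and the rtmutex enqueue, instead of aborting the whole requeue. Below is a minimal user-space sketch of that pattern; struct waiter, start_proxy_lock(), and the sample data are invented stand-ins, not kernel API:

	#include <errno.h>
	#include <stdio.h>

	struct waiter {
		int   woken;		/* set when the waiter raced us and is exiting */
		void *pi_state;		/* state the requeuer attached speculatively   */
	};

	/* Stand-in for the proxy-lock attempt: refuse waiters that have
	 * already marked themselves as waking up. */
	static int start_proxy_lock(struct waiter *w)
	{
		return w->woken ? -EAGAIN : 0;
	}

	int main(void)
	{
		struct waiter waiters[3] = { { 0, "A" }, { 1, "B" }, { 0, "C" } };
		int requeued = 0;

		for (int i = 0; i < 3; i++) {
			if (start_proxy_lock(&waiters[i]) == -EAGAIN) {
				/* Mirror the hunk above: detach the state we
				 * attached for this waiter and move on. */
				waiters[i].pi_state = NULL;
				continue;
			}
			requeued++;
		}
		printf("requeued %d of 3 waiters\n", requeued);	/* prints 2 */
		return 0;
	}

Continuing rather than failing outright matters here: one early wakeup must not prevent the remaining waiters from being requeued to uaddr2.
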
@@ -2565,7 +2575,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct rt_mutex_waiter rt_waiter;
 	struct rt_mutex *pi_mutex = NULL;
-	struct futex_hash_bucket *hb;
+	struct futex_hash_bucket *hb, *hb2;
 	union futex_key key2 = FUTEX_KEY_INIT;
 	struct futex_q q = futex_q_init;
 	int res, ret;
@@ -2624,20 +2634,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
 	futex_wait_queue_me(hb, &q, to);
 
-	spin_lock(&hb->lock);
-	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-	spin_unlock(&hb->lock);
-	if (ret)
-		goto out_put_keys;
+	/*
+	 * On RT we must avoid races with requeue and trying to block
+	 * on two mutexes (hb->lock and uaddr2's rtmutex) by
+	 * serializing access to pi_blocked_on with pi_lock.
+	 */
+	raw_spin_lock_irq(&current->pi_lock);
+	if (current->pi_blocked_on) {
+		/*
+		 * We have been requeued or are in the process of
+		 * being requeued.
+		 */
+		raw_spin_unlock_irq(&current->pi_lock);
+	} else {
+		/*
+		 * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
+		 * prevents a concurrent requeue from moving us to the
+		 * uaddr2 rtmutex. After that we can safely acquire
+		 * (and possibly block on) hb->lock.
+		 */
+		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
+		raw_spin_unlock_irq(&current->pi_lock);
+
+		spin_lock(&hb->lock);
+
+		/*
+		 * Clean up pi_blocked_on. We might leak it otherwise
+		 * when we succeeded with the hb->lock in the fast
+		 * path.
+		 */
+		raw_spin_lock_irq(&current->pi_lock);
+		current->pi_blocked_on = NULL;
+		raw_spin_unlock_irq(&current->pi_lock);
+
+		ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+		spin_unlock(&hb->lock);
+		if (ret)
+			goto out_put_keys;
+	}
 
 	/*
-	 * In order for us to be here, we know our q.key == key2, and since
-	 * we took the hb->lock above, we also know that futex_requeue() has
-	 * completed and we no longer have to concern ourselves with a wakeup
-	 * race with the atomic proxy lock acquisition by the requeue code. The
-	 * futex_requeue dropped our key1 reference and incremented our key2
-	 * reference count.
+	 * In order to be here, we have either been requeued, are in
+	 * the process of being requeued, or requeue successfully
+	 * acquired uaddr2 on our behalf. If pi_blocked_on was
+	 * non-null above, we may be racing with a requeue. Do not
+	 * rely on q->lock_ptr to be hb2->lock until after blocking on
+	 * hb->lock or hb2->lock. The futex_requeue dropped our key1
+	 * reference and incremented our key2 reference count.
 	 */
+	hb2 = hash_futex(&key2);
 
 	/* Check if the requeue code acquired the second futex for us. */
 	if (!q.rt_waiter) {
@@ -2646,9 +2691,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		 * did a lock-steal - fix up the PI-state in that case.
 		 */
 		if (q.pi_state && (q.pi_state->owner != current)) {
-			spin_lock(q.lock_ptr);
+			spin_lock(&hb2->lock);
+			BUG_ON(&hb2->lock != q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
-			spin_unlock(q.lock_ptr);
+			spin_unlock(&hb2->lock);
 		}
 	} else {
 		/*
@@ -2661,7 +2707,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
 		debug_rt_mutex_free_waiter(&rt_waiter);
 
-		spin_lock(q.lock_ptr);
+		spin_lock(&hb2->lock);
+		BUG_ON(&hb2->lock != q.lock_ptr);
 		/*
 		 * Fixup the pi_state owner and possibly acquire the lock if we
 		 * haven't already.
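
The added block is one half of a two-phase handshake on pi_blocked_on: the waiter publishes PI_WAKEUP_INPROGRESS under pi_lock before it may block on hb->lock, and the requeue side (the futex_requeue hunk above plus the task_blocks_on_rt_mutex() change below) backs off with -EAGAIN when it sees the marker. The sketch below models both halves in user space; struct task, mark_wakeup_in_progress(), and try_enqueue() are invented names, and an ordinary pthread mutex stands in for the raw pi_lock:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	struct waiter { int unused; };

	#define WAKEUP_INPROGRESS ((struct waiter *)1)	/* sentinel, never dereferenced */

	struct task {
		pthread_mutex_t pi_lock;	/* models task->pi_lock           */
		struct waiter *pi_blocked_on;	/* NULL, sentinel, or real waiter */
	};

	/* Waker side: claim the task before blocking on the bucket lock. */
	static int mark_wakeup_in_progress(struct task *t)
	{
		int ret = 0;

		pthread_mutex_lock(&t->pi_lock);
		if (t->pi_blocked_on)
			ret = -EAGAIN;		/* a requeue beat us to it */
		else
			t->pi_blocked_on = WAKEUP_INPROGRESS;
		pthread_mutex_unlock(&t->pi_lock);
		return ret;
	}

	/* Requeue side: refuse to enqueue a task that is already waking. */
	static int try_enqueue(struct task *t, struct waiter *w)
	{
		int ret = 0;

		pthread_mutex_lock(&t->pi_lock);
		if (t->pi_blocked_on == WAKEUP_INPROGRESS)
			ret = -EAGAIN;		/* requeue loop drops this waiter */
		else
			t->pi_blocked_on = w;
		pthread_mutex_unlock(&t->pi_lock);
		return ret;
	}

	int main(void)
	{
		struct task t = { PTHREAD_MUTEX_INITIALIZER, NULL };
		struct waiter w;

		mark_wakeup_in_progress(&t);			/* waiter wins the race */
		printf("requeue -> %d\n", try_enqueue(&t, &w));	/* prints -EAGAIN (-11) */
		return 0;
	}

This race is also why the function now recomputes hb2 = hash_futex(&key2) and asserts BUG_ON(&hb2->lock != q.lock_ptr) before touching pi_state: until one of the bucket locks is actually held, q.lock_ptr may still be changing under a concurrent requeue.
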

--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c

@@ -69,6 +69,11 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 	clear_rt_mutex_waiters(lock);
 }
 
+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+{
+	return waiter && waiter != PI_WAKEUP_INPROGRESS;
+}
+
 /*
  * We can speed up the acquire/release, if the architecture
  * supports cmpxchg and if there's no debugging state to be set up
@@ -389,7 +394,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * reached or the state of the chain has changed while we
 	 * dropped the locks.
 	 */
-	if (!waiter)
+	if (!rt_mutex_real_waiter(waiter))
 		goto out_unlock_pi;
 
 	/*
@@ -641,6 +646,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		return -EDEADLK;
 
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+	/*
+	 * In the case of futex requeue PI, this will be a proxy
+	 * lock. The task will wake unaware that it is enqueued on
+	 * this lock. Avoid blocking on two locks and corrupting
+	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
+	 * flag. futex_wait_requeue_pi() sets this when it wakes up
+	 * before requeue (due to a signal or timeout). Do not enqueue
+	 * the task if PI_WAKEUP_INPROGRESS is set.
+	 */
+	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		return -EAGAIN;
+	}
+
+	BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
@@ -664,7 +686,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		rt_mutex_enqueue_pi(owner, waiter);
 
 		__rt_mutex_adjust_prio(owner);
-		if (owner->pi_blocked_on)
+		if (rt_mutex_real_waiter(owner->pi_blocked_on))
 			chain_walk = 1;
 	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
 		chain_walk = 1;
@@ -781,7 +803,8 @@ static void remove_waiter(struct rt_mutex *lock,
 	__rt_mutex_adjust_prio(owner);
 
 	/* Store the lock on which owner is blocked or NULL */
-	next_lock = task_blocked_on_lock(owner);
+	if (rt_mutex_real_waiter(owner->pi_blocked_on))
+		next_lock = task_blocked_on_lock(owner);
 
 	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 }
@@ -813,7 +836,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
-	if (!waiter || (waiter->prio == task->prio &&
+	if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
 			!dl_prio(task->prio))) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
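
All four substitutions in this file enforce the same invariant: PI-chain code must treat the sentinel exactly like "no waiter", since it carries no lock, no priority, and must never be dereferenced. A compilable sketch of that rule, with the waiter type reduced to an invented stand-in:

	#include <assert.h>
	#include <stddef.h>

	struct rt_mutex_waiter { int prio; };

	#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *)1)

	/* Matches the helper introduced above: only a real, dereferenceable
	 * waiter participates in priority-chain walks. */
	static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
	{
		return waiter && waiter != PI_WAKEUP_INPROGRESS;
	}

	int main(void)
	{
		struct rt_mutex_waiter w = { .prio = 10 };

		assert(!rt_mutex_real_waiter(NULL));			/* not blocked   */
		assert(!rt_mutex_real_waiter(PI_WAKEUP_INPROGRESS));	/* waking up     */
		assert(rt_mutex_real_waiter(&w));			/* genuine block */
		return 0;
	}
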

--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h

@@ -104,6 +104,8 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 /*
  * PI-futex support (proxy locking functions, etc.):
  */
+#define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *)1)
+
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				       struct task_struct *proxy_owner);
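
Casting the integer 1 to a waiter pointer yields a value no real rt_mutex_waiter can ever equal: kernel objects are at least pointer-aligned, so an odd address like 1 is never handed out for a live object. A small user-space illustration of that reasoning (the struct layout here is an invented stand-in):

	#include <assert.h>
	#include <stdalign.h>
	#include <stdio.h>

	struct rt_mutex_waiter { void *lock; long prio; };

	#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *)1)

	int main(void)
	{
		struct rt_mutex_waiter w;

		/* Any real waiter is aligned; address 1 is not, so the
		 * sentinel can never collide with a live object. */
		assert(alignof(struct rt_mutex_waiter) > 1);
		assert((void *)&w != (void *)PI_WAKEUP_INPROGRESS);
		printf("sentinel cannot alias a real waiter\n");
		return 0;
	}
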