rtmutex: Handle the various new futex race conditions

RT opens a few new interesting race conditions in the rtmutex/futex
combo due to the futex hash bucket lock being a 'sleeping' spinlock
and therefore not disabling preemption.
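
The crux of the cure is a handshake on task->pi_blocked_on: a waiter
that wakes early (signal or timeout) marks itself with
PI_WAKEUP_INPROGRESS under pi_lock before it may block on hb->lock,
and the requeue side backs off with -EAGAIN when it sees that marker
(see the -EAGAIN branch in the requeue path below). A minimal
userspace sketch of the idea, using pthreads as stand-ins; pi_lock,
hb_lock, blocked_on, waiter_side() and requeue_side() are illustrative
names, not the kernel API:

	#include <errno.h>
	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER; /* models task->pi_lock */
	static pthread_mutex_t hb_lock = PTHREAD_MUTEX_INITIALIZER; /* models hb->lock, which sleeps on RT */

	#define WAKEUP_INPROGRESS ((void *)1)	/* models PI_WAKEUP_INPROGRESS */
	static void *blocked_on;		/* models task->pi_blocked_on */

	/* Early-woken waiter: publish the wakeup before possibly sleeping on hb_lock. */
	static void waiter_side(void)
	{
		pthread_mutex_lock(&pi_lock);
		blocked_on = WAKEUP_INPROGRESS;
		pthread_mutex_unlock(&pi_lock);

		pthread_mutex_lock(&hb_lock);	/* safe to block now: requeue backs off */
		/* ... early wakeup handling would run here ... */
		pthread_mutex_unlock(&hb_lock);

		pthread_mutex_lock(&pi_lock);
		blocked_on = NULL;		/* do not leak the marker */
		pthread_mutex_unlock(&pi_lock);
	}

	/* Requeue side: refuse to enqueue a waiter which is already waking up. */
	static int requeue_side(void)
	{
		int ret = 0;

		pthread_mutex_lock(&pi_lock);
		if (blocked_on == WAKEUP_INPROGRESS)
			ret = -EAGAIN;
		pthread_mutex_unlock(&pi_lock);
		return ret;
	}

	int main(void)
	{
		waiter_side();
		return -requeue_side();	/* 0 here: the marker was cleared again */
	}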

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner authored 2011-06-10 11:04:15 +02:00, committed by Alibek Omarov
parent 597fbf406e
commit 83fddd55bf
3 changed files with 94 additions and 21 deletions

kernel/futex.c

@@ -2270,6 +2270,16 @@ retry_private:
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
} else if (ret == -EAGAIN) {
/*
* Waiter was woken by timeout or
* signal and has set pi_blocked_on to
* PI_WAKEUP_INPROGRESS before we
* tried to enqueue it on the rtmutex.
*/
this->pi_state = NULL;
put_pi_state(pi_state);
continue;
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
@@ -3316,7 +3326,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
{
struct hrtimer_sleeper timeout, *to;
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
struct futex_hash_bucket *hb, *hb2;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
@@ -3368,20 +3378,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
/*
* On RT we must avoid races with requeue and trying to block
* on two mutexes (hb->lock and uaddr2's rtmutex) by
* serializing access to pi_blocked_on with pi_lock.
*/
raw_spin_lock_irq(&current->pi_lock);
if (current->pi_blocked_on) {
/*
* We have been requeued or are in the process of
* being requeued.
*/
raw_spin_unlock_irq(&current->pi_lock);
} else {
/*
* Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
* prevents a concurrent requeue from moving us to the
* uaddr2 rtmutex. After that we can safely acquire
* (and possibly block on) hb->lock.
*/
current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
raw_spin_unlock_irq(&current->pi_lock);
spin_lock(&hb->lock);
/*
* Clean up pi_blocked_on. We might otherwise leak it
* when acquiring hb->lock succeeded on the fast
* path.
*/
raw_spin_lock_irq(&current->pi_lock);
current->pi_blocked_on = NULL;
raw_spin_unlock_irq(&current->pi_lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
spin_unlock(&hb->lock);
if (ret)
goto out_put_keys;
}
/*
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
* race with the atomic proxy lock acquisition by the requeue code. The
* futex_requeue dropped our key1 reference and incremented our key2
* reference count.
* In order to be here, we either have been requeued, are in
* the process of being requeued, or requeue successfully
* acquired uaddr2 on our behalf. If pi_blocked_on was
* non-null above, we may be racing with a requeue. Do not
* rely on q->lock_ptr to be hb2->lock until after blocking on
* hb->lock or hb2->lock. The futex_requeue dropped our key1
* reference and incremented our key2 reference count.
*/
hb2 = hash_futex(&key2);
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
@@ -3390,14 +3435,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
spin_lock(&hb2->lock);
BUG_ON(&hb2->lock != q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
spin_unlock(&hb2->lock);
/*
* Adjust the return value. It's either -EFAULT or
* success (1) but the caller expects 0 for success.
@@ -3416,7 +3462,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
spin_lock(q.lock_ptr);
spin_lock(&hb2->lock);
BUG_ON(&hb2->lock != q.lock_ptr);
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
ret = 0;
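
On RT the woken waiter can no longer trust q.lock_ptr while a requeue
may still be in flight, so futex_wait_requeue_pi() re-derives the
target bucket from the stable key2 via hash_futex() and asserts that
both agree once the bucket lock is held. A toy userspace model of that
invariant; the bucket table, hash(), struct qnode and waiter_recheck()
are made up for illustration:

	#include <assert.h>
	#include <pthread.h>

	struct bucket { pthread_mutex_t lock; };

	static struct bucket table[2] = {
		{ PTHREAD_MUTEX_INITIALIZER },
		{ PTHREAD_MUTEX_INITIALIZER },
	};

	struct qnode {
		pthread_mutex_t *lock_ptr;	/* flipped to the target bucket by "requeue" */
		int key;			/* stable key naming the target bucket */
	};

	static struct bucket *hash(int key)
	{
		return &table[key & 1];
	}

	/* Re-derive the bucket from the stable key; trust lock_ptr only
	 * after that bucket's lock is held, when any requeue is finished. */
	static void waiter_recheck(struct qnode *q)
	{
		struct bucket *hb2 = hash(q->key);

		pthread_mutex_lock(&hb2->lock);
		assert(q->lock_ptr == &hb2->lock);
		pthread_mutex_unlock(&hb2->lock);
	}

	int main(void)
	{
		struct qnode q = { .key = 1 };

		q.lock_ptr = &hash(q.key)->lock; /* what a finished requeue leaves behind */
		waiter_recheck(&q);
		return 0;
	}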

kernel/locking/rtmutex.c

@@ -136,6 +136,11 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
return waiter && waiter != PI_WAKEUP_INPROGRESS;
}
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
@@ -380,7 +385,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
return rt_mutex_real_waiter(p->pi_blocked_on) ?
p->pi_blocked_on->lock : NULL;
}
/*
@@ -516,7 +522,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* reached or the state of the chain has changed while we
* dropped the locks.
*/
if (!waiter)
if (!rt_mutex_real_waiter(waiter))
goto out_unlock_pi;
/*
@@ -950,6 +956,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
/*
* In the case of futex requeue PI, this will be a proxy
* lock. The task will wake unaware that it is enqueued on
* this lock. Avoid blocking on two locks and corrupting
* pi_blocked_on via the PI_WAKEUP_INPROGRESS
* flag. futex_wait_requeue_pi() sets this when it wakes up
* before requeue (due to a signal or timeout). Do not enqueue
* the task if PI_WAKEUP_INPROGRESS is set.
*/
if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
raw_spin_unlock(&task->pi_lock);
return -EAGAIN;
}
BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
@@ -973,7 +995,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
rt_mutex_enqueue_pi(owner, waiter);
rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
if (rt_mutex_real_waiter(owner->pi_blocked_on))
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
@@ -1069,7 +1091,7 @@ static void remove_waiter(struct rt_mutex *lock,
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex *next_lock;
struct rt_mutex *next_lock = NULL;
lockdep_assert_held(&lock->wait_lock);
@@ -1095,7 +1117,8 @@ static void remove_waiter(struct rt_mutex *lock,
rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
next_lock = task_blocked_on_lock(owner);
if (rt_mutex_real_waiter(owner->pi_blocked_on))
next_lock = task_blocked_on_lock(owner);
raw_spin_unlock(&owner->pi_lock);
@@ -1131,7 +1154,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
if (!rt_mutex_real_waiter(waiter) ||
rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
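
Everywhere rtmutex.c used to look at the raw pi_blocked_on pointer it
now filters it through rt_mutex_real_waiter(), so the PI chain walk
and the waiter removal paths treat the PI_WAKEUP_INPROGRESS cookie
like "not blocked at all". A self-contained sketch of that filtering;
the struct layouts, chain_depth() and the task graph are illustrative,
only the filter mirrors task_blocked_on_lock() after this patch:

	#include <stdio.h>

	struct lock;

	struct waiter {
		struct lock *lock;		/* the lock this waiter blocks on */
	};

	struct task {
		struct waiter *pi_blocked_on;	/* NULL, a real waiter, or the cookie */
	};

	struct lock {
		struct task *owner;
	};

	#define WAKEUP_INPROGRESS ((struct waiter *)1)	/* like PI_WAKEUP_INPROGRESS */

	static int real_waiter(struct waiter *w)
	{
		return w && w != WAKEUP_INPROGRESS;
	}

	/* Walk task -> blocked-on lock -> owner -> ... and stop at the
	 * cookie, exactly as the filtered chain walk does. */
	static int chain_depth(struct task *t)
	{
		int depth = 0;

		while (real_waiter(t->pi_blocked_on)) {
			t = t->pi_blocked_on->lock->owner;
			depth++;
		}
		return depth;
	}

	int main(void)
	{
		struct task c = { .pi_blocked_on = WAKEUP_INPROGRESS };
		struct lock l2 = { .owner = &c };
		struct waiter w2 = { .lock = &l2 };
		struct task b = { .pi_blocked_on = &w2 };
		struct lock l1 = { .owner = &b };
		struct waiter w1 = { .lock = &l1 };
		struct task a = { .pi_blocked_on = &w1 };

		printf("%d\n", chain_depth(&a));	/* 2: the walk ends at the cookie */
		return 0;
	}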

kernel/locking/rtmutex_common.h

@@ -130,6 +130,8 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
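
The cookie itself is the usual small-integer-cast-to-pointer trick: no
kernel object can live at address 1, so equality checks suffice and
the value must never be dereferenced. A minimal standalone sketch;
only the #define mirrors the header above, the rest is illustrative:

	#include <assert.h>

	struct rt_mutex_waiter;	/* opaque here, the layout does not matter */

	#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *)1)

	int main(void)
	{
		struct rt_mutex_waiter *w = PI_WAKEUP_INPROGRESS;

		assert(w == PI_WAKEUP_INPROGRESS);	/* compare, never dereference */
		return 0;
	}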