cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep

We hit another bug that was caused by switching cpu_chill() from
msleep() to hrtimer_nanosleep().

This time it is a livelock. The problem is that hrtimer_nanosleep()
calls schedule with the state == TASK_INTERRUPTIBLE. But this means
that if a signal is pending, the scheduler won't schedule, and will
simply change the current task state back to TASK_RUNNING. This
nullifies the whole point of cpu_chill() in the first place: if a task
is spinning on a try_lock() and has preempted the owner of the lock,
then with a signal pending it will never give up the CPU to let the
owner of the lock run.
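
As a rough illustration (the lock name and retry loop here are made up,
not taken from this patch), the pattern that livelocks looks like this:

        /* The current task preempted the lock owner and spins on the trylock. */
        while (!spin_trylock(&lock)) {
                /*
                 * If cpu_chill() sleeps in TASK_INTERRUPTIBLE and a signal is
                 * pending, the nanosleep returns immediately, so the CPU is
                 * never handed back to the lock owner.
                 */
                cpu_chill();
        }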

I made a static function __hrtimer_nanosleep() that takes a fifth
parameter "state", which determines the task state that the nanosleep
will sleep in. The normal hrtimer_nanosleep() behaves the same as
before, but cpu_chill() now calls __hrtimer_nanosleep() directly with
the TASK_UNINTERRUPTIBLE state.
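
In short (condensed from the diff below), the resulting call shape is:

        static long
        __hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
                            const enum hrtimer_mode mode, const clockid_t clockid,
                            unsigned long state);

        /* Existing callers keep the old interruptible behavior... */
        long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
                               const enum hrtimer_mode mode, const clockid_t clockid)
        {
                return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
        }

        /* ...while cpu_chill() sleeps uninterruptibly: */
        __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
                            TASK_UNINTERRUPTIBLE);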

cpu_chill() only cares that the first sleep happens; it does not care
about the task state used on the restart path (in
hrtimer_nanosleep_restart()).

Cc: stable-rt@vger.kernel.org
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

@@ -1792,12 +1792,13 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
+                                unsigned long state)
 {
         hrtimer_init_sleeper(t, current);
 
         do {
-                set_current_state(TASK_INTERRUPTIBLE);
+                set_current_state(state);
                 hrtimer_start_expires(&t->timer, mode);
                 if (!hrtimer_active(&t->timer))
                         t->task = NULL;
@@ -1841,7 +1842,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
                                 HRTIMER_MODE_ABS);
         hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
-        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
+        /* cpu_chill() does not care about restart state. */
+        if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
                 goto out;
 
         rmtp = restart->nanosleep.rmtp;
@@ -1858,8 +1860,10 @@ out:
         return ret;
 }
 
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
-                       const enum hrtimer_mode mode, const clockid_t clockid)
+static long
+__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+                    const enum hrtimer_mode mode, const clockid_t clockid,
+                    unsigned long state)
 {
         struct restart_block *restart;
         struct hrtimer_sleeper t;
@@ -1872,7 +1876,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
         hrtimer_init_on_stack(&t.timer, clockid, mode);
         hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
 
-        if (do_nanosleep(&t, mode))
+        if (do_nanosleep(&t, mode, state))
                 goto out;
 
         /* Absolute timers do not update the rmtp value and restart: */
@@ -1899,6 +1903,12 @@ out:
         return ret;
 }
 
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+                       const enum hrtimer_mode mode, const clockid_t clockid)
+{
+        return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
+}
+
 SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
                 struct timespec __user *, rmtp)
 {
@@ -1925,7 +1935,8 @@ void cpu_chill(void)
         unsigned int freeze_flag = current->flags & PF_NOFREEZE;
 
         current->flags |= PF_NOFREEZE;
-        hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+        __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
+                            TASK_UNINTERRUPTIBLE);
         if (!freeze_flag)
                 current->flags &= ~PF_NOFREEZE;
 }