softirq: Adapt NOHZ softirq pending check to new RT scheme
We can't rely on ksoftirqd anymore: we need to check the tasks which run a particular softirq, and if such a task is PI-blocked, ignore the other pending bits of that task as well.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent d2f09d0ed6
commit e1bb862d7e
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -65,46 +65,71 @@ const char * const softirq_to_name[NR_SOFTIRQS] = {
 
 #ifdef CONFIG_NO_HZ_COMMON
+# ifdef CONFIG_PREEMPT_RT_FULL
+
+struct softirq_runner {
+	struct task_struct *runner[NR_SOFTIRQS];
+};
+
+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
+
+static inline void softirq_set_runner(unsigned int sirq)
+{
+	struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
+
+	sr->runner[sirq] = current;
+}
+
+static inline void softirq_clr_runner(unsigned int sirq)
+{
+	struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
+
+	sr->runner[sirq] = NULL;
+}
+
 /*
- * On preempt-rt a softirq might be blocked on a lock. There might be
- * no other runnable task on this CPU because the lock owner runs on
- * some other CPU. So we have to go into idle with the pending bit
- * set. Therefor we need to check this otherwise we warn about false
- * positives which confuses users and defeats the whole purpose of
- * this test.
+ * On preempt-rt a softirq running context might be blocked on a
+ * lock. There might be no other runnable task on this CPU because the
+ * lock owner runs on some other CPU. So we have to go into idle with
+ * the pending bit set. Therefor we need to check this otherwise we
+ * warn about false positives which confuses users and defeats the
+ * whole purpose of this test.
  *
  * This code is called with interrupts disabled.
  */
 void softirq_check_pending_idle(void)
 {
 	static int rate_limit;
-	u32 warnpending = 0, pending;
+	struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
+	u32 warnpending;
+	int i;
 
 	if (rate_limit >= 10)
 		return;
 
-	pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
-	if (pending) {
-		struct task_struct *tsk;
+	warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
+	for (i = 0; i < NR_SOFTIRQS; i++) {
+		struct task_struct *tsk = sr->runner[i];
 
-		tsk = __get_cpu_var(ksoftirqd);
 		/*
 		 * The wakeup code in rtmutex.c wakes up the task
 		 * _before_ it sets pi_blocked_on to NULL under
 		 * tsk->pi_lock. So we need to check for both: state
 		 * and pi_blocked_on.
 		 */
-		raw_spin_lock(&tsk->pi_lock);
-
-		if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
-			warnpending = 1;
-
-		raw_spin_unlock(&tsk->pi_lock);
+		if (tsk) {
+			raw_spin_lock(&tsk->pi_lock);
+			if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
+				/* Clear all bits pending in that task */
+				warnpending &= ~(tsk->softirqs_raised);
+				warnpending &= ~(1 << i);
+			}
+			raw_spin_unlock(&tsk->pi_lock);
+		}
 	}
 
 	if (warnpending) {
 		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-		       pending);
+		       warnpending);
 		rate_limit++;
 	}
 }
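The rewritten check is, at its core, a masking operation: start with the pending bits that matter for idle entry, then clear every bit owned by a runner task that is PI-blocked or still runnable, since such a task will process its bits once it gets the CPU back. Below is a minimal userspace sketch of that logic; struct task, check_pending_idle() and the boolean fields are illustrative stand-ins for the kernel's task_struct, pi_blocked_on and task state, not kernel code.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define NR_SOFTIRQS 10

struct task {				/* stand-in for task_struct */
	bool pi_blocked;		/* stand-in for pi_blocked_on != NULL */
	bool running;			/* stand-in for state == TASK_RUNNING */
	uint32_t softirqs_raised;	/* bits this task will still process */
};

/* Return only the pending bits that nobody is going to handle. */
static uint32_t check_pending_idle(uint32_t pending,
				   struct task *runner[NR_SOFTIRQS])
{
	uint32_t warnpending = pending;
	int i;

	for (i = 0; i < NR_SOFTIRQS; i++) {
		struct task *tsk = runner[i];

		if (tsk && (tsk->pi_blocked || tsk->running)) {
			/* The runner will get to these bits eventually */
			warnpending &= ~tsk->softirqs_raised;
			warnpending &= ~(1U << i);
		}
	}
	return warnpending;
}

int main(void)
{
	struct task timer = { .pi_blocked = true, .softirqs_raised = 1U << 1 };
	struct task *runner[NR_SOFTIRQS] = { [1] = &timer };

	/* Bit 1 pending, but its runner is PI-blocked: no warning */
	printf("warnpending = %#x\n", (unsigned)check_pending_idle(1U << 1, runner));
	/* Bit 2 pending with no runner at all: warn */
	printf("warnpending = %#x\n", (unsigned)check_pending_idle(1U << 2, runner));
	return 0;
}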
@@ -124,6 +149,10 @@ void softirq_check_pending_idle(void)
 	}
 }
 # endif
+
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline void softirq_set_runner(unsigned int sirq) { }
+static inline void softirq_clr_runner(unsigned int sirq) { }
 #endif
 
 /*
@@ -508,6 +537,7 @@ static void do_current_softirqs(int need_rcu_bh_qs)
 		 */
 		lock_softirq(i);
 		local_irq_disable();
+		softirq_set_runner(i);
 		/*
 		 * Check with the local_softirq_pending() bits,
 		 * whether we need to process this still or if someone
@@ -518,6 +548,7 @@ static void do_current_softirqs(int need_rcu_bh_qs)
 			set_softirq_pending(pending & ~mask);
 			do_single_softirq(i, need_rcu_bh_qs);
 		}
+		softirq_clr_runner(i);
 		unlock_softirq(i);
 		WARN_ON(current->softirq_nestcnt != 1);
 	}
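The two do_current_softirqs() hunks bracket the handler invocation with softirq_set_runner()/softirq_clr_runner(), so the idle-entry check always knows which task, if any, is in the middle of a given softirq. A sketch of the same bracketing pattern, assuming illustrative names (run_one_softirq, a string standing in for task_struct *, a global standing in for the per-CPU array) rather than the kernel API:

#include <stdio.h>

#define NR_SOFTIRQS 10

struct softirq_runner {
	const char *runner[NR_SOFTIRQS];	/* stand-in for task_struct * */
};

static struct softirq_runner this_cpu;		/* stand-in for per-CPU data */

/* Publish the runner before the handler, clear it afterwards. */
static void run_one_softirq(int i, const char *task, void (*handler)(void))
{
	this_cpu.runner[i] = task;		/* softirq_set_runner(i) */
	handler();				/* may block on a lock on RT */
	this_cpu.runner[i] = NULL;		/* softirq_clr_runner(i) */
}

static void timer_handler(void)
{
	printf("while handling, runner[1] = %s\n", this_cpu.runner[1]);
}

int main(void)
{
	run_one_softirq(1, "ksoftirqd/0", timer_handler);
	printf("after handling, runner[1] = %p\n", (void *)this_cpu.runner[1]);
	return 0;
}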
@@ -613,7 +644,7 @@ void thread_do_softirq(void)
 	}
 }
 
-void __raise_softirq_irqoff(unsigned int nr)
+static void do_raise_softirq_irqoff(unsigned int nr)
 {
 	trace_softirq_raise(nr);
 	or_softirq_pending(1UL << nr);
@@ -630,12 +661,19 @@ void __raise_softirq_irqoff(unsigned int nr)
 		__this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
 }
 
+void __raise_softirq_irqoff(unsigned int nr)
+{
+	do_raise_softirq_irqoff(nr);
+	if (!in_irq() && !current->softirq_nestcnt)
+		wakeup_softirqd();
+}
+
 /*
  * This function must run with irqs disabled!
  */
 void raise_softirq_irqoff(unsigned int nr)
 {
-	__raise_softirq_irqoff(nr);
+	do_raise_softirq_irqoff(nr);
 
 	/*
 	 * If we're in an hard interrupt we let irq return code deal
@@ -657,11 +695,6 @@ void raise_softirq_irqoff(unsigned int nr)
 	wakeup_softirqd();
 }
 
-void do_raise_softirq_irqoff(unsigned int nr)
-{
-	raise_softirq_irqoff(nr);
-}
-
 static inline int ksoftirqd_softirq_pending(void)
 {
 	return current->softirqs_raised;
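The last three hunks split the raise path: do_raise_softirq_irqoff() only marks the softirq pending, while __raise_softirq_irqoff() adds a wakeup for callers in plain task context, where neither the hard-interrupt return path nor in-progress softirq processing would otherwise notice the new bit. A compressed sketch of that policy split, with wakeup(), in_hard_irq and softirq_nestcnt as stand-ins for wakeup_softirqd(), in_irq() and current->softirq_nestcnt, and raise_and_maybe_wake() as an illustrative name, not the kernel function:

#include <stdbool.h>
#include <stdio.h>

static unsigned long pending_bits;	/* stand-in for the pending mask */
static bool in_hard_irq;		/* stand-in for in_irq() */
static int softirq_nestcnt;		/* stand-in for current->softirq_nestcnt */

static void wakeup(void)		/* stand-in for wakeup_softirqd() */
{
	puts("wake softirq thread");
}

/* Only mark the softirq pending; no wakeup policy here. */
static void do_raise(unsigned int nr)
{
	pending_bits |= 1UL << nr;
}

/*
 * Raise from arbitrary irqs-off context: if we are neither in a hard
 * interrupt nor already inside softirq processing, nobody else will
 * notice the new bit, so wake the softirq thread ourselves.
 */
static void raise_and_maybe_wake(unsigned int nr)
{
	do_raise(nr);
	if (!in_hard_irq && !softirq_nestcnt)
		wakeup();
}

int main(void)
{
	raise_and_maybe_wake(3);	/* task context: wakes the thread */
	in_hard_irq = true;
	raise_and_maybe_wake(4);	/* irq context: irq-exit handles it */
	printf("pending = %#lx\n", pending_bits);
	return 0;
}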