kernel/irq_work: fix no_hz deadlock
Invoking NO_HZ's irq_work callback from the timer irq does not work well if the callback decides to invoke hrtimer_cancel():

  hrtimer_try_to_cancel+0x55/0x5f
  hrtimer_cancel+0x16/0x28
  tick_nohz_restart+0x17/0x72
  __tick_nohz_full_check+0x8e/0x93
  nohz_full_kick_work_func+0xe/0x10
  irq_work_run_list+0x39/0x57
  irq_work_tick+0x60/0x67
  update_process_times+0x57/0x67
  tick_sched_handle+0x4a/0x59
  tick_sched_timer+0x3b/0x64
  __run_hrtimer+0x7a/0x149
  hrtimer_interrupt+0x1cc/0x2c5

Here we deadlock while waiting for a lock which we are already holding. To fix this, do the same thing that upstream does: use the dedicated irq_work IRQ, and use it only for work marked as "hirq", which should only be the FULL_NO_HZ related work.

Cc: stable-rt@vger.kernel.org
Reported-by: Carsten Emde <C.Emde@osadl.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
[ Added back in_irq() check for non PREEMPT_RT configs ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
085ce53a6b
commit
4ff6b67ad1
|
@ -463,14 +463,12 @@ void arch_send_call_function_single_ipi(int cpu)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_IRQ_WORK
|
#ifdef CONFIG_IRQ_WORK
|
||||||
#ifndef CONFIG_PREEMPT_RT_FULL
|
|
||||||
void arch_irq_work_raise(void)
|
void arch_irq_work_raise(void)
|
||||||
{
|
{
|
||||||
if (is_smp())
|
if (is_smp())
|
||||||
smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
|
smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
#endif
|
|
||||||
|
|
||||||
static const char *ipi_types[NR_IPI] = {
|
static const char *ipi_types[NR_IPI] = {
|
||||||
#define S(x,s) [x] = s
|
#define S(x,s) [x] = s
|
||||||
|
|
|
@ -423,7 +423,7 @@ unsigned long profile_pc(struct pt_regs *regs)
|
||||||
EXPORT_SYMBOL(profile_pc);
|
EXPORT_SYMBOL(profile_pc);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
|
#if defined(CONFIG_IRQ_WORK)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
|
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
|
||||||
|
|
|
@ -43,12 +43,10 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
|
||||||
set_irq_regs(old_regs);
|
set_irq_regs(old_regs);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef CONFIG_PREEMPT_RT_FULL
|
|
||||||
void arch_irq_work_raise(void)
|
void arch_irq_work_raise(void)
|
||||||
{
|
{
|
||||||
set_softint(1 << PIL_DEFERRED_PCR_WORK);
|
set_softint(1 << PIL_DEFERRED_PCR_WORK);
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
|
|
||||||
const struct pcr_ops *pcr_ops;
|
const struct pcr_ops *pcr_ops;
|
||||||
EXPORT_SYMBOL_GPL(pcr_ops);
|
EXPORT_SYMBOL_GPL(pcr_ops);
|
||||||
|
|
|
@ -38,7 +38,6 @@ __visible void smp_trace_irq_work_interrupt(struct pt_regs *regs)
|
||||||
exiting_irq();
|
exiting_irq();
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef CONFIG_PREEMPT_RT_FULL
|
|
||||||
void arch_irq_work_raise(void)
|
void arch_irq_work_raise(void)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_X86_LOCAL_APIC
|
#ifdef CONFIG_X86_LOCAL_APIC
|
||||||
|
@ -49,4 +48,3 @@ void arch_irq_work_raise(void)
|
||||||
apic_wait_icr_idle();
|
apic_wait_icr_idle();
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
|
|
|
@ -16,6 +16,7 @@
|
||||||
#include <linux/tick.h>
|
#include <linux/tick.h>
|
||||||
#include <linux/cpu.h>
|
#include <linux/cpu.h>
|
||||||
#include <linux/notifier.h>
|
#include <linux/notifier.h>
|
||||||
|
#include <linux/interrupt.h>
|
||||||
#include <asm/processor.h>
|
#include <asm/processor.h>
|
||||||
|
|
||||||
|
|
||||||
|
@ -51,11 +52,7 @@ static bool irq_work_claim(struct irq_work *work)
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_PREEMPT_RT_FULL
|
|
||||||
void arch_irq_work_raise(void)
|
|
||||||
#else
|
|
||||||
void __weak arch_irq_work_raise(void)
|
void __weak arch_irq_work_raise(void)
|
||||||
#endif
|
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* Lame architectures will get the timer tick callback
|
* Lame architectures will get the timer tick callback
|
||||||
|
|
|
@ -180,6 +180,11 @@ static bool can_stop_full_tick(void)
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!arch_irq_work_has_interrupt()) {
|
||||||
|
trace_tick_stop(0, "missing irq work interrupt\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
/* sched_clock_tick() needs us? */
|
/* sched_clock_tick() needs us? */
|
||||||
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
|
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -1450,7 +1450,7 @@ void update_process_times(int user_tick)
|
||||||
scheduler_tick();
|
scheduler_tick();
|
||||||
run_local_timers();
|
run_local_timers();
|
||||||
rcu_check_callbacks(cpu, user_tick);
|
rcu_check_callbacks(cpu, user_tick);
|
||||||
#if defined(CONFIG_IRQ_WORK)
|
#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
|
||||||
if (in_irq())
|
if (in_irq())
|
||||||
irq_work_run();
|
irq_work_run();
|
||||||
#endif
|
#endif
|
||||||
|
|
Loading…
Reference in New Issue