irq_work: allow certain work in hard irq context
irq_work is processed in softirq context on -RT because we want to avoid
long latencies which might arise from processing lots of perf events.
The nohz_full mode, however, requires its callback to be called from real
hardirq context (commit 76c24fb ("nohz: New APIs to re-evaluate the tick
on full dynticks CPUs")). If it is called from a thread context instead,
checks like "is_idle_task(current)" may give wrong results.
This patch introduces a second list (hirq_work_list) which is used when
irq_work_run() is invoked from hardirq context; in that case only work
items marked with IRQ_WORK_HARD_IRQ are processed.
This patch also compiles out arch_irq_work_raise() on sparc & powerpc for
-RT, as is already done for x86. At least for powerpc it is somewhat
superfluous anyway, because it is called from the timer interrupt, which
should invoke update_process_times().
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
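For illustration, a minimal sketch of how a caller would mark a work item for
hard-irq execution under this scheme. Only struct irq_work, IRQ_WORK_HARD_IRQ,
irq_work_queue() and the DEFINE_PER_CPU pattern come from the patch and the
existing API; the callback name and its body are hypothetical:

    #include <linux/irq_work.h>
    #include <linux/percpu.h>

    /*
     * Hypothetical callback: must be hard-irq safe, because on -RT a work
     * item flagged IRQ_WORK_HARD_IRQ is run when irq_work_run() is called
     * from hardirq context (e.g. the timer interrupt), not from the
     * softirq thread.
     */
    static void my_hardirq_callback(struct irq_work *work)
    {
    	/* e.g. re-evaluate per-CPU state that relies on is_idle_task(current) */
    }

    static DEFINE_PER_CPU(struct irq_work, my_kick_work) = {
    	.func	= my_hardirq_callback,
    	.flags	= IRQ_WORK_HARD_IRQ,	/* queued to hirq_work_list on -RT */
    };

    /* Queueing is unchanged; the flag decides which per-CPU list is used. */
    static void kick_this_cpu(void)
    {
    	irq_work_queue(&__get_cpu_var(my_kick_work));
    }

This mirrors the nohz_full_kick_work change in the tick-sched.c hunk below.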
@@ -463,12 +463,14 @@ void arch_send_call_function_single_ipi(int cpu)
 }
 
 #ifdef CONFIG_IRQ_WORK
+#ifndef CONFIG_PREEMPT_RT_FULL
 void arch_irq_work_raise(void)
 {
 	if (is_smp())
 		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
 }
 #endif
+#endif
 
 static const char *ipi_types[NR_IPI] = {
 #define S(x,s)	[x] = s

@@ -423,7 +423,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
 
 /*
  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...

@@ -43,10 +43,12 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void arch_irq_work_raise(void)
 {
 	set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }
+#endif
 
 const struct pcr_ops *pcr_ops;
 EXPORT_SYMBOL_GPL(pcr_ops);

@@ -16,6 +16,7 @@
 #define IRQ_WORK_BUSY		2UL
 #define IRQ_WORK_FLAGS		3UL
 #define IRQ_WORK_LAZY		4UL /* Doesn't want IPI, wait for tick */
+#define IRQ_WORK_HARD_IRQ	8UL /* Run hard IRQ context, even on RT */
 
 struct irq_work {
 	unsigned long flags;

@@ -20,6 +20,9 @@
 
 
 static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_PER_CPU(struct llist_head, hirq_work_list);
+#endif
 static DEFINE_PER_CPU(int, irq_work_raised);
 
 /*

@@ -48,7 +51,11 @@ static bool irq_work_claim(struct irq_work *work)
 	return true;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+void arch_irq_work_raise(void)
+#else
 void __weak arch_irq_work_raise(void)
+#endif
 {
 	/*
 	 * Lame architectures will get the timer tick callback

@@ -70,8 +77,12 @@ void irq_work_queue(struct irq_work *work)
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (work->flags & IRQ_WORK_HARD_IRQ)
+		llist_add(&work->llnode, &__get_cpu_var(hirq_work_list));
+	else
+#endif
+		llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
 	/*
 	 * If the work is not "lazy" or the tick is stopped, raise the irq
 	 * work interrupt (if supported by the arch), otherwise, just wait

@@ -115,7 +126,12 @@ static void __irq_work_run(void)
 	__this_cpu_write(irq_work_raised, 0);
 	barrier();
 
-	this_list = &__get_cpu_var(irq_work_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (in_irq())
+		this_list = &__get_cpu_var(hirq_work_list);
+	else
+#endif
+		this_list = &__get_cpu_var(irq_work_list);
 	if (llist_empty(this_list))
 		return;
 

@@ -226,6 +226,7 @@ static void nohz_full_kick_work_func(struct irq_work *work)
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 	.func = nohz_full_kick_work_func,
+	.flags = IRQ_WORK_HARD_IRQ,
 };
 
 /*

@@ -1425,7 +1425,7 @@ void update_process_times(int user_tick)
 	scheduler_tick();
 	run_local_timers();
 	rcu_check_callbacks(cpu, user_tick);
-#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+#if defined(CONFIG_IRQ_WORK)
 	if (in_irq())
 		irq_work_run();
 #endif