irqwork: push most work into softirq context

Initially we deferred all irqwork into softirq because we didn't want the
latency spikes if perf or another user was busy and delayed the RT task.
The NOHZ trigger (nohz_full_kick_work) was the first user that did not work
as expected if it did not run in the original irqwork context, so we had to
bring hard IRQ context back for it. rto_push_irq_work_func is the second
user that requires this.

This patch adds IRQ_WORK_HARD_IRQ, which makes sure the callback runs in
hard (raw) IRQ context even on RT. Everything else is deferred into softirq
context. Without -RT we keep the original behavior.
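
For illustration, a minimal sketch of how a user opts into hard IRQ context
(editor's example, not part of the patch; my_work, my_hard_handler and
my_setup are made-up names, the flag handling mirrors the RCU and scheduler
hunks below):

  #include <linux/irq_work.h>

  static void my_hard_handler(struct irq_work *work)
  {
  	/* Runs from the hard IRQ path even on PREEMPT_RT, because
  	 * IRQ_WORK_HARD_IRQ keeps the work on the raised_list. */
  }

  static struct irq_work my_work;

  static void my_setup(void)
  {
  	init_irq_work(&my_work, my_hard_handler);
  	my_work.flags |= IRQ_WORK_HARD_IRQ;	/* opt out of softirq deferral */
  	irq_work_queue(&my_work);		/* queued on this CPU's raised_list */
  }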

This patch incorporates tglx's original work, reworked a little to bring
back arch_irq_work_raise() where possible, and a few fixes from Steven
Rostedt and Mike Galbraith.

[bigeasy: fold in tglx's irq_work_tick_soft(), which splits irq_work_tick()
          into a hard and a soft variant]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

commit 87f8ee14f4 (parent 89b200404b)
Author:    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date:      2015-06-23 15:32:51 +02:00
Committer: Alibek Omarov

6 files changed, 60 insertions(+), 12 deletions(-)
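
Editor's note, not part of the patch: the queueing policy the diff below
implements boils down to one list selection. queue_target() is a
hypothetical helper restating __irq_work_queue_local(); lazy_list and
raised_list are the per-CPU lists local to kernel/irq_work.c:

  static struct llist_head *queue_target(struct irq_work *work)
  {
  	bool lazy = work->flags & IRQ_WORK_LAZY;
  	bool soft_on_rt = IS_ENABLED(CONFIG_PREEMPT_RT) &&
  			  !(work->flags & IRQ_WORK_HARD_IRQ);

  	/* Lazy work, and on RT everything not marked HARD_IRQ,
  	 * is deferred; only the rest stays in hard IRQ context. */
  	if (lazy || soft_on_rt)
  		return this_cpu_ptr(&lazy_list);	/* tick / TIMER_SOFTIRQ */
  	return this_cpu_ptr(&raised_list);		/* hard IRQ context */
  }

On RT the lazy_list is then drained from TIMER_SOFTIRQ via the new
irq_work_tick_soft(); without RT it is drained from the hard IRQ paths
(irq_work_run()/irq_work_tick()) as before.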

--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -18,6 +18,8 @@
 /* Doesn't want IPI, wait for tick: */
 #define IRQ_WORK_LAZY		BIT(2)
+/* Run hard IRQ context, even on RT */
+#define IRQ_WORK_HARD_IRQ	BIT(3)
 
 #define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
@@ -52,4 +54,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
 static inline void irq_work_run(void) { }
 #endif
 
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT)
+void irq_work_tick_soft(void);
+#else
+static inline void irq_work_tick_soft(void) { }
+#endif
+
 #endif /* _LINUX_IRQ_WORK_H */

--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -18,6 +18,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/interrupt.h>
 #include <asm/processor.h>
@@ -60,13 +61,19 @@ void __weak arch_irq_work_raise(void)
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
+	struct llist_head *list;
+	bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT);
+
+	lazy_work = work->flags & IRQ_WORK_LAZY;
+
 	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
-	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+	if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+		list = this_cpu_ptr(&lazy_list);
+	else
+		list = this_cpu_ptr(&raised_list);
+
+	if (llist_add(&work->llnode, list)) {
+		if (!lazy_work || tick_nohz_tick_stopped())
 			arch_irq_work_raise();
 	}
 }
@@ -108,9 +115,16 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	preempt_disable();
 	if (cpu != smp_processor_id()) {
+		struct llist_head *list;
+
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+
+		if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(work->flags & IRQ_WORK_HARD_IRQ))
+			list = &per_cpu(lazy_list, cpu);
+		else
+			list = &per_cpu(raised_list, cpu);
+
+		if (llist_add(&work->llnode, list))
 			arch_send_call_function_single_ipi(cpu);
 	} else {
 		__irq_work_queue_local(work);
@@ -129,9 +143,8 @@ bool irq_work_needs_cpu(void)
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised) || arch_irq_work_has_interrupt())
-		if (llist_empty(lazy))
-			return false;
+	if (llist_empty(raised) && llist_empty(lazy))
+		return false;
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@@ -145,8 +158,12 @@ static void irq_work_run_list(struct llist_head *list)
 	struct llist_node *llnode;
 	unsigned long flags;
 
+#ifndef CONFIG_PREEMPT_RT
+	/*
+	 * nort: On RT IRQ-work may run in SOFTIRQ context.
+	 */
 	BUG_ON(!irqs_disabled());
-
+#endif
 	if (llist_empty(list))
 		return;
@@ -178,7 +195,16 @@ static void irq_work_run_list(struct llist_head *list)
 void irq_work_run(void)
 {
 	irq_work_run_list(this_cpu_ptr(&raised_list));
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
+	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+		/*
+		 * NOTE: we raise softirq via IPI for safety,
+		 * and execute in irq_work_tick() to move the
+		 * overhead from hard to soft irq context.
+		 */
+		if (!llist_empty(this_cpu_ptr(&lazy_list)))
+			raise_softirq(TIMER_SOFTIRQ);
+	} else
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
@@ -188,8 +214,17 @@ void irq_work_tick(void)
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
 
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT)
+void irq_work_tick_soft(void)
+{
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+#endif
+
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not

--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1103,6 +1103,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 		    (rnp->ffmask & rdp->grpmask)) {
 			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
+			rdp->rcu_iw.flags = IRQ_WORK_HARD_IRQ;
 			rdp->rcu_iw_pending = true;
 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);

--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -502,6 +502,7 @@ static int init_rootdomain(struct root_domain *rd)
 	rd->rto_cpu = -1;
 	raw_spin_lock_init(&rd->rto_lock);
 	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
+	rd->rto_push_work.flags |= IRQ_WORK_HARD_IRQ;
 #endif
 
 	init_dl_bw(&rd->dl_bw);

--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -239,6 +239,7 @@ static void nohz_full_kick_func(struct irq_work *work)
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 	.func = nohz_full_kick_func,
+	.flags = IRQ_WORK_HARD_IRQ,
 };
 
 /*

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1797,6 +1797,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
+	irq_work_tick_soft();
+
 	__run_timers(base);
 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));