rcu: Make ksoftirqd do RCU quiescent states
Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable to network-based denial-of-service attacks. This patch therefore makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq() is running in ksoftirqd context. A wrapper layer is interposed so that other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying function __do_softirq_common() does the actual work.

The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is that there might be a local_bh_enable() inside an RCU-preempt read-side critical section. This local_bh_enable() can invoke __do_softirq() directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be an illegal RCU-preempt quiescent state in the middle of an RCU-preempt read-side critical section. Therefore, quiescent states can only happen in cases where __do_softirq() is invoked directly from ksoftirqd.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 482b29e64e
commit 010ec70629
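The hazard the commit message describes can be condensed into a small compilable mock (an illustration only, not kernel code: the stubbed primitives, the in_rcu_preempt_read_side flag, and the userspace setting are assumptions made for the sketch). It shows why an unconditional rcu_bh_qs() reached via local_bh_enable() would amount to a quiescent state inside an RCU-preempt read-side critical section on PREEMPT_RT_FULL:

#include <stdio.h>

static int in_rcu_preempt_read_side;

static void rcu_read_lock(void)   { in_rcu_preempt_read_side = 1; }
static void rcu_read_unlock(void) { in_rcu_preempt_read_side = 0; }

/* Stand-in for the real quiescent-state report. */
static void rcu_preempt_qs(int cpu)
{
	(void)cpu;
	if (in_rcu_preempt_read_side)
		printf("BUG: quiescent state inside RCU-preempt read-side critical section\n");
}

/* On PREEMPT_RT_FULL, rcu_bh_qs() just calls rcu_preempt_qs() (see the rcu_bh_qs() hunk below). */
static void rcu_bh_qs(int cpu)
{
	rcu_preempt_qs(cpu);
}

/* local_bh_enable() can run softirqs directly, i.e. outside ksoftirqd context. */
static void local_bh_enable(void)
{
	rcu_bh_qs(0);	/* pre-patch behaviour: unconditional report */
}

int main(void)
{
	rcu_read_lock();
	local_bh_enable();	/* would report the bogus quiescent state */
	rcu_read_unlock();
	return 0;
}

The patch below avoids this by reporting the quiescent state only when the softirq work is driven from ksoftirqd.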
@@ -229,13 +229,7 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 void rcu_init(void);
 void rcu_sched_qs(int cpu);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline void rcu_bh_qs(int cpu) { }
-#else
 void rcu_bh_qs(int cpu);
-#endif
-
 void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 void rcu_idle_enter(void);
@@ -199,7 +199,14 @@ void rcu_sched_qs(int cpu)
 	rdp->passed_quiesce = 1;
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(int cpu);
+
+void rcu_bh_qs(int cpu)
+{
+	rcu_preempt_qs(cpu);
+}
+#else
 void rcu_bh_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
@@ -1575,7 +1575,7 @@ static void rcu_prepare_kthreads(int cpu)
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -1591,6 +1591,9 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 	*delta_jiffies = ULONG_MAX;
 	return rcu_cpu_has_callbacks(cpu, NULL);
 }
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
+
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*
  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
@@ -1688,6 +1691,8 @@ static bool rcu_try_advance_all_cbs(void)
 	return cbs_ready;
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 /*
  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
@@ -1726,6 +1731,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
 	}
 	return 0;
 }
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
 
 /*
  * Prepare a CPU for idle from an RCU perspective.  The first major task
@@ -141,7 +141,7 @@ static void wakeup_softirqd(void)
 		wake_up_process(tsk);
 }
 
-static void handle_pending_softirqs(u32 pending, int cpu)
+static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
 {
 	struct softirq_action *h = softirq_vec;
 	unsigned int prev_count = preempt_count();
@@ -167,7 +167,8 @@ static void handle_pending_softirqs(u32 pending, int cpu)
 			       prev_count, preempt_count());
 			preempt_count_set(prev_count);
 		}
-		rcu_bh_qs(cpu);
+		if (need_rcu_bh_qs)
+			rcu_bh_qs(cpu);
 		h++;
 		pending >>= softirq_bit;
 	}
@@ -349,7 +350,7 @@ restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
 
-	handle_pending_softirqs(pending, cpu);
+	handle_pending_softirqs(pending, cpu, 1);
 
 	pending = local_softirq_pending();
 	if (pending) {
@@ -398,7 +399,12 @@ static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
 static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
 static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
 
-asmlinkage void __do_softirq(void);
+static void __do_softirq_common(int need_rcu_bh_qs);
+
+asmlinkage void __do_softirq(void)
+{
+	__do_softirq_common(0);
+}
 
 void __init softirq_early_init(void)
 {
@@ -507,7 +513,7 @@ asmlinkage void __do_softirq(void)
 
 	lockdep_softirq_enter();
 
-	handle_pending_softirqs(pending, cpu);
+	handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
 
 	pending = local_softirq_pending();
 	if (pending)
@@ -546,7 +552,7 @@ static int __thread_do_softirq(int cpu)
 	 * schedule!
 	 */
 	if (local_softirq_pending())
-		__do_softirq();
+		__do_softirq_common(cpu >= 0);
 	local_unlock(local_softirq_lock);
 	unpin_current_cpu();
 	preempt_disable();
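Taken together, the softirq.c hunks arrange for rcu_bh_qs() to be reported only on the ksoftirqd path: __do_softirq() becomes a wrapper that forwards need_rcu_bh_qs = 0, while __thread_do_softirq() passes cpu >= 0. The control flow can be mocked up in a few lines of standalone C (names follow the patch, but the stub handler loop, the fixed CPU number, and main() are assumptions of the sketch, not kernel code):

#include <stdio.h>

/* Stand-in for the real per-CPU RCU-bh quiescent-state report. */
static void rcu_bh_qs(int cpu)
{
	printf("cpu %d: RCU-bh quiescent state\n", cpu);
}

/* Mirrors the third parameter added by the patch. */
static void handle_pending_softirqs(unsigned int pending, int cpu,
				    int need_rcu_bh_qs)
{
	while (pending) {
		/* ... run one pending softirq handler here ... */
		if (need_rcu_bh_qs)
			rcu_bh_qs(cpu);
		pending >>= 1;
	}
}

/* Common worker; in the kernel the CPU comes from the current context. */
static void __do_softirq_common(int need_rcu_bh_qs)
{
	handle_pending_softirqs(0x5, 0, need_rcu_bh_qs);
}

/* Wrapper used by everything that is not ksoftirqd: no quiescent states. */
static void __do_softirq(void)
{
	__do_softirq_common(0);
}

int main(void)
{
	__do_softirq();			/* e.g. via local_bh_enable(): silent */
	__do_softirq_common(1);		/* ksoftirqd-style invocation: reports QS */
	return 0;
}

Running the mock prints a quiescent-state line only for the call that models the ksoftirqd invocation, matching the behaviour the commit message describes.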