rcu: Define RCU-bh update API in terms of RCU

Now that the main RCU API knows about softirq disabling and softirq's
quiescent states, the RCU-bh update code can be dispensed with.
This commit therefore removes the RCU-bh update-side implementation and
defines RCU-bh's update-side API in terms of that of either RCU-preempt or
RCU-sched, depending on the setting of the CONFIG_PREEMPT Kconfig option.

In kernels built with CONFIG_RCU_NOCB_CPU=y this has the knock-on effect
of reducing by one the number of rcuo kthreads per CPU.
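
For quick reference, the end result is roughly the following set of thin
wrappers, condensed from the header hunks below (Tree-RCU variants shown;
Tiny RCU maps synchronize_rcu_bh() to synchronize_sched(), as in its hunk).
On CONFIG_PREEMPT_RCU=n kernels, call_rcu() is itself defined as
call_rcu_sched() (see the first hunk), and synchronize_rcu() similarly maps
to synchronize_sched():

    /* Condensed sketch of the new RCU-bh update-side wrappers. */
    static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
    {
            call_rcu(head, func);
    }

    static inline void synchronize_rcu_bh(void)
    {
            synchronize_rcu();
    }

    static inline void synchronize_rcu_bh_expedited(void)
    {
            synchronize_rcu_expedited();
    }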

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Date:   2018-07-01 07:40:52 -07:00
Commit: 65cfe3583b
Parent: ba1c64c272
7 changed files with 48 additions and 194 deletions

include/linux/rcupdate.h

@@ -55,11 +55,15 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
 #define call_rcu call_rcu_sched
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
 void synchronize_sched(void);
 void rcu_barrier_tasks(void);
 
+static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+{
+        call_rcu(head, func);
+}
+
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
@@ -104,7 +108,6 @@ static inline int rcu_preempt_depth(void)
 void rcu_init(void);
 extern int rcu_scheduler_active __read_mostly;
 void rcu_sched_qs(void);
-void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
@@ -326,8 +329,7 @@ static inline void rcu_preempt_sleep_check(void) { }
  * and rcu_assign_pointer(). Some of these could be folded into their
  * callers, but they are left separate in order to ease introduction of
  * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in
- * the future.
+ * (e.g., __rcu_sched, and __srcu), should this make sense in the future.
  */
 
 #ifdef __CHECKER__

include/linux/rcutiny.h

@@ -56,19 +56,23 @@ static inline void cond_synchronize_sched(unsigned long oldstate)
         might_sleep();
 }
 
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
 static inline void synchronize_rcu_expedited(void)
 {
         synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
 }
 
+extern void rcu_barrier_sched(void);
+
 static inline void rcu_barrier(void)
 {
         rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
 }
 
+static inline void rcu_barrier_bh(void)
+{
+        rcu_barrier();
+}
+
 static inline void synchronize_rcu_bh(void)
 {
         synchronize_sched();

include/linux/rcutree.h

@@ -45,7 +45,11 @@ static inline void rcu_virt_note_context_switch(int cpu)
         rcu_note_context_switch(false);
 }
 
-void synchronize_rcu_bh(void);
+static inline void synchronize_rcu_bh(void)
+{
+        synchronize_rcu();
+}
+
 void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
@@ -69,7 +73,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
  */
 static inline void synchronize_rcu_bh_expedited(void)
 {
-        synchronize_sched_expedited();
+        synchronize_rcu_expedited();
 }
 
 void rcu_barrier(void);

kernel/rcu/tiny.c

@@ -51,64 +51,22 @@ static struct rcu_ctrlblk rcu_sched_ctrlblk = {
         .curtail = &rcu_sched_ctrlblk.rcucblist,
 };
 
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
-        .donetail = &rcu_bh_ctrlblk.rcucblist,
-        .curtail = &rcu_bh_ctrlblk.rcucblist,
-};
-
-void rcu_barrier_bh(void)
-{
-        wait_rcu_gp(call_rcu_bh);
-}
-EXPORT_SYMBOL(rcu_barrier_bh);
-
 void rcu_barrier_sched(void)
 {
         wait_rcu_gp(call_rcu_sched);
 }
 EXPORT_SYMBOL(rcu_barrier_sched);
 
-/*
- * Helper function for rcu_sched_qs() and rcu_bh_qs().
- * Also irqs are disabled to avoid confusion due to interrupt handlers
- * invoking call_rcu().
- */
-static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
-{
-        if (rcp->donetail != rcp->curtail) {
-                rcp->donetail = rcp->curtail;
-                return 1;
-        }
-        return 0;
-}
-
-/*
- * Record an rcu quiescent state. And an rcu_bh quiescent state while we
- * are at it, given that any rcu quiescent state is also an rcu_bh
- * quiescent state. Use "+" instead of "||" to defeat short circuiting.
- */
+/* Record an rcu quiescent state. */
 void rcu_sched_qs(void)
 {
         unsigned long flags;
 
         local_irq_save(flags);
-        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
-            rcu_qsctr_help(&rcu_bh_ctrlblk))
-                raise_softirq(RCU_SOFTIRQ);
-        local_irq_restore(flags);
-}
-
-/*
- * Record an rcu_bh quiescent state.
- */
-void rcu_bh_qs(void)
-{
-        unsigned long flags;
-
-        local_irq_save(flags);
-        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
+        if (rcu_sched_ctrlblk.donetail != rcu_sched_ctrlblk.curtail) {
+                rcu_sched_ctrlblk.donetail = rcu_sched_ctrlblk.curtail;
                 raise_softirq(RCU_SOFTIRQ);
+        }
         local_irq_restore(flags);
 }
@@ -122,32 +80,27 @@ void rcu_check_callbacks(int user)
 {
         if (user)
                 rcu_sched_qs();
-        if (user || !in_softirq())
-                rcu_bh_qs();
 }
 
-/*
- * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
- * whose grace period has elapsed.
- */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+/* Invoke the RCU callbacks whose grace period has elapsed. */
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
         struct rcu_head *next, *list;
         unsigned long flags;
 
         /* Move the ready-to-invoke callbacks to a local list. */
         local_irq_save(flags);
-        if (rcp->donetail == &rcp->rcucblist) {
+        if (rcu_sched_ctrlblk.donetail == &rcu_sched_ctrlblk.rcucblist) {
                 /* No callbacks ready, so just leave. */
                 local_irq_restore(flags);
                 return;
         }
-        list = rcp->rcucblist;
-        rcp->rcucblist = *rcp->donetail;
-        *rcp->donetail = NULL;
-        if (rcp->curtail == rcp->donetail)
-                rcp->curtail = &rcp->rcucblist;
-        rcp->donetail = &rcp->rcucblist;
+        list = rcu_sched_ctrlblk.rcucblist;
+        rcu_sched_ctrlblk.rcucblist = *rcu_sched_ctrlblk.donetail;
+        *rcu_sched_ctrlblk.donetail = NULL;
+        if (rcu_sched_ctrlblk.curtail == rcu_sched_ctrlblk.donetail)
+                rcu_sched_ctrlblk.curtail = &rcu_sched_ctrlblk.rcucblist;
+        rcu_sched_ctrlblk.donetail = &rcu_sched_ctrlblk.rcucblist;
         local_irq_restore(flags);
 
         /* Invoke the callbacks on the local list. */
@@ -162,19 +115,13 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
         }
 }
 
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-{
-        __rcu_process_callbacks(&rcu_sched_ctrlblk);
-        __rcu_process_callbacks(&rcu_bh_ctrlblk);
-}
-
 /*
  * Wait for a grace period to elapse. But it is illegal to invoke
  * synchronize_sched() from within an RCU read-side critical section.
  * Therefore, any legal call to synchronize_sched() is a quiescent
  * state, and so on a UP system, synchronize_sched() need do nothing.
- * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
- * benefits of doing might_sleep() to reduce latency.)
+ * (But Lai Jiangshan points out the benefits of doing might_sleep()
+ * to reduce latency.)
  *
  * Cool, huh? (Due to Josh Triplett.)
  */
@@ -188,11 +135,11 @@ void synchronize_sched(void)
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
 /*
- * Helper function for call_rcu() and call_rcu_bh().
+ * Post an RCU callback to be invoked after the end of an RCU-sched grace
+ * period. But since we have but one CPU, that would be after any
+ * quiescent state.
  */
-static void __call_rcu(struct rcu_head *head,
-                       rcu_callback_t func,
-                       struct rcu_ctrlblk *rcp)
+void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 {
         unsigned long flags;
@@ -201,8 +148,8 @@ static void __call_rcu(struct rcu_head *head,
         head->next = NULL;
         local_irq_save(flags);
-        *rcp->curtail = head;
-        rcp->curtail = &head->next;
+        *rcu_sched_ctrlblk.curtail = head;
+        rcu_sched_ctrlblk.curtail = &head->next;
         local_irq_restore(flags);
 
         if (unlikely(is_idle_task(current))) {
@@ -210,28 +157,8 @@ static void __call_rcu(struct rcu_head *head,
                 resched_cpu(0);
         }
 }
-
-/*
- * Post an RCU callback to be invoked after the end of an RCU-sched grace
- * period. But since we have but one CPU, that would be after any
- * quiescent state.
- */
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
-        __call_rcu(head, func, &rcu_sched_ctrlblk);
-}
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
-/*
- * Post an RCU bottom-half callback to be invoked after any subsequent
- * quiescent state.
- */
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
-{
-        __call_rcu(head, func, &rcu_bh_ctrlblk);
-}
-EXPORT_SYMBOL_GPL(call_rcu_bh);
-
 void __init rcu_init(void)
 {
         open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

kernel/rcu/tree.c

@@ -108,7 +108,6 @@ struct rcu_state sname##_state = { \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
-RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 
 static struct rcu_state *const rcu_state_p;
 LIST_HEAD(rcu_struct_flavors);
@@ -244,17 +243,6 @@ void rcu_sched_qs(void)
                            this_cpu_ptr(&rcu_sched_data), true);
 }
 
-void rcu_bh_qs(void)
-{
-        RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
-        if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
-                trace_rcu_grace_period(TPS("rcu_bh"),
-                                       __this_cpu_read(rcu_bh_data.gp_seq),
-                                       TPS("cpuqs"));
-                __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
-        }
-}
-
 void rcu_softirq_qs(void)
 {
         rcu_sched_qs();
@@ -581,7 +569,7 @@ EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
  */
 unsigned long rcu_bh_get_gp_seq(void)
 {
-        return READ_ONCE(rcu_bh_state.gp_seq);
+        return READ_ONCE(rcu_state_p->gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
@@ -621,7 +609,7 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
  */
 void rcu_bh_force_quiescent_state(void)
 {
-        force_quiescent_state(&rcu_bh_state);
+        force_quiescent_state(rcu_state_p);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
@@ -680,10 +668,8 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
         switch (test_type) {
         case RCU_FLAVOR:
-                rsp = rcu_state_p;
-                break;
         case RCU_BH_FLAVOR:
-                rsp = &rcu_bh_state;
+                rsp = rcu_state_p;
                 break;
         case RCU_SCHED_FLAVOR:
                 rsp = &rcu_sched_state;
@@ -2673,26 +2659,15 @@ void rcu_check_callbacks(int user)
                  * nested interrupt. In this case, the CPU is in
                  * a quiescent state, so note it.
                  *
-                 * No memory barrier is required here because both
-                 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
-                 * variables that other CPUs neither access nor modify,
-                 * at least not while the corresponding CPU is online.
+                 * No memory barrier is required here because
+                 * rcu_sched_qs() references only CPU-local variables
+                 * that other CPUs neither access nor modify, at least
+                 * not while the corresponding CPU is online.
                  */
 
                 rcu_sched_qs();
-                rcu_bh_qs();
                 rcu_note_voluntary_context_switch(current);
-
-        } else if (!in_softirq()) {
-                /*
-                 * Get here if this CPU did not take its interrupt from
-                 * softirq, in other words, if it is not interrupting
-                 * a rcu_bh read-side critical section. This is an _bh
-                 * critical section, so note it.
-                 */
-
-                rcu_bh_qs();
         }
         rcu_preempt_check_callbacks();
         if (rcu_pending())
@@ -3079,34 +3054,6 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
-/**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by:
- *
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
- * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- *
- * These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
-{
-        __call_rcu(head, func, &rcu_bh_state, -1, 0);
-}
-EXPORT_SYMBOL_GPL(call_rcu_bh);
-
 /*
  * Queue an RCU callback for lazy invocation after a grace period.
  * This will likely be later named something like "call_rcu_lazy()",
@@ -3191,33 +3138,6 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
-/**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu_bh grace
- * period has elapsed, in other words after all currently executing rcu_bh
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
- * and may be nested.
- *
- * See the description of synchronize_sched() for more detailed information
- * on memory ordering guarantees.
- */
-void synchronize_rcu_bh(void)
-{
-        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
-                         lock_is_held(&rcu_lock_map) ||
-                         lock_is_held(&rcu_sched_lock_map),
-                         "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
-        if (rcu_blocking_is_gp())
-                return;
-        if (rcu_gp_is_expedited())
-                synchronize_rcu_bh_expedited();
-        else
-                wait_rcu_gp(call_rcu_bh);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
 /**
  * get_state_synchronize_rcu - Snapshot current RCU state
  *
@@ -3529,7 +3449,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
  */
 void rcu_barrier_bh(void)
 {
-        _rcu_barrier(&rcu_bh_state);
+        _rcu_barrier(rcu_state_p);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -4180,7 +4100,6 @@ void __init rcu_init(void)
         rcu_bootup_announce();
         rcu_init_geometry();
-        rcu_init_one(&rcu_bh_state);
         rcu_init_one(&rcu_sched_state);
         if (dump_tree)
                 rcu_dump_rcu_node_tree(&rcu_sched_state);

kernel/rcu/tree_plugin.h

@@ -1320,7 +1320,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 static void rcu_kthread_do_work(void)
 {
         rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
-        rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
         rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
 }

kernel/softirq.c

@@ -301,7 +301,6 @@ restart:
                 pending >>= softirq_bit;
         }
 
-        rcu_bh_qs();
         if (__this_cpu_read(ksoftirqd) == current)
                 rcu_softirq_qs();
         local_irq_disable();