rcu: Express Tiny RCU updates in terms of RCU rather than RCU-sched

This commit renames Tiny RCU functions so that the lowest level of
functionality is RCU (e.g., synchronize_rcu()) rather than RCU-sched
(e.g., synchronize_sched()).  This provides greater naming compatibility
with Tree RCU, which will in turn permit more LoC removal once
the RCU-sched and RCU-bh update-side API is removed.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Fix Tiny call_rcu()'s EXPORT_SYMBOL() in response to a bug
  report from kbuild test robot. ]
Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Date:   2018-07-03 10:44:44 -07:00
Commit: 709fdce754
Parent: 45975c7d21

4 changed files with 48 additions and 47 deletions
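
The change is easiest to see as an inversion of wrapper direction in include/linux/rcutiny.h: previously synchronize_rcu() was a static inline that forwarded to the real synchronize_sched() in kernel/rcu/tiny.c; after this commit the roles are swapped. A condensed sketch of the pattern, compressed from the hunks below:

    /* Before this commit: RCU-sched is the Tiny RCU primitive. */
    static inline void synchronize_rcu(void)
    {
            synchronize_sched();    /* real grace-period code lives in tiny.c */
    }

    /* After this commit: RCU proper is the primitive. */
    static inline void synchronize_sched(void)
    {
            synchronize_rcu();      /* real grace-period code lives in tiny.c */
    }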

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h

@@ -49,15 +49,14 @@
 /* Exported common interfaces */
 
-#ifdef CONFIG_TINY_RCU
-#define call_rcu	call_rcu_sched
-#else
-void call_rcu(struct rcu_head *head, rcu_callback_t func);
+#ifndef CONFIG_TINY_RCU
+void synchronize_sched(void);
+void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
 #endif
 
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-void synchronize_sched(void);
+void call_rcu(struct rcu_head *head, rcu_callback_t func);
 void rcu_barrier_tasks(void);
+void synchronize_rcu(void);
 
 static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
 {
@@ -68,7 +67,6 @@ static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void synchronize_rcu(void);
 
 /*
  * Defined as a macro as it is a very low level header included from

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h

@@ -36,9 +36,9 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 /* Never flag non-existent other CPUs! */
 static inline bool rcu_eqs_special_set(int cpu) { return false; }
 
-static inline void synchronize_rcu(void)
+static inline void synchronize_sched(void)
 {
-        synchronize_sched();
+        synchronize_rcu();
 }
 
 static inline unsigned long get_state_synchronize_rcu(void)
@@ -61,16 +61,11 @@ static inline void cond_synchronize_sched(unsigned long oldstate)
         might_sleep();
 }
 
-static inline void synchronize_rcu_expedited(void)
-{
-        synchronize_sched();    /* Only one CPU, so pretty fast anyway!!! */
-}
-
-extern void rcu_barrier_sched(void);
+extern void rcu_barrier(void);
 
-static inline void rcu_barrier(void)
+static inline void rcu_barrier_sched(void)
 {
-        rcu_barrier_sched();    /* Only one CPU, so only one list of callbacks! */
+        rcu_barrier();  /* Only one CPU, so only one list of callbacks! */
 }
 
 static inline void rcu_barrier_bh(void)
@@ -88,27 +83,36 @@ static inline void synchronize_rcu_bh_expedited(void)
         synchronize_sched();
 }
 
+static inline void synchronize_rcu_expedited(void)
+{
+        synchronize_sched();
+}
+
 static inline void synchronize_sched_expedited(void)
 {
         synchronize_sched();
 }
 
-static inline void kfree_call_rcu(struct rcu_head *head,
-                                  rcu_callback_t func)
+static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 {
         call_rcu(head, func);
 }
 
-void rcu_sched_qs(void);
+static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+{
+        call_rcu(head, func);
+}
+
+void rcu_qs(void);
+
 static inline void rcu_softirq_qs(void)
 {
-        rcu_sched_qs();
+        rcu_qs();
 }
 
 #define rcu_note_context_switch(preempt) \
         do { \
-                rcu_sched_qs(); \
+                rcu_qs(); \
                 rcu_tasks_qs(current); \
         } while (0)
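
With rcutiny.h now routing everything through call_rcu(), update-side callers look the same under TINY and TREE. A minimal usage sketch follows; struct foo, foo_free_rcu(), and foo_retire() are hypothetical names invented for illustration, while call_rcu(), kfree(), and container_of() are the real kernel APIs:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int data;
            struct rcu_head rcu;
    };

    /* Runs after a grace period; no reader can still hold a reference. */
    static void foo_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct foo, rcu));
    }

    static void foo_retire(struct foo *fp)
    {
            /*
             * On Tiny RCU this enqueues onto the single rcu_ctrlblk
             * list; after this commit, call_rcu_sched() is merely an
             * inline wrapper doing exactly the same thing.
             */
            call_rcu(&fp->rcu, foo_free_rcu);
    }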

diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h

@@ -45,7 +45,6 @@ static inline void rcu_virt_note_context_switch(int cpu)
         rcu_note_context_switch(false);
 }
 
-void synchronize_rcu(void);
 static inline void synchronize_rcu_bh(void)
 {
         synchronize_rcu();

diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c

@@ -46,25 +46,25 @@ struct rcu_ctrlblk {
 };
 
 /* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_sched_ctrlblk = {
-        .donetail       = &rcu_sched_ctrlblk.rcucblist,
-        .curtail        = &rcu_sched_ctrlblk.rcucblist,
+static struct rcu_ctrlblk rcu_ctrlblk = {
+        .donetail       = &rcu_ctrlblk.rcucblist,
+        .curtail        = &rcu_ctrlblk.rcucblist,
 };
 
-void rcu_barrier_sched(void)
+void rcu_barrier(void)
 {
-        wait_rcu_gp(call_rcu_sched);
+        wait_rcu_gp(call_rcu);
 }
-EXPORT_SYMBOL(rcu_barrier_sched);
+EXPORT_SYMBOL(rcu_barrier);
 
 /* Record an rcu quiescent state. */
-void rcu_sched_qs(void)
+void rcu_qs(void)
 {
         unsigned long flags;
 
         local_irq_save(flags);
-        if (rcu_sched_ctrlblk.donetail != rcu_sched_ctrlblk.curtail) {
-                rcu_sched_ctrlblk.donetail = rcu_sched_ctrlblk.curtail;
+        if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
+                rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
                 raise_softirq(RCU_SOFTIRQ);
         }
         local_irq_restore(flags);
@@ -79,7 +79,7 @@ void rcu_sched_qs(void)
 void rcu_check_callbacks(int user)
 {
         if (user)
-                rcu_sched_qs();
+                rcu_qs();
 }
 
 /* Invoke the RCU callbacks whose grace period has elapsed. */
@@ -90,17 +90,17 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 
         /* Move the ready-to-invoke callbacks to a local list. */
         local_irq_save(flags);
-        if (rcu_sched_ctrlblk.donetail == &rcu_sched_ctrlblk.rcucblist) {
+        if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
                 /* No callbacks ready, so just leave. */
                 local_irq_restore(flags);
                 return;
         }
-        list = rcu_sched_ctrlblk.rcucblist;
-        rcu_sched_ctrlblk.rcucblist = *rcu_sched_ctrlblk.donetail;
-        *rcu_sched_ctrlblk.donetail = NULL;
-        if (rcu_sched_ctrlblk.curtail == rcu_sched_ctrlblk.donetail)
-                rcu_sched_ctrlblk.curtail = &rcu_sched_ctrlblk.rcucblist;
-        rcu_sched_ctrlblk.donetail = &rcu_sched_ctrlblk.rcucblist;
+        list = rcu_ctrlblk.rcucblist;
+        rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
+        *rcu_ctrlblk.donetail = NULL;
+        if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
+                rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
+        rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
         local_irq_restore(flags);
 
         /* Invoke the callbacks on the local list. */
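
For readers new to Tiny RCU's data structure: rcucblist is one singly linked list carrying two tail pointers, donetail (end of the callbacks whose grace period has elapsed) and curtail (end of the whole list). rcu_qs() promotes everything queued so far to "done" by setting donetail = curtail, and the splice above then detaches the done sublist in O(1). A standalone userspace sketch of the same manipulation, with simplified types and names mirroring the kernel's:

    struct cb {
            struct cb *next;
            void (*func)(struct cb *);
    };

    static struct cb *rcucblist;                    /* list of callbacks */
    static struct cb **donetail = &rcucblist;       /* ->next of last "done" cb */
    static struct cb **curtail = &rcucblist;        /* ->next of last cb overall */

    static void enqueue(struct cb *head)            /* cf. call_rcu() */
    {
            head->next = NULL;
            *curtail = head;
            curtail = &head->next;
    }

    static void quiescent_state(void)               /* cf. rcu_qs() */
    {
            donetail = curtail;     /* every queued callback is now "done" */
    }

    static void invoke_ready(void)                  /* cf. rcu_process_callbacks() */
    {
            struct cb *list = rcucblist;

            if (donetail == &rcucblist)
                    return;                 /* nothing ready */
            rcucblist = *donetail;          /* detach done sublist, O(1) */
            *donetail = NULL;
            if (curtail == donetail)
                    curtail = &rcucblist;
            donetail = &rcucblist;

            while (list) {                  /* run the detached callbacks */
                    struct cb *next = list->next;

                    list->func(list);
                    list = next;
            }
    }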
@@ -125,21 +125,21 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
  *
  * Cool, huh?  (Due to Josh Triplett.)
  */
-void synchronize_sched(void)
+void synchronize_rcu(void)
 {
         RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                          lock_is_held(&rcu_lock_map) ||
                          lock_is_held(&rcu_sched_lock_map),
                          "Illegal synchronize_sched() in RCU read-side critical section");
 }
-EXPORT_SYMBOL_GPL(synchronize_sched);
+EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 /*
  * Post an RCU callback to be invoked after the end of an RCU-sched grace
  * period.  But since we have but one CPU, that would be after any
  * quiescent state.
  */
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
         unsigned long flags;
@@ -148,16 +148,16 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
         head->next = NULL;
 
         local_irq_save(flags);
-        *rcu_sched_ctrlblk.curtail = head;
-        rcu_sched_ctrlblk.curtail = &head->next;
+        *rcu_ctrlblk.curtail = head;
+        rcu_ctrlblk.curtail = &head->next;
         local_irq_restore(flags);
 
         if (unlikely(is_idle_task(current))) {
-                /* force scheduling for rcu_sched_qs() */
+                /* force scheduling for rcu_qs() */
                 resched_cpu(0);
         }
 }
-EXPORT_SYMBOL_GPL(call_rcu_sched);
+EXPORT_SYMBOL_GPL(call_rcu);
 
 void __init rcu_init(void)
 {
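
A note on the "Cool, huh?  (Due to Josh Triplett.)" comment truncated above: Tiny RCU is built only for uniprocessor, non-preemptible kernels, which is why the renamed synchronize_rcu() needs no body beyond the lockdep assertion. A sketch of the reasoning, assuming that configuration:

    /*
     * One CPU, no preemption:
     *
     *     rcu_read_lock();        // readers run with preemption off,
     *     ...                     // so nothing else can run on the
     *     rcu_read_unlock();      // sole CPU until the section ends
     *
     * synchronize_rcu() may block, so calling it from inside a
     * read-side critical section is illegal.  Merely executing it on
     * the one CPU therefore proves that every pre-existing reader has
     * already completed: the grace period has elapsed before the
     * function is even entered.
     */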