rcu: Consolidate tree/tiny __rcu_read_{,un}lock() implementations
The CONFIG_TREE_PREEMPT_RCU and CONFIG_TINY_PREEMPT_RCU versions of __rcu_read_lock() and __rcu_read_unlock() are identical, so this commit consolidates them into kernel/rcupdate.c.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
This commit is contained in:
parent e9023c4061
commit 2a3fa843b5
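For context: with CONFIG_PREEMPT_RCU, the rcu_read_lock() and rcu_read_unlock() wrappers in include/linux/rcupdate.h call down into the __rcu_read_lock() and __rcu_read_unlock() being consolidated here. A minimal reader-side sketch of the path this commit touches (the struct, global, and function names are illustrative, not part of this commit):

struct foo {
	int a;
};

static struct foo __rcu *global_foo;

int read_foo_a(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();                  /* -> __rcu_read_lock(): ->rcu_read_lock_nesting++ */
	p = rcu_dereference(global_foo);  /* fetch RCU-protected pointer */
	if (p)
		ret = p->a;
	rcu_read_unlock();                /* -> __rcu_read_unlock(): may call rcu_read_unlock_special() */
	return ret;
}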
include/linux/rcupdate.h
@@ -147,6 +147,7 @@ extern void synchronize_sched(void);
 
 extern void __rcu_read_lock(void);
 extern void __rcu_read_unlock(void);
+extern void rcu_read_unlock_special(struct task_struct *t);
 void synchronize_rcu(void);
 
 /*
kernel/rcupdate.c
@@ -53,6 +53,50 @@
 
 #ifdef CONFIG_PREEMPT_RCU
 
+/*
+ * Preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+	current->rcu_read_lock_nesting++;
+	barrier(); /* critical section after entry code. */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+	struct task_struct *t = current;
+
+	if (t->rcu_read_lock_nesting != 1) {
+		--t->rcu_read_lock_nesting;
+	} else {
+		barrier(); /* critical section before exit code. */
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier(); /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier(); /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
+#ifdef CONFIG_PROVE_LOCKING
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
 /*
  * Check for a task exiting while in a preemptible-RCU read-side
  * critical section, clean up if so. No need to issue warnings,
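The consolidated __rcu_read_unlock() does special processing only for the outermost rcu_read_unlock(): nested unlocks are a plain decrement, while the outermost one parks ->rcu_read_lock_nesting at INT_MIN so that code running while rcu_read_unlock_special() is in flight still sees the task as being inside a read-side critical section. The CONFIG_PROVE_LOCKING check then catches counter underflow, since a negative value well above INT_MIN / 2 can only come from unbalanced rcu_read_unlock() calls. A single-threaded userspace model of the counter logic may make the flow easier to follow (barriers and ACCESS_ONCE() omitted, names hypothetical):

#include <limits.h>
#include <stdio.h>

static int nesting;	/* models t->rcu_read_lock_nesting */
static int special;	/* models t->rcu_read_unlock_special */

static void model_unlock_special(void)
{
	printf("outermost unlock: special processing runs once\n");
	special = 0;
}

static void model_read_lock(void)
{
	nesting++;
}

static void model_read_unlock(void)
{
	if (nesting != 1) {
		--nesting;		/* nested: plain decrement */
	} else {
		nesting = INT_MIN;	/* park the counter while special handling runs */
		if (special)
			model_unlock_special();
		nesting = 0;		/* now truly outside the critical section */
	}
}

int main(void)
{
	model_read_lock();
	model_read_lock();	/* nested read-side critical section */
	special = 1;		/* pretend the task was preempted while reading */
	model_read_unlock();	/* inner unlock: decrement only */
	model_read_unlock();	/* outermost unlock: runs the handler */
	return 0;
}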
kernel/rcutiny_plugin.h
@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
 	RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
@@ -526,24 +525,12 @@ void rcu_preempt_note_context_switch(void)
 	local_irq_restore(flags);
 }
 
-/*
- * Tiny-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
 /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -626,38 +613,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	local_irq_restore(flags);
 }
 
-/*
- * Tiny-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier(); /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier(); /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 /*
  * Check for a quiescent state from the current CPU. When a task blocks,
  * the task is recorded in the rcu_preempt_ctrlblk structure, which is
kernel/rcutree_plugin.h
@@ -78,7 +78,6 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -232,18 +231,6 @@ static void rcu_preempt_note_context_switch(int cpu)
 	local_irq_restore(flags);
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
 /*
  * Check for preempted RCU readers blocking the current grace period
  * for the specified rcu_node structure. If the caller needs a reliable
@@ -310,7 +297,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
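(As in the tiny variant above, dropping static noinline here matches the new extern declaration in include/linux/rcupdate.h: rcu_read_unlock_special() must now be visible to the consolidated __rcu_read_unlock() in kernel/rcupdate.c, which lives in a different translation unit.)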
@@ -418,38 +405,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		barrier(); /* critical section before exit code. */
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier(); /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier(); /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
 
 /*