rcu: Rename rcu_data's ->deferred_qs to ->exp_deferred_qs
The rcu_data structure's ->deferred_qs field is used to indicate that the current CPU is blocking an expedited grace period (perhaps a future one). Given that it is used only for expedited grace periods, its current name is misleading, so this commit renames it to ->exp_deferred_qs.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
This commit is contained in:
parent
eddded8012
commit
1bb336443c
|
@ -154,7 +154,7 @@ struct rcu_data {
|
||||||
bool core_needs_qs; /* Core waits for quiesc state. */
|
bool core_needs_qs; /* Core waits for quiesc state. */
|
||||||
bool beenonline; /* CPU online at least once. */
|
bool beenonline; /* CPU online at least once. */
|
||||||
bool gpwrap; /* Possible ->gp_seq wrap. */
|
bool gpwrap; /* Possible ->gp_seq wrap. */
|
||||||
bool deferred_qs; /* This CPU awaiting a deferred QS? */
|
bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
|
||||||
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
|
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
|
||||||
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
|
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
|
||||||
unsigned long ticks_this_gp; /* The number of scheduling-clock */
|
unsigned long ticks_this_gp; /* The number of scheduling-clock */
|
||||||
|
|
|
@ -250,7 +250,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
|
||||||
*/
|
*/
|
||||||
static void rcu_report_exp_rdp(struct rcu_data *rdp)
|
static void rcu_report_exp_rdp(struct rcu_data *rdp)
|
||||||
{
|
{
|
||||||
WRITE_ONCE(rdp->deferred_qs, false);
|
WRITE_ONCE(rdp->exp_deferred_qs, false);
|
||||||
rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
|
rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -616,7 +616,7 @@ static void rcu_exp_handler(void *unused)
|
||||||
rcu_dynticks_curr_cpu_in_eqs()) {
|
rcu_dynticks_curr_cpu_in_eqs()) {
|
||||||
rcu_report_exp_rdp(rdp);
|
rcu_report_exp_rdp(rdp);
|
||||||
} else {
|
} else {
|
||||||
rdp->deferred_qs = true;
|
rdp->exp_deferred_qs = true;
|
||||||
set_tsk_need_resched(t);
|
set_tsk_need_resched(t);
|
||||||
set_preempt_need_resched();
|
set_preempt_need_resched();
|
||||||
}
|
}
|
||||||
|
@ -638,7 +638,7 @@ static void rcu_exp_handler(void *unused)
|
||||||
if (t->rcu_read_lock_nesting > 0) {
|
if (t->rcu_read_lock_nesting > 0) {
|
||||||
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
||||||
if (rnp->expmask & rdp->grpmask) {
|
if (rnp->expmask & rdp->grpmask) {
|
||||||
rdp->deferred_qs = true;
|
rdp->exp_deferred_qs = true;
|
||||||
t->rcu_read_unlock_special.b.exp_hint = true;
|
t->rcu_read_unlock_special.b.exp_hint = true;
|
||||||
}
|
}
|
||||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||||
|
@ -661,7 +661,7 @@ static void rcu_exp_handler(void *unused)
|
||||||
*
|
*
|
||||||
* Otherwise, force a context switch after the CPU enables everything.
|
* Otherwise, force a context switch after the CPU enables everything.
|
||||||
*/
|
*/
|
||||||
rdp->deferred_qs = true;
|
rdp->exp_deferred_qs = true;
|
||||||
if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
|
if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
|
||||||
WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
|
WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
|
||||||
rcu_preempt_deferred_qs(t);
|
rcu_preempt_deferred_qs(t);
|
||||||
|
|
|
@ -237,10 +237,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
|
||||||
* no need to check for a subsequent expedited GP. (Though we are
|
* no need to check for a subsequent expedited GP. (Though we are
|
||||||
* still in a quiescent state in any case.)
|
* still in a quiescent state in any case.)
|
||||||
*/
|
*/
|
||||||
if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
|
if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
|
||||||
rcu_report_exp_rdp(rdp);
|
rcu_report_exp_rdp(rdp);
|
||||||
else
|
else
|
||||||
WARN_ON_ONCE(rdp->deferred_qs);
|
WARN_ON_ONCE(rdp->exp_deferred_qs);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -337,7 +337,7 @@ void rcu_note_context_switch(bool preempt)
|
||||||
* means that we continue to block the current grace period.
|
* means that we continue to block the current grace period.
|
||||||
*/
|
*/
|
||||||
rcu_qs();
|
rcu_qs();
|
||||||
if (rdp->deferred_qs)
|
if (rdp->exp_deferred_qs)
|
||||||
rcu_report_exp_rdp(rdp);
|
rcu_report_exp_rdp(rdp);
|
||||||
trace_rcu_utilization(TPS("End context switch"));
|
trace_rcu_utilization(TPS("End context switch"));
|
||||||
barrier(); /* Avoid RCU read-side critical sections leaking up. */
|
barrier(); /* Avoid RCU read-side critical sections leaking up. */
|
||||||
|
@ -451,7 +451,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
|
||||||
*/
|
*/
|
||||||
special = t->rcu_read_unlock_special;
|
special = t->rcu_read_unlock_special;
|
||||||
rdp = this_cpu_ptr(&rcu_data);
|
rdp = this_cpu_ptr(&rcu_data);
|
||||||
if (!special.s && !rdp->deferred_qs) {
|
if (!special.s && !rdp->exp_deferred_qs) {
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -459,7 +459,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
|
||||||
if (special.b.need_qs) {
|
if (special.b.need_qs) {
|
||||||
rcu_qs();
|
rcu_qs();
|
||||||
t->rcu_read_unlock_special.b.need_qs = false;
|
t->rcu_read_unlock_special.b.need_qs = false;
|
||||||
if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) {
|
if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -471,7 +471,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
|
||||||
* tasks are handled when removing the task from the
|
* tasks are handled when removing the task from the
|
||||||
* blocked-tasks list below.
|
* blocked-tasks list below.
|
||||||
*/
|
*/
|
||||||
if (rdp->deferred_qs) {
|
if (rdp->exp_deferred_qs) {
|
||||||
rcu_report_exp_rdp(rdp);
|
rcu_report_exp_rdp(rdp);
|
||||||
if (!t->rcu_read_unlock_special.s) {
|
if (!t->rcu_read_unlock_special.s) {
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
@ -560,7 +560,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
|
||||||
*/
|
*/
|
||||||
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
|
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
|
||||||
{
|
{
|
||||||
return (__this_cpu_read(rcu_data.deferred_qs) ||
|
return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
|
||||||
READ_ONCE(t->rcu_read_unlock_special.s)) &&
|
READ_ONCE(t->rcu_read_unlock_special.s)) &&
|
||||||
t->rcu_read_lock_nesting <= 0;
|
t->rcu_read_lock_nesting <= 0;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue