From de078d875cc7fc709f7818f26d38389c04369826 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Sep 2009 15:54:36 -0700 Subject: [PATCH 01/11] rcu: Need to update rnp->gpnum if preemptable RCU is to be reliable Without this patch, tasks preempted in RCU read-side critical sections can fail to block the grace period, given that rnp->gpnum is used to determine which rnp->blocked_tasks[] element the preempted task is enqueued on. Before the patch, rnp->gpnum is always zero, so preempted tasks are always enqueued on rnp->blocked_tasks[0], which is correct only when the current CPU has not checked into the current grace period and the grace-period number is even, or, similarly, if the current CPU -has- checked into the current grace period and the grace-period number is odd. Signed-off-by: Paul E. McKenney Acked-by: Steven Rostedt Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josht@linux.vnet.ibm.com Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org LKML-Reference: <12524504771622-git-send-email-> Signed-off-by: Ingo Molnar --- kernel/rcutree.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 6b11b07cfe7f..c634a92d1217 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -632,6 +632,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) /* Special-case the common single-level case. */ if (NUM_RCU_NODES == 1) { rnp->qsmask = rnp->qsmaskinit; + rnp->gpnum = rsp->gpnum; rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ spin_unlock_irqrestore(&rnp->lock, flags); return; @@ -657,8 +658,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) */ rnp_end = rsp->level[NUM_RCU_LVLS - 1]; - for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) + for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) { rnp_cur->qsmask = rnp_cur->qsmaskinit; + rnp->gpnum = rsp->gpnum; + } /* * Now set up the leaf nodes. Here we must be careful. First, @@ -679,6 +682,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) for (; rnp_cur < rnp_end; rnp_cur++) { spin_lock(&rnp_cur->lock); /* irqs already disabled. */ rnp_cur->qsmask = rnp_cur->qsmaskinit; + rnp->gpnum = rsp->gpnum; spin_unlock(&rnp_cur->lock); /* irqs already disabled. */ } From b835db1f9cadaf008750a32664e35a207782c95e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Sep 2009 15:54:37 -0700 Subject: [PATCH 02/11] rcu: Initialize multi-level RCU grace periods holding locks Prior implementations initialized the root and any internal nodes without holding locks, then initialized the leaves holding locks. This is a false economy, as the leaf nodes will usually greatly outnumber the root and internal nodes. Acquiring locks on all nodes is conceptually much simpler as well. Signed-off-by: Paul E. 
McKenney Acked-by: Steven Rostedt Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josht@linux.vnet.ibm.com Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org LKML-Reference: <12524504773190-git-send-email-> Signed-off-by: Ingo Molnar --- kernel/rcutree.c | 41 ++++++++++++----------------------------- 1 file changed, 12 insertions(+), 29 deletions(-) diff --git a/kernel/rcutree.c b/kernel/rcutree.c index c634a92d1217..da301e2fd84f 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -645,41 +645,24 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) spin_lock(&rsp->onofflock); /* irqs already disabled. */ /* - * Set the quiescent-state-needed bits in all the non-leaf RCU - * nodes for all currently online CPUs. This operation relies - * on the layout of the hierarchy within the rsp->node[] array. - * Note that other CPUs will access only the leaves of the - * hierarchy, which still indicate that no grace period is in - * progress. In addition, we have excluded CPU-hotplug operations. - * - * We therefore do not need to hold any locks. Any required - * memory barriers will be supplied by the locks guarding the - * leaf rcu_nodes in the hierarchy. - */ - - rnp_end = rsp->level[NUM_RCU_LVLS - 1]; - for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) { - rnp_cur->qsmask = rnp_cur->qsmaskinit; - rnp->gpnum = rsp->gpnum; - } - - /* - * Now set up the leaf nodes. Here we must be careful. First, - * we need to hold the lock in order to exclude other CPUs, which - * might be contending for the leaf nodes' locks. Second, as - * soon as we initialize a given leaf node, its CPUs might run - * up the rest of the hierarchy. We must therefore acquire locks - * for each node that we touch during this stage. (But we still - * are excluding CPU-hotplug operations.) + * Set the quiescent-state-needed bits in all the rcu_node + * structures for all currently online CPUs in breadth-first + * order, starting from the root rcu_node structure. This + * operation relies on the layout of the hierarchy within the + * rsp->node[] array. Note that other CPUs will access only + * the leaves of the hierarchy, which still indicate that no + * grace period is in progress, at least until the corresponding + * leaf node has been initialized. In addition, we have excluded + * CPU-hotplug operations. * * Note that the grace period cannot complete until we finish * the initialization process, as there will be at least one * qsmask bit set in the root node until that time, namely the - * one corresponding to this CPU. + * one corresponding to this CPU, due to the fact that we have + * irqs disabled. */ rnp_end = &rsp->node[NUM_RCU_NODES]; - rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; - for (; rnp_cur < rnp_end; rnp_cur++) { + for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) { spin_lock(&rnp_cur->lock); /* irqs already disabled. */ rnp_cur->qsmask = rnp_cur->qsmaskinit; rnp->gpnum = rsp->gpnum; From b8d57a76d9f92aa63b4f12990da5697b17000b0c Mon Sep 17 00:00:00 2001 From: Josh Triplett Date: Tue, 8 Sep 2009 15:54:35 -0700 Subject: [PATCH 03/11] rcutorture: Occasionally delay readers enough to make RCU force_quiescent_state rcutorture already delays readers, but never for long enough to make RCU force a quiescent state. Add an occasional delay of 50ms. Signed-off-by: Josh Triplett Signed-off-by: Paul E. 
McKenney Acked-by: Steven Rostedt Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josht@linux.vnet.ibm.com Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org LKML-Reference: <12524504772607-git-send-email-> Signed-off-by: Ingo Molnar --- kernel/rcutorture.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index b33db539a8ad..328a8257c885 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -281,14 +281,17 @@ static int rcu_torture_read_lock(void) __acquires(RCU) static void rcu_read_delay(struct rcu_random_state *rrsp) { - long delay; - const long longdelay = 200; + const unsigned long shortdelay_us = 200; + const unsigned long longdelay_ms = 50; - /* We want there to be long-running readers, but not all the time. */ + /* We want a short delay sometimes to make a reader delay the grace + * period, and we want a long delay occasionally to trigger + * force_quiescent_state. */ - delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay); - if (!delay) - udelay(longdelay); + if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) + mdelay(longdelay_ms); + if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) + udelay(shortdelay_us); } static void rcu_torture_read_unlock(int idx) __releases(RCU) From bbe3eae8bb039b5ffd64a6e3d1a0deaa1f3cbae9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 13 Sep 2009 09:15:08 -0700 Subject: [PATCH 04/11] rcu: Kconfig help needs to say that TREE_PREEMPT_RCU scales down To quote Valdis: This leaves somebody who has a laptop wondering which choice is best for a system with only one or two cores that has CONFIG_PREEMPT defined. One choice says it scales down nicely, the other explicitly has a 'depends on PREEMPT' attached to it... So add "scales down nicely" to TREE_PREEMPT_RCU to match that of TREE_RCU. Suggested-by: Valdis Kletnieks Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org LKML-Reference: <12528585112362-git-send-email-> Signed-off-by: Ingo Molnar --- init/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/init/Kconfig b/init/Kconfig index 8e8b76d8a272..4c2c936afd2d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -331,7 +331,8 @@ config TREE_PREEMPT_RCU This option selects the RCU implementation that is designed for very large SMP systems with hundreds or thousands of CPUs, but for which real-time response - is also required. + is also required. It also scales down nicely to + smaller systems. endchoice From b0e165c035b13e1074fa0b555318bd9cb7102558 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 13 Sep 2009 09:15:09 -0700 Subject: [PATCH 05/11] rcu: Add debug checks to TREE_PREEMPT_RCU for premature grace periods Check to make sure that there are no blocked tasks for the previous grace period while initializing for the next grace period, verify that rcu_preempt_qs() is given the correct CPU number and is never called for an offline CPU. Signed-off-by: Paul E. 
McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <12528585111986-git-send-email-> Signed-off-by: Ingo Molnar --- kernel/rcutree.c | 2 ++ kernel/rcutree_plugin.h | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/kernel/rcutree.c b/kernel/rcutree.c index da301e2fd84f..e9a4ae94647f 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -632,6 +632,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) /* Special-case the common single-level case. */ if (NUM_RCU_NODES == 1) { rnp->qsmask = rnp->qsmaskinit; + rcu_preempt_check_blocked_tasks(rnp); rnp->gpnum = rsp->gpnum; rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ spin_unlock_irqrestore(&rnp->lock, flags); @@ -665,6 +666,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) { spin_lock(&rnp_cur->lock); /* irqs already disabled. */ rnp_cur->qsmask = rnp_cur->qsmaskinit; + rcu_preempt_check_blocked_tasks(rnp); rnp->gpnum = rsp->gpnum; spin_unlock(&rnp_cur->lock); /* irqs already disabled. */ } diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 47789369ea59..b8e4b0384f00 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -86,6 +86,7 @@ static void rcu_preempt_qs(int cpu) if (t->rcu_read_lock_nesting && (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { + WARN_ON_ONCE(cpu != smp_processor_id()); /* Possibly blocking in an RCU read-side critical section. */ rdp = rcu_preempt_state.rda[cpu]; @@ -103,7 +104,11 @@ static void rcu_preempt_qs(int cpu) * state for the current grace period), then as long * as that task remains queued, the current grace period * cannot end. + * + * But first, note that the current CPU must still be + * on line! */ + WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1); list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); smp_mb(); /* Ensure later ctxt swtch seen after above. */ @@ -258,6 +263,18 @@ static void rcu_print_task_stall(struct rcu_node *rnp) #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ +/* + * Check that the list of blocked tasks for the newly completed grace + * period is in fact empty. It is a serious bug to complete a grace + * period that still has RCU readers blocked! This function must be + * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock + * must be held by the caller. + */ +static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) +{ + WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])); +} + /* * Check for preempted RCU readers for the specified rcu_node structure. * If the caller needs a reliable answer, it must hold the rcu_node's @@ -450,6 +467,14 @@ static void rcu_print_task_stall(struct rcu_node *rnp) #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ +/* + * Because there is no preemptable RCU, there can be no readers blocked, + * so there is no need to check for blocked tasks. + */ +static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) +{ +} + /* * Because preemptable RCU does not exist, there are never any preempted * RCU readers. From c3422bea5f09b0e85704f51f2b01271630b8940b Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Sun, 13 Sep 2009 09:15:10 -0700 Subject: [PATCH 06/11] rcu: Simplify rcu_read_unlock_special() quiescent-state accounting The earlier approach required two scheduling-clock ticks to note an preemptable-RCU quiescent state in the situation in which the scheduling-clock interrupt is unlucky enough to always interrupt an RCU read-side critical section. With this change, the quiescent state is instead noted by the outermost rcu_read_unlock() immediately following the first scheduling-clock tick, or, alternatively, by the first subsequent context switch. Therefore, this change also speeds up grace periods. Suggested-by: Josh Triplett Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <12528585111945-git-send-email-> Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 - kernel/rcutree.c | 15 +++++------- kernel/rcutree_plugin.h | 54 ++++++++++++++++++++--------------------- 3 files changed, 32 insertions(+), 38 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index f3d74bd04d18..c62a9f84d614 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1740,7 +1740,6 @@ extern cputime_t task_gtime(struct task_struct *p); #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ -#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. */ static inline void rcu_copy_process(struct task_struct *p) { diff --git a/kernel/rcutree.c b/kernel/rcutree.c index e9a4ae94647f..6c99553e9f15 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -107,27 +107,23 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp, */ void rcu_sched_qs(int cpu) { - unsigned long flags; struct rcu_data *rdp; - local_irq_save(flags); rdp = &per_cpu(rcu_sched_data, cpu); - rdp->passed_quiesc = 1; rdp->passed_quiesc_completed = rdp->completed; - rcu_preempt_qs(cpu); - local_irq_restore(flags); + barrier(); + rdp->passed_quiesc = 1; + rcu_preempt_note_context_switch(cpu); } void rcu_bh_qs(int cpu) { - unsigned long flags; struct rcu_data *rdp; - local_irq_save(flags); rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; rdp->passed_quiesc_completed = rdp->completed; - local_irq_restore(flags); + barrier(); + rdp->passed_quiesc = 1; } #ifdef CONFIG_NO_HZ @@ -615,6 +611,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) /* Advance to a new grace period and initialize state. */ rsp->gpnum++; + WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT); rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; record_gp_stall_check_time(rsp); diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index b8e4b0384f00..c9616e48379b 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -64,34 +64,42 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); * not in a quiescent state. There might be any number of tasks blocked * while in an RCU read-side critical section. 
*/ -static void rcu_preempt_qs_record(int cpu) +static void rcu_preempt_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); - rdp->passed_quiesc = 1; rdp->passed_quiesc_completed = rdp->completed; + barrier(); + rdp->passed_quiesc = 1; } /* - * We have entered the scheduler or are between softirqs in ksoftirqd. - * If we are in an RCU read-side critical section, we need to reflect - * that in the state of the rcu_node structure corresponding to this CPU. - * Caller must disable hardirqs. + * We have entered the scheduler, and the current task might soon be + * context-switched away from. If this task is in an RCU read-side + * critical section, we will no longer be able to rely on the CPU to + * record that fact, so we enqueue the task on the appropriate entry + * of the blocked_tasks[] array. The task will dequeue itself when + * it exits the outermost enclosing RCU read-side critical section. + * Therefore, the current grace period cannot be permitted to complete + * until the blocked_tasks[] entry indexed by the low-order bit of + * rnp->gpnum empties. + * + * Caller must disable preemption. */ -static void rcu_preempt_qs(int cpu) +static void rcu_preempt_note_context_switch(int cpu) { struct task_struct *t = current; + unsigned long flags; int phase; struct rcu_data *rdp; struct rcu_node *rnp; if (t->rcu_read_lock_nesting && (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { - WARN_ON_ONCE(cpu != smp_processor_id()); /* Possibly blocking in an RCU read-side critical section. */ rdp = rcu_preempt_state.rda[cpu]; rnp = rdp->mynode; - spin_lock(&rnp->lock); + spin_lock_irqsave(&rnp->lock, flags); t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; t->rcu_blocked_node = rnp; @@ -112,7 +120,7 @@ static void rcu_preempt_qs(int cpu) phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1); list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); smp_mb(); /* Ensure later ctxt swtch seen after above. */ - spin_unlock(&rnp->lock); + spin_unlock_irqrestore(&rnp->lock, flags); } /* @@ -124,9 +132,8 @@ static void rcu_preempt_qs(int cpu) * grace period, then the fact that the task has been enqueued * means that we continue to block the current grace period. */ - rcu_preempt_qs_record(cpu); - t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS | - RCU_READ_UNLOCK_GOT_QS); + rcu_preempt_qs(cpu); + t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; } /* @@ -162,7 +169,7 @@ static void rcu_read_unlock_special(struct task_struct *t) special = t->rcu_read_unlock_special; if (special & RCU_READ_UNLOCK_NEED_QS) { t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; - t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS; + rcu_preempt_qs(smp_processor_id()); } /* Hardware IRQ handlers cannot block. */ @@ -199,9 +206,7 @@ static void rcu_read_unlock_special(struct task_struct *t) */ if (!empty && rnp->qsmask == 0 && list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) { - t->rcu_read_unlock_special &= - ~(RCU_READ_UNLOCK_NEED_QS | - RCU_READ_UNLOCK_GOT_QS); + t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; if (rnp->parent == NULL) { /* Only one rcu_node in the tree. 
*/ cpu_quiet_msk_finish(&rcu_preempt_state, flags); @@ -352,19 +357,12 @@ static void rcu_preempt_check_callbacks(int cpu) struct task_struct *t = current; if (t->rcu_read_lock_nesting == 0) { - t->rcu_read_unlock_special &= - ~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS); - rcu_preempt_qs_record(cpu); + t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; + rcu_preempt_qs(cpu); return; } if (per_cpu(rcu_preempt_data, cpu).qs_pending) { - if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) { - rcu_preempt_qs_record(cpu); - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS; - } else if (!(t->rcu_read_unlock_special & - RCU_READ_UNLOCK_NEED_QS)) { - t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; - } + t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; } } @@ -451,7 +449,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); * Because preemptable RCU does not exist, we never have to check for * CPUs being in quiescent states. */ -static void rcu_preempt_qs(int cpu) +static void rcu_preempt_note_context_switch(int cpu) { } From 16e3081191837a6a04733de5cd5d1d1b303140d4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 13 Sep 2009 09:15:11 -0700 Subject: [PATCH 07/11] rcu: Fix synchronize_rcu() for TREE_PREEMPT_RCU The redirection of synchronize_sched() to synchronize_rcu() was appropriate for TREE_RCU, but not for TREE_PREEMPT_RCU. Fix this by creating an underlying synchronize_sched(). TREE_RCU then redirects synchronize_rcu() to synchronize_sched(), while TREE_PREEMPT_RCU has its own version of synchronize_rcu(). Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <12528585111916-git-send-email-> Signed-off-by: Ingo Molnar --- include/linux/rcupdate.h | 23 +++++---------------- include/linux/rcutree.h | 4 ++-- kernel/rcupdate.c | 44 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 50 insertions(+), 21 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 95e0615f4d75..39dce83c4865 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -52,8 +52,13 @@ struct rcu_head { }; /* Exported common interfaces */ +#ifdef CONFIG_TREE_PREEMPT_RCU extern void synchronize_rcu(void); +#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ +#define synchronize_rcu synchronize_sched +#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ extern void synchronize_rcu_bh(void); +extern void synchronize_sched(void); extern void rcu_barrier(void); extern void rcu_barrier_bh(void); extern void rcu_barrier_sched(void); @@ -261,24 +266,6 @@ struct rcu_synchronize { extern void wakeme_after_rcu(struct rcu_head *head); -/** - * synchronize_sched - block until all CPUs have exited any non-preemptive - * kernel code sequences. - * - * This means that all preempt_disable code sequences, including NMI and - * hardware-interrupt handlers, in progress on entry will have completed - * before this primitive returns. However, this does not guarantee that - * softirq handlers will have completed, since in some kernels, these - * handlers can run in process context, and can block. - * - * This primitive provides the guarantees made by the (now removed) - * synchronize_kernel() API. In contrast, synchronize_rcu() only - * guarantees that rcu_read_lock() sections will have completed. 
- * In "classic RCU", these two guarantees happen to be one and - * the same, but can differ in realtime RCU implementations. - */ -#define synchronize_sched() __synchronize_sched() - /** * call_rcu - Queue an RCU callback for invocation after a grace period. * @head: structure to be used for queueing the RCU updates. diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index a89307717825..00d08c0cbcc1 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -53,6 +53,8 @@ static inline void __rcu_read_unlock(void) preempt_enable(); } +#define __synchronize_sched() synchronize_rcu() + static inline void exit_rcu(void) { } @@ -68,8 +70,6 @@ static inline void __rcu_read_unlock_bh(void) local_bh_enable(); } -#define __synchronize_sched() synchronize_rcu() - extern void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index bd5d5c8e5140..28d2f24e7871 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -74,6 +74,8 @@ void wakeme_after_rcu(struct rcu_head *head) complete(&rcu->completion); } +#ifdef CONFIG_TREE_PREEMPT_RCU + /** * synchronize_rcu - wait until a grace period has elapsed. * @@ -87,7 +89,7 @@ void synchronize_rcu(void) { struct rcu_synchronize rcu; - if (rcu_blocking_is_gp()) + if (!rcu_scheduler_active) return; init_completion(&rcu.completion); @@ -98,6 +100,46 @@ void synchronize_rcu(void) } EXPORT_SYMBOL_GPL(synchronize_rcu); +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + +/** + * synchronize_sched - wait until an rcu-sched grace period has elapsed. + * + * Control will return to the caller some time after a full rcu-sched + * grace period has elapsed, in other words after all currently executing + * rcu-sched read-side critical sections have completed. These read-side + * critical sections are delimited by rcu_read_lock_sched() and + * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), + * local_irq_disable(), and so on may be used in place of + * rcu_read_lock_sched(). + * + * This means that all preempt_disable code sequences, including NMI and + * hardware-interrupt handlers, in progress on entry will have completed + * before this primitive returns. However, this does not guarantee that + * softirq handlers will have completed, since in some kernels, these + * handlers can run in process context, and can block. + * + * This primitive provides the guarantees made by the (now removed) + * synchronize_kernel() API. In contrast, synchronize_rcu() only + * guarantees that rcu_read_lock() sections will have completed. + * In "classic RCU", these two guarantees happen to be one and + * the same, but can differ in realtime RCU implementations. + */ +void synchronize_sched(void) +{ + struct rcu_synchronize rcu; + + if (rcu_blocking_is_gp()) + return; + + init_completion(&rcu.completion); + /* Will wake me after RCU finished. */ + call_rcu_sched(&rcu.head, wakeme_after_rcu); + /* Wait for it. */ + wait_for_completion(&rcu.completion); +} +EXPORT_SYMBOL_GPL(synchronize_sched); + /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * From 28ecd58020409be8eb176c716f957fc3386fa2fa Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 18 Sep 2009 09:50:17 -0700 Subject: [PATCH 08/11] rcu: Add WARN_ON_ONCE() consistency checks covering state transitions o Verify that qsmask bits stay clear through GP initialization. o Verify that cpu_quiet_msk_finish() is never invoked unless there actually is an RCU grace period in progress. 
o Verify that all internal-node rcu_node structures have empty blocked_tasks[] lists. o Verify that child rcu_node structure's bits remain clear after acquiring parent's lock. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <12532926191947-git-send-email-> Signed-off-by: Ingo Molnar --- kernel/rcutree.c | 13 +++++++++---- kernel/rcutree_plugin.h | 20 ++++++++++++++------ 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 6c99553e9f15..e8624ebf2320 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -628,8 +628,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) /* Special-case the common single-level case. */ if (NUM_RCU_NODES == 1) { - rnp->qsmask = rnp->qsmaskinit; rcu_preempt_check_blocked_tasks(rnp); + rnp->qsmask = rnp->qsmaskinit; rnp->gpnum = rsp->gpnum; rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ spin_unlock_irqrestore(&rnp->lock, flags); @@ -662,8 +662,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) rnp_end = &rsp->node[NUM_RCU_NODES]; for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) { spin_lock(&rnp_cur->lock); /* irqs already disabled. */ - rnp_cur->qsmask = rnp_cur->qsmaskinit; rcu_preempt_check_blocked_tasks(rnp); + rnp_cur->qsmask = rnp_cur->qsmaskinit; rnp->gpnum = rsp->gpnum; spin_unlock(&rnp_cur->lock); /* irqs already disabled. */ } @@ -708,6 +708,7 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) __releases(rnp->lock) { + WARN_ON_ONCE(rsp->completed == rsp->gpnum); rsp->completed = rsp->gpnum; rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ @@ -725,6 +726,8 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, unsigned long flags) __releases(rnp->lock) { + struct rcu_node *rnp_c; + /* Walk up the rcu_node hierarchy. */ for (;;) { if (!(rnp->qsmask & mask)) { @@ -748,8 +751,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, break; } spin_unlock_irqrestore(&rnp->lock, flags); + rnp_c = rnp; rnp = rnp->parent; spin_lock_irqsave(&rnp->lock, flags); + WARN_ON_ONCE(rnp_c->qsmask); } /* @@ -858,7 +863,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) spin_lock_irqsave(&rsp->onofflock, flags); /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ - rnp = rdp->mynode; + rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */ mask = rdp->grpmask; /* rnp->grplo is constant. */ do { spin_lock(&rnp->lock); /* irqs already disabled. */ @@ -867,7 +872,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) spin_unlock(&rnp->lock); /* irqs remain disabled. */ break; } - rcu_preempt_offline_tasks(rsp, rnp); + rcu_preempt_offline_tasks(rsp, rnp, rdp); mask = rnp->grpmask; spin_unlock(&rnp->lock); /* irqs remain disabled. 
*/ rnp = rnp->parent; diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index c9616e48379b..5f94619450af 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -206,7 +206,8 @@ static void rcu_read_unlock_special(struct task_struct *t) */ if (!empty && rnp->qsmask == 0 && list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) { - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; + struct rcu_node *rnp_p; + if (rnp->parent == NULL) { /* Only one rcu_node in the tree. */ cpu_quiet_msk_finish(&rcu_preempt_state, flags); @@ -215,9 +216,10 @@ static void rcu_read_unlock_special(struct task_struct *t) /* Report up the rest of the hierarchy. */ mask = rnp->grpmask; spin_unlock_irqrestore(&rnp->lock, flags); - rnp = rnp->parent; - spin_lock_irqsave(&rnp->lock, flags); - cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags); + rnp_p = rnp->parent; + spin_lock_irqsave(&rnp_p->lock, flags); + WARN_ON_ONCE(rnp->qsmask); + cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); return; } spin_unlock(&rnp->lock); @@ -278,6 +280,7 @@ static void rcu_print_task_stall(struct rcu_node *rnp) static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) { WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])); + WARN_ON_ONCE(rnp->qsmask); } /* @@ -302,7 +305,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp) * The caller must hold rnp->lock with irqs disabled. */ static void rcu_preempt_offline_tasks(struct rcu_state *rsp, - struct rcu_node *rnp) + struct rcu_node *rnp, + struct rcu_data *rdp) { int i; struct list_head *lp; @@ -314,6 +318,9 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp, WARN_ONCE(1, "Last CPU thought to be offlined?"); return; /* Shouldn't happen: at least one CPU online. */ } + WARN_ON_ONCE(rnp != rdp->mynode && + (!list_empty(&rnp->blocked_tasks[0]) || + !list_empty(&rnp->blocked_tasks[1]))); /* * Move tasks up to root rcu_node. Rely on the fact that the @@ -489,7 +496,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp) * tasks that were blocked within RCU read-side critical sections. */ static void rcu_preempt_offline_tasks(struct rcu_state *rsp, - struct rcu_node *rnp) + struct rcu_node *rnp, + struct rcu_data *rdp) { } From e7d8842ed34a7fe19d1ed90f84c211fb056ac523 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 18 Sep 2009 09:50:18 -0700 Subject: [PATCH 09/11] rcu: Apply results of code inspection of kernel/rcutree_plugin.h o Drop the calls to cpu_quiet() from the online/offline code. These are unnecessary, since force_quiescent_state() will clean up, and removing them simplifies the code a bit. o Add a warning to check that we don't enqueue the same blocked task twice onto the ->blocked_tasks[] lists. o Rework the phase computation in rcu_preempt_note_context_switch() to be more readable, as suggested by Josh Triplett. o Disable irqs to close a race between the scheduling clock interrupt and rcu_preempt_note_context_switch() WRT the ->rcu_read_unlock_special field. o Add comments to rnp->lock acquisition and release within rcu_read_unlock_special() noting that irqs are already disabled. Signed-off-by: Paul E. 
McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <12532926201851-git-send-email-> Signed-off-by: Ingo Molnar --- kernel/rcutree.c | 27 +++++---------------------- kernel/rcutree_plugin.h | 10 ++++++---- 2 files changed, 11 insertions(+), 26 deletions(-) diff --git a/kernel/rcutree.c b/kernel/rcutree.c index e8624ebf2320..ae4a553e37ce 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -767,10 +767,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, /* * Record a quiescent state for the specified CPU, which must either be - * the current CPU or an offline CPU. The lastcomp argument is used to - * make sure we are still in the grace period of interest. We don't want - * to end the current grace period based on quiescent states detected in - * an earlier grace period! + * the current CPU. The lastcomp argument is used to make sure we are + * still in the grace period of interest. We don't want to end the current + * grace period based on quiescent states detected in an earlier grace + * period! */ static void cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) @@ -805,7 +805,6 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) * This GP can't end until cpu checks in, so all of our * callbacks can be processed during the next GP. */ - rdp = rsp->rda[smp_processor_id()]; rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ @@ -881,9 +880,6 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ - /* Being offline is a quiescent state, so go record it. */ - cpu_quiet(cpu, rsp, rdp, lastcomp); - /* * Move callbacks from the outgoing CPU to the running CPU. * Note that the outgoing CPU is now quiscent, so it is now @@ -1448,20 +1444,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) rnp = rnp->parent; } while (rnp != NULL && !(rnp->qsmaskinit & mask)); - spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ - - /* - * A new grace period might start here. If so, we will be part of - * it, and its gpnum will be greater than ours, so we will - * participate. It is also possible for the gpnum to have been - * incremented before this function was called, and the bitmasks - * to not be filled out until now, in which case we will also - * participate due to our gpnum being behind. - */ - - /* Since it is coming online, the CPU is in a quiescent state. */ - cpu_quiet(cpu, rsp, rdp, lastcomp); - local_irq_restore(flags); + spin_unlock_irqrestore(&rsp->onofflock, flags); } static void __cpuinit rcu_online_cpu(int cpu) diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 5f94619450af..cd6047cc7fc2 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -117,9 +117,9 @@ static void rcu_preempt_note_context_switch(int cpu) * on line! */ WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); - phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1); + WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); + phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1; list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); - smp_mb(); /* Ensure later ctxt swtch seen after above. 
*/ spin_unlock_irqrestore(&rnp->lock, flags); } @@ -133,7 +133,9 @@ static void rcu_preempt_note_context_switch(int cpu) * means that we continue to block the current grace period. */ rcu_preempt_qs(cpu); + local_irq_save(flags); t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; + local_irq_restore(flags); } /* @@ -189,10 +191,10 @@ static void rcu_read_unlock_special(struct task_struct *t) */ for (;;) { rnp = t->rcu_blocked_node; - spin_lock(&rnp->lock); + spin_lock(&rnp->lock); /* irqs already disabled. */ if (rnp == t->rcu_blocked_node) break; - spin_unlock(&rnp->lock); + spin_unlock(&rnp->lock); /* irqs remain disabled. */ } empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); list_del_init(&t->rcu_node_entry); From 49e291266d0920264471d9d64268fb030e33a99a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 18 Sep 2009 09:50:19 -0700 Subject: [PATCH 10/11] rcu: Fix thinko, actually initialize full tree Commit de078d8 ("rcu: Need to update rnp->gpnum if preemptable RCU is to be reliable") repeatedly and incorrectly initializes the root rcu_node structure's ->gpnum field rather than initializing the ->gpnum field of each node in the tree. Fix this. Also add an additional consistency check to catch this in the future. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <125329262011-git-send-email-> Signed-off-by: Ingo Molnar --- kernel/rcutree.c | 11 ++++------- kernel/rcutree_plugin.h | 4 +++- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/kernel/rcutree.c b/kernel/rcutree.c index ae4a553e37ce..1b32cdd1b2e2 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -601,8 +601,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) { struct rcu_data *rdp = rsp->rda[smp_processor_id()]; struct rcu_node *rnp = rcu_get_root(rsp); - struct rcu_node *rnp_cur; - struct rcu_node *rnp_end; if (!cpu_needs_another_gp(rsp, rdp)) { spin_unlock_irqrestore(&rnp->lock, flags); @@ -659,13 +657,12 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) * one corresponding to this CPU, due to the fact that we have * irqs disabled. */ - rnp_end = &rsp->node[NUM_RCU_NODES]; - for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) { - spin_lock(&rnp_cur->lock); /* irqs already disabled. */ + for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) { + spin_lock(&rnp->lock); /* irqs already disabled. */ rcu_preempt_check_blocked_tasks(rnp); - rnp_cur->qsmask = rnp_cur->qsmaskinit; + rnp->qsmask = rnp->qsmaskinit; rnp->gpnum = rsp->gpnum; - spin_unlock(&rnp_cur->lock); /* irqs already disabled. */ + spin_unlock(&rnp->lock); /* irqs already disabled. */ } rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index cd6047cc7fc2..09b7325baad1 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -476,10 +476,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp) /* * Because there is no preemptable RCU, there can be no readers blocked, - * so there is no need to check for blocked tasks. + * so there is no need to check for blocked tasks. So check only for + * bogus qsmask values. 
*/ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) { + WARN_ON_ONCE(rnp->qsmask); } /* From a71fca58b7f4abca551ae2256ac08dd9123a03f9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 18 Sep 2009 10:28:19 -0700 Subject: [PATCH 11/11] rcu: Fix whitespace inconsistencies Fix a number of whitespace ^Ierrors in the include/linux/rcu* and the kernel/rcu* files. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <20090918172819.GA24405@linux.vnet.ibm.com> [ did more checkpatch fixlets ] Signed-off-by: Ingo Molnar --- include/linux/rculist_nulls.h | 2 +- include/linux/rcupdate.h | 6 +++--- include/linux/rcutree.h | 2 +- kernel/rcupdate.c | 4 ++-- kernel/rcutorture.c | 28 +++++++++++++++------------- kernel/rcutree.c | 2 +- kernel/rcutree.h | 2 +- kernel/rcutree_plugin.h | 3 +-- kernel/rcutree_trace.c | 2 +- 9 files changed, 26 insertions(+), 25 deletions(-) diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index f9ddd03961a8..589a40919f01 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -102,7 +102,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (pos = rcu_dereference((head)->first); \ - (!is_a_nulls(pos)) && \ + (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference(pos->next)) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 39dce83c4865..6fe0363724e9 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -1,5 +1,5 @@ /* - * Read-Copy Update mechanism for mutual exclusion + * Read-Copy Update mechanism for mutual exclusion * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -18,7 +18,7 @@ * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma - * + * * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: @@ -26,7 +26,7 @@ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - - * http://lse.sourceforge.net/locking/rcupdate.html + * http://lse.sourceforge.net/locking/rcupdate.html * */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 00d08c0cbcc1..37682770e9d2 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -24,7 +24,7 @@ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU + * Documentation/RCU */ #ifndef __LINUX_RCUTREE_H diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 28d2f24e7871..37ac45483082 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -19,7 +19,7 @@ * * Authors: Dipankar Sarma * Manfred Spraul - * + * * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
* Papers: @@ -27,7 +27,7 @@ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - - * http://lse.sourceforge.net/locking/rcupdate.html + * http://lse.sourceforge.net/locking/rcupdate.html * */ #include diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 328a8257c885..233768f21f97 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -18,7 +18,7 @@ * Copyright (C) IBM Corporation, 2005, 2006 * * Authors: Paul E. McKenney - * Josh Triplett + * Josh Triplett * * See also: Documentation/RCU/torture.txt */ @@ -50,7 +50,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul E. McKenney and " - "Josh Triplett "); + "Josh Triplett "); static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ static int nfakewriters = 4; /* # fake writer threads */ @@ -110,8 +110,8 @@ struct rcu_torture { }; static LIST_HEAD(rcu_torture_freelist); -static struct rcu_torture *rcu_torture_current = NULL; -static long rcu_torture_current_version = 0; +static struct rcu_torture *rcu_torture_current; +static long rcu_torture_current_version; static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; static DEFINE_SPINLOCK(rcu_torture_lock); static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = @@ -124,11 +124,11 @@ static atomic_t n_rcu_torture_alloc_fail; static atomic_t n_rcu_torture_free; static atomic_t n_rcu_torture_mberror; static atomic_t n_rcu_torture_error; -static long n_rcu_torture_timers = 0; +static long n_rcu_torture_timers; static struct list_head rcu_torture_removed; static cpumask_var_t shuffle_tmp_mask; -static int stutter_pause_test = 0; +static int stutter_pause_test; #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) #define RCUTORTURE_RUNNABLE_INIT 1 @@ -267,7 +267,8 @@ struct rcu_torture_ops { int irq_capable; char *name; }; -static struct rcu_torture_ops *cur_ops = NULL; + +static struct rcu_torture_ops *cur_ops; /* * Definitions for rcu torture testing. @@ -342,8 +343,8 @@ static struct rcu_torture_ops rcu_ops = { .sync = synchronize_rcu, .cb_barrier = rcu_barrier, .stats = NULL, - .irq_capable = 1, - .name = "rcu" + .irq_capable = 1, + .name = "rcu" }; static void rcu_sync_torture_deferred_free(struct rcu_torture *p) @@ -641,7 +642,8 @@ rcu_torture_writer(void *arg) do { schedule_timeout_uninterruptible(1); - if ((rp = rcu_torture_alloc()) == NULL) + rp = rcu_torture_alloc(); + if (rp == NULL) continue; rp->rtort_pipe_count = 0; udelay(rcu_random(&rand) & 0x3ff); @@ -1113,7 +1115,7 @@ rcu_torture_init(void) printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", torture_type); mutex_unlock(&fullstop_mutex); - return (-EINVAL); + return -EINVAL; } if (cur_ops->init) cur_ops->init(); /* no "goto unwind" prior to this point!!! 
*/ @@ -1164,7 +1166,7 @@ rcu_torture_init(void) goto unwind; } fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), - GFP_KERNEL); + GFP_KERNEL); if (fakewriter_tasks == NULL) { VERBOSE_PRINTK_ERRSTRING("out of memory"); firsterr = -ENOMEM; @@ -1173,7 +1175,7 @@ rcu_torture_init(void) for (i = 0; i < nfakewriters; i++) { VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task"); fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL, - "rcu_torture_fakewriter"); + "rcu_torture_fakewriter"); if (IS_ERR(fakewriter_tasks[i])) { firsterr = PTR_ERR(fakewriter_tasks[i]); VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter"); diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 1b32cdd1b2e2..52b06f6e158c 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -25,7 +25,7 @@ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU + * Documentation/RCU */ #include #include diff --git a/kernel/rcutree.h b/kernel/rcutree.h index bf8a6f9f134d..8e8287a983c2 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -142,7 +142,7 @@ struct rcu_data { */ struct rcu_head *nxtlist; struct rcu_head **nxttail[RCU_NEXT_SIZE]; - long qlen; /* # of queued callbacks */ + long qlen; /* # of queued callbacks */ long blimit; /* Upper limit on a processed batch */ #ifdef CONFIG_NO_HZ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 09b7325baad1..1cee04f627eb 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -370,9 +370,8 @@ static void rcu_preempt_check_callbacks(int cpu) rcu_preempt_qs(cpu); return; } - if (per_cpu(rcu_preempt_data, cpu).qs_pending) { + if (per_cpu(rcu_preempt_data, cpu).qs_pending) t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; - } } /* diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 0ea1bff69727..c89f5e9fd173 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c @@ -20,7 +20,7 @@ * Papers: http://www.rdrop.com/users/paulmck/RCU * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU + * Documentation/RCU * */ #include
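A note on the recurring subtlety in this series, with a small illustration. The choice of ->blocked_tasks[] list for a preempted reader (patches 01, 09, and 10) is driven by the rcu_node's ->gpnum parity and by whether the CPU still owes a quiescent state for the current grace period (rnp->qsmask & rdp->grpmask). The stand-alone sketch below is ordinary user-space C, not kernel code: the helper name blocked_tasks_phase() and the gpnum/mask values are invented purely to show how the phase expression from rcu_preempt_note_context_switch() (in the form patch 09 gives it) selects the current-grace-period list when the CPU has not yet checked in, and the next-grace-period list when it has.

/*
 * Stand-alone user-space sketch (not kernel code) of the blocked_tasks[]
 * index ("phase") computation used by rcu_preempt_note_context_switch().
 * The helper name and the gpnum/qsmask/grpmask values are made up for
 * illustration only.
 */
#include <stdio.h>

/* Which blocked_tasks[] list does a newly preempted reader join? */
static int blocked_tasks_phase(unsigned long gpnum, unsigned long qsmask,
                               unsigned long grpmask)
{
        /*
         * CPU's bit still set in qsmask: it has not yet passed through a
         * quiescent state, so the reader blocks the current grace period
         * and goes on the list indexed by gpnum & 0x1.  Bit already clear:
         * the reader can only block the next grace period, so it goes on
         * the other list.
         */
        return (int)((gpnum + !(qsmask & grpmask)) & 0x1);
}

int main(void)
{
        /* gpnum even, CPU 0 (grpmask 0x1) still owes a quiescent state. */
        printf("current GP blocked: list %d\n", blocked_tasks_phase(4, 0x3, 0x1));
        /* gpnum even, CPU 0 has already checked in for this grace period. */
        printf("next GP blocked:    list %d\n", blocked_tasks_phase(4, 0x2, 0x1));
        return 0;
}

Compiled as-is, the sketch prints list 0 for the still-pending CPU and list 1 for the already-quiescent one (gpnum even in this example), matching patch 01's description of which list can block the current grace period. For patch 03, a similar back-of-the-envelope check: with, say, four reader threads and a uniform rcu_random(), the 50 ms delay fires on roughly one read in 4 * 2000 * 50 = 400,000 and the 200 us delay on roughly one read in 4 * 2 * 200 = 1,600; rare enough not to dominate the test, yet frequent enough to delay a grace period into force_quiescent_state() territory.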