rcu: add tracing for TINY_RCU and TINY_PREEMPT_RCU

Add tracing for the tiny RCU implementations, including statistics on
boosting in the case of TINY_PREEMPT_RCU and RCU_BOOST.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
Paul E. McKenney 2010-09-30 21:26:52 -07:00 committed by Paul E. McKenney
parent 24278d1483
commit 9e571a82f0
3 changed files with 226 additions and 11 deletions

View File

@ -384,7 +384,6 @@ config PREEMPT_RCU
config RCU_TRACE config RCU_TRACE
bool "Enable tracing for RCU" bool "Enable tracing for RCU"
depends on TREE_RCU || TREE_PREEMPT_RCU
help help
This option provides tracing in RCU which presents stats This option provides tracing in RCU which presents stats
in debugfs for debugging RCU implementation. in debugfs for debugging RCU implementation.

View File

@ -144,6 +144,7 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{ {
struct rcu_head *next, *list; struct rcu_head *next, *list;
unsigned long flags; unsigned long flags;
RCU_TRACE(int cb_count = 0);
/* If no RCU callbacks ready to invoke, just return. */ /* If no RCU callbacks ready to invoke, just return. */
if (&rcp->rcucblist == rcp->donetail) if (&rcp->rcucblist == rcp->donetail)
@ -169,7 +170,9 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
list->func(list); list->func(list);
local_bh_enable(); local_bh_enable();
list = next; list = next;
RCU_TRACE(cb_count++);
} }
RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
} }
/* /*
@ -252,6 +255,7 @@ static void __call_rcu(struct rcu_head *head,
local_irq_save(flags); local_irq_save(flags);
*rcp->curtail = head; *rcp->curtail = head;
rcp->curtail = &head->next; rcp->curtail = &head->next;
RCU_TRACE(rcp->qlen++);
local_irq_restore(flags); local_irq_restore(flags);
} }

View File

@ -23,12 +23,21 @@
*/ */
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
/*
 * RCU_TRACE() compiles its statement argument in only when
 * CONFIG_RCU_TRACE is set; otherwise the statement disappears
 * entirely, so tracing has zero cost in non-tracing builds.
 */
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
/* Global control variables for rcupdate callback mechanism. */ /* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk { struct rcu_ctrlblk {
struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */ struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
struct rcu_head **donetail; /* ->next pointer of last "done" CB. */ struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
struct rcu_head **curtail; /* ->next pointer of last CB. */ struct rcu_head **curtail; /* ->next pointer of last CB. */
RCU_TRACE(long qlen); /* Number of pending CBs. */
}; };
/* Definition for rcupdate control block. */ /* Definition for rcupdate control block. */
@ -90,8 +99,26 @@ struct rcu_preempt_ctrlblk {
u8 gpcpu; /* Last grace period blocked by the CPU. */ u8 gpcpu; /* Last grace period blocked by the CPU. */
u8 completed; /* Last grace period completed. */ u8 completed; /* Last grace period completed. */
/* If all three are equal, RCU is idle. */ /* If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
s8 boosted_this_gp; /* Has boosting already happened? */ s8 boosted_this_gp; /* Has boosting already happened? */
unsigned long boost_time; /* When to start boosting (jiffies) */ unsigned long boost_time; /* When to start boosting (jiffies) */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
unsigned long n_grace_periods;
#ifdef CONFIG_RCU_BOOST
unsigned long n_tasks_boosted;
unsigned long n_exp_boosts;
unsigned long n_normal_boosts;
unsigned long n_normal_balk_blkd_tasks;
unsigned long n_normal_balk_gp_tasks;
unsigned long n_normal_balk_boost_tasks;
unsigned long n_normal_balk_boosted;
unsigned long n_normal_balk_notyet;
unsigned long n_normal_balk_nos;
unsigned long n_exp_balk_blkd_tasks;
unsigned long n_exp_balk_nos;
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
}; };
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = { static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
@ -170,6 +197,65 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t)
return np; return np;
} }
#ifdef CONFIG_RCU_TRACE
#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
static void rcu_initiate_exp_boost_trace(void);
#endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Dump additional statistics for TINY_PREEMPT_RCU.
 *
 * Emits the rcu_preempt counters into the debugfs seq_file @m and,
 * under CONFIG_RCU_BOOST, the boosting statistics as well.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	/*
	 * The "X."[cond] idiom indexes a two-character string: it prints
	 * 'X' when cond is 0 and '.' when cond is 1, giving a compact
	 * flag display for the three blocked-task pointers/lists.
	 */
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
rcu_preempt_ctrlblk.rcb.qlen,
rcu_preempt_ctrlblk.n_grace_periods,
rcu_preempt_ctrlblk.gpnum,
rcu_preempt_ctrlblk.gpcpu,
rcu_preempt_ctrlblk.completed,
"T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
"N."[!rcu_preempt_ctrlblk.gp_tasks],
"E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, " ttb=%c btg=",
"B."[!rcu_preempt_ctrlblk.boost_tasks]);
	/* Decode ->boosted_this_gp into a human-readable state name. */
	switch (rcu_preempt_ctrlblk.boosted_this_gp) {
	case -1:
		seq_puts(m, "exp");
		break;
	case 0:
		seq_puts(m, "no");
		break;
	case 1:
		seq_puts(m, "done");
		break;
	default:
		seq_printf(m, "?%d?", rcu_preempt_ctrlblk.boosted_this_gp);
	}
	/* j and bt are the low 16 bits of jiffies and the boost deadline. */
	seq_printf(m, " ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
rcu_preempt_ctrlblk.n_tasks_boosted,
rcu_preempt_ctrlblk.n_exp_boosts,
rcu_preempt_ctrlblk.n_normal_boosts,
(int)(jiffies & 0xffff),
(int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	/* Counters of reasons normal boosting was declined ("balked"). */
	seq_printf(m, " %s: nt=%lu gt=%lu bt=%lu b=%lu ny=%lu nos=%lu\n",
"normal balk",
rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks,
rcu_preempt_ctrlblk.n_normal_balk_gp_tasks,
rcu_preempt_ctrlblk.n_normal_balk_boost_tasks,
rcu_preempt_ctrlblk.n_normal_balk_boosted,
rcu_preempt_ctrlblk.n_normal_balk_notyet,
rcu_preempt_ctrlblk.n_normal_balk_nos);
	/* Counters of reasons expedited boosting was declined. */
	seq_printf(m, " exp balk: bt=%lu nos=%lu\n",
rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks,
rcu_preempt_ctrlblk.n_exp_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}
#endif /* #ifdef CONFIG_RCU_TRACE */
#ifdef CONFIG_RCU_BOOST #ifdef CONFIG_RCU_BOOST
#include "rtmutex_common.h" #include "rtmutex_common.h"
@ -197,6 +283,7 @@ static int rcu_boost(void)
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED; t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
raw_local_irq_restore(flags); raw_local_irq_restore(flags);
rt_mutex_lock(&mtx); rt_mutex_lock(&mtx);
RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);
rt_mutex_unlock(&mtx); rt_mutex_unlock(&mtx);
return rcu_preempt_ctrlblk.boost_tasks != NULL; return rcu_preempt_ctrlblk.boost_tasks != NULL;
} }
@ -206,16 +293,27 @@ static int rcu_boost(void)
* the current grace period, and, if so, tell the rcu_kthread_task to * the current grace period, and, if so, tell the rcu_kthread_task to
* start boosting them. If there is an expedited boost in progress, * start boosting them. If there is an expedited boost in progress,
* we wait for it to complete. * we wait for it to complete.
*
* If there are no blocked readers blocking the current grace period,
* return 0 to let the caller know, otherwise return 1. Note that this
* return value is independent of whether or not boosting was done.
*/ */
static void rcu_initiate_boost(void) static int rcu_initiate_boost(void)
{ {
if (!rcu_preempt_blocked_readers_cgp()) {
RCU_TRACE(rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks++);
return 0;
}
if (rcu_preempt_ctrlblk.gp_tasks != NULL && if (rcu_preempt_ctrlblk.gp_tasks != NULL &&
rcu_preempt_ctrlblk.boost_tasks == NULL && rcu_preempt_ctrlblk.boost_tasks == NULL &&
rcu_preempt_ctrlblk.boosted_this_gp == 0 && rcu_preempt_ctrlblk.boosted_this_gp == 0 &&
ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) { ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) {
rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks; rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks;
invoke_rcu_kthread(); invoke_rcu_kthread();
} RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
} else
RCU_TRACE(rcu_initiate_boost_trace());
return 1;
} }
/* /*
@ -231,7 +329,9 @@ static void rcu_initiate_expedited_boost(void)
rcu_preempt_ctrlblk.blkd_tasks.next; rcu_preempt_ctrlblk.blkd_tasks.next;
rcu_preempt_ctrlblk.boosted_this_gp = -1; rcu_preempt_ctrlblk.boosted_this_gp = -1;
invoke_rcu_kthread(); invoke_rcu_kthread();
} RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
} else
RCU_TRACE(rcu_initiate_exp_boost_trace());
raw_local_irq_restore(flags); raw_local_irq_restore(flags);
} }
@ -258,10 +358,13 @@ static int rcu_boost(void)
} }
/* /*
* If there is no RCU priority boosting, we don't initiate boosting. * If there is no RCU priority boosting, we don't initiate boosting,
* but we do indicate whether there are blocked readers blocking the
* current grace period.
*/ */
static void rcu_initiate_boost(void) static int rcu_initiate_boost(void)
{ {
return rcu_preempt_blocked_readers_cgp();
} }
/* /*
@ -308,13 +411,14 @@ static void rcu_preempt_cpu_qs(void)
current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
/* If there is no GP then there is nothing more to do. */ /* If there is no GP then there is nothing more to do. */
if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp()) if (!rcu_preempt_gp_in_progress())
return; return;
/* If there are blocked readers, go check up on boosting. */ /*
if (rcu_preempt_blocked_readers_cgp()) { * Check up on boosting. If there are no readers blocking the
rcu_initiate_boost(); * current grace period, leave.
*/
if (rcu_initiate_boost())
return; return;
}
/* Advance callbacks. */ /* Advance callbacks. */
rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum; rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
@ -339,6 +443,7 @@ static void rcu_preempt_start_gp(void)
/* Official start of GP. */ /* Official start of GP. */
rcu_preempt_ctrlblk.gpnum++; rcu_preempt_ctrlblk.gpnum++;
RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);
/* Any blocked RCU readers block new GP. */ /* Any blocked RCU readers block new GP. */
if (rcu_preempt_blocked_readers_any()) if (rcu_preempt_blocked_readers_any())
@ -591,6 +696,7 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
local_irq_save(flags); local_irq_save(flags);
*rcu_preempt_ctrlblk.nexttail = head; *rcu_preempt_ctrlblk.nexttail = head;
rcu_preempt_ctrlblk.nexttail = &head->next; rcu_preempt_ctrlblk.nexttail = &head->next;
RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
rcu_preempt_start_gp(); /* checks to see if GP needed. */ rcu_preempt_start_gp(); /* checks to see if GP needed. */
local_irq_restore(flags); local_irq_restore(flags);
} }
@ -747,6 +853,18 @@ void exit_rcu(void)
#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */ #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
#ifdef CONFIG_RCU_TRACE
/*
 * Because preemptible RCU does not exist, it is not necessary to
 * dump out its statistics.  This stub keeps show_tiny_stats() simple
 * for !CONFIG_TINY_PREEMPT_RCU builds.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}
#endif /* #ifdef CONFIG_RCU_TRACE */
/* /*
* Because preemptible RCU does not exist, it is never necessary to * Because preemptible RCU does not exist, it is never necessary to
* boost preempted RCU readers. * boost preempted RCU readers.
@ -802,3 +920,97 @@ void __init rcu_scheduler_starting(void)
#else /* #ifdef CONFIG_RCU_BOOST */ #else /* #ifdef CONFIG_RCU_BOOST */
#define RCU_BOOST_PRIO 1 #define RCU_BOOST_PRIO 1
#endif /* #else #ifdef CONFIG_RCU_BOOST */ #endif /* #else #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
#ifdef CONFIG_RCU_BOOST
/*
 * Record why normal boosting was declined ("balked at"): each branch
 * mirrors one clause of the condition in rcu_initiate_boost(), so
 * exactly one counter is bumped per call.  Built only when
 * CONFIG_RCU_TRACE and CONFIG_RCU_BOOST are both set.
 */
static void rcu_initiate_boost_trace(void)
{
	if (rcu_preempt_ctrlblk.gp_tasks == NULL)
		rcu_preempt_ctrlblk.n_normal_balk_gp_tasks++;
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_normal_balk_boost_tasks++;
	else if (rcu_preempt_ctrlblk.boosted_this_gp != 0)
		rcu_preempt_ctrlblk.n_normal_balk_boosted++;
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		/* Boost deadline not yet reached. */
		rcu_preempt_ctrlblk.n_normal_balk_notyet++;
	else
		/* No specific reason identified. */
		rcu_preempt_ctrlblk.n_normal_balk_nos++;
}
/*
 * Record why expedited boosting was declined: either there were no
 * blocked tasks at all, or some other (unidentified) reason applied.
 */
static void rcu_initiate_exp_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks++;
	else
		rcu_preempt_ctrlblk.n_exp_balk_nos++;
}
#endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Subtract @n (the number of just-invoked callbacks) from @rcp->qlen.
 * Interrupts are disabled around the update because ->qlen is also
 * incremented from interrupts-disabled regions (see __call_rcu()).
 */
static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	rcp->qlen -= n;
	raw_local_irq_restore(flags);
}
/*
 * Dump statistics for TINY_RCU, such as they are: the preemptible-RCU
 * stats (a no-op stub unless TINY_PREEMPT_RCU) followed by the
 * rcu_sched and rcu_bh callback-queue lengths.  seq_file show method
 * for the "rcudata" debugfs file; @unused is ignored.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}
/* debugfs open handler: bind show_tiny_stats() via the seq_file API. */
static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}
/* File operations for the read-only "rcudata" debugfs file. */
static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static struct dentry *rcudir;
/*
 * Create the debugfs directory "rcu" and its "rcudata" file; on any
 * failure, tear down whatever was created and return nonzero.
 *
 * NOTE(review): module_init functions conventionally return a negative
 * errno on failure rather than 1 — confirm whether 1 is intended here.
 */
static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	/* Safe even if rcudir creation itself failed. */
	debugfs_remove_recursive(rcudir);
	return 1;
}
/* Remove the entire debugfs "rcu" directory tree on module unload. */
static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}
module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);
MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");
#endif /* #ifdef CONFIG_RCU_TRACE */