tracing: add event trace infrastructure

This patch creates the event tracing infrastructure of ftrace.
It will create the files:

 /debug/tracing/available_events
 /debug/tracing/set_event

The available_events will list the trace points that have been
registered with the event tracer.

set_event will allow the user to enable or disable an event hook.

example:

 # echo sched_wakeup > /debug/tracing/set_event

Will enable the sched_wakeup event (if it is registered).

 # echo "!sched_wakeup" >> /debug/tracing/set_event

Will disable the sched_wakeup event (and only that event).

 # echo > /debug/tracing/set_event

Will disable all events (notice the '>')

 # cat /debug/tracing/available_events > /debug/tracing/set_event

Will enable all registered event hooks.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
This commit is contained in:
Steven Rostedt 2009-02-24 10:21:36 -05:00
parent 7c37730cd3
commit b77e38aa24
5 changed files with 352 additions and 1 deletions

View File

@ -61,6 +61,14 @@
#define BRANCH_PROFILE()
#endif
/*
 * Bracket the _ftrace_events section with start/stop symbols so the
 * event tracer can iterate over every compiled-in
 * struct ftrace_event_call at runtime.
 */
#ifdef CONFIG_EVENT_TRACER
#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif
/* .data section */
#define DATA_DATA \
*(.data) \
@ -81,7 +89,8 @@
*(__tracepoints) \
VMLINUX_SYMBOL(__stop___tracepoints) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE()
BRANCH_PROFILE() \
FTRACE_EVENTS()
#define RO_DATA(align) \
. = ALIGN((align)); \

View File

@ -159,6 +159,15 @@ config CONTEXT_SWITCH_TRACER
This tracer gets called from the context switch and records
all switching of tasks.
config EVENT_TRACER
bool "Trace various events in the kernel"
depends on DEBUG_KERNEL
select TRACING
help
This tracer hooks into various trace points in the kernel,
allowing the user to pick and choose which trace points they
want to trace.
config BOOT_TRACER
bool "Trace boot initcalls"
depends on DEBUG_KERNEL

View File

@ -38,5 +38,6 @@ obj-$(CONFIG_POWER_TRACER) += trace_power.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
obj-$(CONFIG_EVENT_TRACER) += trace_events.o
libftrace-y := ftrace.o

280
kernel/trace/trace_events.c Normal file
View File

@ -0,0 +1,280 @@
/*
* event tracer
*
* Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include "trace_events.h"
/*
 * event_trace_printk - write a formatted event into the trace buffer
 * @ip:  instruction pointer to report for this entry
 * @fmt: printk-style format (generated by DEFINE_TRACE_FMT())
 *
 * Called by the per-event probe functions.  The va_start/va_end pair
 * must bracket the trace_vprintk() call exactly as written.
 */
void event_trace_printk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	/* record current's comm so the output can resolve the pid */
	tracing_record_cmdline(current);
	trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
	va_end(ap);
}
/* Disable every event that is currently enabled. */
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	for (call = (void *)__start_ftrace_events;
	     (unsigned long)call < (unsigned long)__stop_ftrace_events;
	     call++) {
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
}
/*
 * Enable (@set != 0) or disable (@set == 0) the event named @buf.
 *
 * Returns 0 when a matching event is found (including when it is
 * already in the requested state), -EINVAL when no registered event
 * has that name.
 */
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;

	for (call = (void *)__start_ftrace_events;
	     (unsigned long)call < (unsigned long)__stop_ftrace_events;
	     call++) {

		if (strcmp(buf, call->name) != 0)
			continue;

		if (set) {
			/* nothing to do if it is already enabled */
			if (!call->enabled) {
				call->enabled = 1;
				call->regfunc();
			}
		} else {
			/* nothing to do if it is already disabled */
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
		}
		return 0;
	}

	return -EINVAL;
}
/* 127 name characters + terminating NUL fit in the 128-byte buffer */
#define EVENT_BUF_SIZE 127

/*
 * ftrace_event_write - parse an event name written to set_event
 *
 * Consumes leading white space, then one white-space-delimited event
 * name.  A leading '!' on the name disables the event instead of
 * enabling it.
 *
 * Returns the number of bytes consumed, 0 for an empty/all-space
 * write, or a negative errno (-EFAULT from get_user(), -ENOMEM, or
 * -EINVAL if the name matches no registered event).
 */
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		/* a leading '!' means "disable this event" */
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);
	return ret;
}
/*
 * seq_file iterator for available_events: walk the whole
 * _ftrace_events array, one entry per call.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;

	(*pos)++;

	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	/* remember where to resume next time */
	m->private = call + 1;

	return call;
}
/* seq_file start for available_events: just hand back the cursor */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}
/*
 * seq_file iterator for set_event: like t_next(), but only events
 * that are currently enabled are reported.
 */
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;

	(*pos)++;

	/* skip over events that are not enabled */
	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
		if (call->enabled) {
			m->private = call + 1;
			return call;
		}
		call++;
	}

	return NULL;
}
/* seq_file start for set_event: just hand back the cursor */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}
static int t_show(struct seq_file *m, void *v)
{
struct ftrace_event_call *call = v;
seq_printf(m, "%s\n", call->name);
return 0;
}
/* Nothing to release; required by the seq_file interface. */
static void t_stop(struct seq_file *m, void *p)
{
}
/*
 * Common open routine for available_events and set_event; the
 * seq_operations to use are stashed in inode->i_private.
 *
 * NOTE(review): a truncating open (write mode without O_APPEND)
 * clears every enabled event -- that is what makes
 * "echo > set_event" disable everything.  This open is shared by
 * available_events as well; confirm that file can never be opened
 * for write (it is created 0444, but a privileged opener may bypass
 * the mode bits).
 */
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	/* "> set_event" truncates: disable all events first */
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		/* start iteration at the first registered event */
		m->private = __start_ftrace_events;
	}
	return ret;
}
/* Iterates every registered event (available_events) */
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

/* Iterates only enabled events (set_event) */
static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

/* Read-only listing of all registered events */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Read lists enabled events; write enables/disables by name */
static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};
/*
 * Boot-time setup: create the event tracing debugfs files.
 *
 *   available_events (0444) - lists every registered event
 *   set_event        (0644) - enable/disable events by name
 *
 * Always returns 0; on failure the tracer simply lacks its
 * debugfs files and a warning is printed.
 */
static __init int event_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	/* i_private carries the seq_operations for the shared open */
	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	return 0;
}

fs_initcall(event_trace_init);

View File

@ -0,0 +1,52 @@
#ifndef _LINUX_KERNEL_TRACE_EVENTS_H
#define _LINUX_KERNEL_TRACE_EVENTS_H
#include <linux/ftrace.h>
#include "trace.h"
struct ftrace_event_call {
char *name;
int enabled;
int (*regfunc)(void);
void (*unregfunc)(void);
};
#undef TPFMT
#define TPFMT(fmt, args...)	fmt "\n", ##args

/*
 * DEFINE_TRACE_FMT - generate the event-tracing glue for one tracepoint.
 *
 * For tracepoint "call" this emits:
 *   - ftrace_event_<call>():   the probe, printing the event through
 *     event_trace_printk()
 *   - ftrace_reg_event_<call>() / ftrace_unreg_event_<call>():
 *     register/unregister the probe with the tracepoint
 *   - event_<call>:            the struct ftrace_event_call descriptor,
 *     placed in the _ftrace_events section for the core to find
 */
#undef DEFINE_TRACE_FMT
#define DEFINE_TRACE_FMT(call, proto, args, fmt)			\
static void ftrace_event_##call(proto)					\
{									\
	event_trace_printk(_RET_IP_, "(" #call ") " fmt);		\
}									\
									\
static int ftrace_reg_event_##call(void)				\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_event_##call);		\
	/* register_trace_* returns 0 on success: warn on failure only */ \
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_unreg_event_##call(void)				\
{									\
	unregister_trace_##call(ftrace_event_##call);			\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name = #call,							\
	.regfunc = ftrace_reg_event_##call,				\
	.unregfunc = ftrace_unreg_event_##call,				\
}
/* Implemented in trace_events.c; called by the generated probes. */
void event_trace_printk(unsigned long ip, const char *fmt, ...);

/*
 * Section boundaries provided by the linker script (FTRACE_EVENTS()).
 * NOTE(review): declared as unsigned long[] but always cast to
 * struct ftrace_event_call * by the users -- the element type here is
 * only a placeholder.
 */
extern unsigned long __start_ftrace_events[];
extern unsigned long __stop_ftrace_events[];
#endif /* _LINUX_KERNEL_TRACE_EVENTS_H */