Merge branches 'tracing/blktrace', 'tracing/ftrace' and 'tracing/urgent' into tracing/core

Ingo Molnar, 2009-02-19 10:20:17 +01:00
commit 40999096e8
13 changed files with 183 additions and 107 deletions

include/linux/ftrace.h

@@ -108,7 +108,7 @@ struct ftrace_func_command {
struct seq_file;
-struct ftrace_hook_ops {
+struct ftrace_probe_ops {
void (*func)(unsigned long ip,
unsigned long parent_ip,
void **data);
@@ -116,19 +116,19 @@ struct ftrace_hook_ops {
void (*free)(void **data);
int (*print)(struct seq_file *m,
unsigned long ip,
-struct ftrace_hook_ops *ops,
+struct ftrace_probe_ops *ops,
void *data);
};
extern int
-register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data);
extern void
-unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data);
extern void
-unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops);
-extern void unregister_ftrace_function_hook_all(char *glob);
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
+extern void unregister_ftrace_function_probe_all(char *glob);
enum {
FTRACE_FL_FREE = (1 << 0),

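The renamed probe API above is easier to follow with a usage sketch. The following is hypothetical and not part of this commit: the my_* names are invented, but the callback signature, the registration calls, and the counter-in-data idiom mirror the traceon/traceoff probes in trace_functions.c later in this merge. These symbols are not exported to modules, so read it as built-in kernel code.

#include <linux/ftrace.h>
#include <linux/kernel.h>

/* Hypothetical probe: count hits on every function matching "vfs_*". */
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* the pointer-sized data slot doubles as a counter, as in
	 * ftrace_traceon()/ftrace_traceoff() later in this commit */
	long *count = (long *)data;

	(*count)++;
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
};

static int __init my_probe_init(void)
{
	int ret;

	/* attach the probe to all functions matching the glob;
	 * returns how many functions matched, or a negative errno */
	ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
	return ret < 0 ? ret : 0;
}

static void my_probe_remove(void)
{
	/* remove only the entries registered with these ops */
	unregister_ftrace_function_probe_func("vfs_*", &my_probe_ops);
}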
kernel/trace/Kconfig

@@ -60,6 +60,7 @@ config FUNCTION_TRACER
depends on HAVE_FUNCTION_TRACER
depends on DEBUG_KERNEL
select FRAME_POINTER
+select KALLSYMS
select TRACING
select CONTEXT_SWITCH_TRACER
help
@@ -246,6 +247,7 @@ config STACK_TRACER
depends on DEBUG_KERNEL
select FUNCTION_TRACER
select STACKTRACE
+select KALLSYMS
help
This special tracer records the maximum stack footprint of the
kernel and displays it in debugfs/tracing/stack_trace.

kernel/trace/ftrace.c

@@ -255,9 +255,9 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
-struct ftrace_func_hook {
+struct ftrace_func_probe {
struct hlist_node node;
-struct ftrace_hook_ops *ops;
+struct ftrace_probe_ops *ops;
unsigned long flags;
unsigned long ip;
void *data;
@@ -460,8 +460,8 @@ static void ftrace_bug(int failed, unsigned long ip)
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
-unsigned long ip, fl;
unsigned long ftrace_addr;
+unsigned long ip, fl;
ftrace_addr = (unsigned long)FTRACE_ADDR;
@@ -530,9 +530,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
static void ftrace_replace_code(int enable)
{
-int failed;
struct dyn_ftrace *rec;
struct ftrace_page *pg;
+int failed;
do_for_each_ftrace_rec(pg, rec) {
/*
@@ -830,11 +830,11 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
static int t_hash_show(struct seq_file *m, void *v)
{
-struct ftrace_func_hook *rec;
+struct ftrace_func_probe *rec;
struct hlist_node *hnd = v;
char str[KSYM_SYMBOL_LEN];
-rec = hlist_entry(hnd, struct ftrace_func_hook, node);
+rec = hlist_entry(hnd, struct ftrace_func_probe, node);
if (rec->ops->print)
return rec->ops->print(m, rec->ip, rec->ops, rec->data);
@@ -1208,14 +1208,15 @@ ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
static void ftrace_match_records(char *buff, int len, int enable)
{
-char *search;
+unsigned int search_len;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
+unsigned long flag;
+char *search;
int type;
-unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-unsigned search_len;
int not;
+flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
type = ftrace_setup_glob(buff, len, &search, &not);
search_len = strlen(search);
@@ -1263,14 +1264,16 @@ ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
-char *search = buff;
+unsigned search_len = 0;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
int type = MATCH_FULL;
-unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
-unsigned search_len = 0;
+char *search = buff;
+unsigned long flag;
int not = 0;
+flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
/* blank or '*' mean the same */
if (strcmp(buff, "*") == 0)
buff[0] = 0;
@@ -1348,9 +1351,9 @@ static int __init ftrace_mod_cmd_init(void)
device_initcall(ftrace_mod_cmd_init);
static void
-function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
+function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
-struct ftrace_func_hook *entry;
+struct ftrace_func_probe *entry;
struct hlist_head *hhd;
struct hlist_node *n;
unsigned long key;
@@ -1376,18 +1379,18 @@ function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
ftrace_preempt_enable(resched);
}
-static struct ftrace_ops trace_hook_ops __read_mostly =
+static struct ftrace_ops trace_probe_ops __read_mostly =
{
-.func = function_trace_hook_call,
+.func = function_trace_probe_call,
};
-static int ftrace_hook_registered;
+static int ftrace_probe_registered;
-static void __enable_ftrace_function_hook(void)
+static void __enable_ftrace_function_probe(void)
{
int i;
-if (ftrace_hook_registered)
+if (ftrace_probe_registered)
return;
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
@@ -1399,16 +1402,16 @@ static void __enable_ftrace_function_hook(void)
if (i == FTRACE_FUNC_HASHSIZE)
return;
-__register_ftrace_function(&trace_hook_ops);
+__register_ftrace_function(&trace_probe_ops);
ftrace_startup(0);
-ftrace_hook_registered = 1;
+ftrace_probe_registered = 1;
}
-static void __disable_ftrace_function_hook(void)
+static void __disable_ftrace_function_probe(void)
{
int i;
-if (!ftrace_hook_registered)
+if (!ftrace_probe_registered)
return;
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
@@ -1418,16 +1421,16 @@ static void __disable_ftrace_function_hook(void)
}
/* no more funcs left */
-__unregister_ftrace_function(&trace_hook_ops);
+__unregister_ftrace_function(&trace_probe_ops);
ftrace_shutdown(0);
-ftrace_hook_registered = 0;
+ftrace_probe_registered = 0;
}
static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
-struct ftrace_func_hook *entry =
-container_of(rhp, struct ftrace_func_hook, rcu);
+struct ftrace_func_probe *entry =
+container_of(rhp, struct ftrace_func_probe, rcu);
if (entry->ops->free)
entry->ops->free(&entry->data);
@@ -1436,21 +1439,21 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
int
-register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data)
{
-struct ftrace_func_hook *entry;
+struct ftrace_func_probe *entry;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
-unsigned long key;
int type, len, not;
+unsigned long key;
int count = 0;
char *search;
type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
len = strlen(search);
-/* we do not support '!' for function hooks */
+/* we do not support '!' for function probes */
if (WARN_ON(not))
return -EINVAL;
@@ -1465,7 +1468,7 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
-/* If we did not hook to any, then return error */
+/* If we did not process any, then return error */
if (!count)
count = -ENOMEM;
goto out_unlock;
@@ -1495,7 +1498,7 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
} while_for_each_ftrace_rec();
-__enable_ftrace_function_hook();
+__enable_ftrace_function_probe();
out_unlock:
mutex_unlock(&ftrace_lock);
@@ -1504,15 +1507,15 @@ register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
}
enum {
-HOOK_TEST_FUNC = 1,
-HOOK_TEST_DATA = 2
+PROBE_TEST_FUNC = 1,
+PROBE_TEST_DATA = 2
};
static void
-__unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags)
{
-struct ftrace_func_hook *entry;
+struct ftrace_func_probe *entry;
struct hlist_node *n, *tmp;
char str[KSYM_SYMBOL_LEN];
int type = MATCH_FULL;
@@ -1527,7 +1530,7 @@ __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
len = strlen(search);
-/* we do not support '!' for function hooks */
+/* we do not support '!' for function probes */
if (WARN_ON(not))
return;
}
@@ -1539,10 +1542,10 @@ __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
/* break up if statements for readability */
-if ((flags & HOOK_TEST_FUNC) && entry->ops != ops)
+if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
continue;
-if ((flags & HOOK_TEST_DATA) && entry->data != data)
+if ((flags & PROBE_TEST_DATA) && entry->data != data)
continue;
/* do this last, since it is the most expensive */
@@ -1557,27 +1560,27 @@ __unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
call_rcu(&entry->rcu, ftrace_free_entry_rcu);
}
}
-__disable_ftrace_function_hook();
+__disable_ftrace_function_probe();
mutex_unlock(&ftrace_lock);
}
void
-unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data)
{
-__unregister_ftrace_function_hook(glob, ops, data,
-HOOK_TEST_FUNC | HOOK_TEST_DATA);
+__unregister_ftrace_function_probe(glob, ops, data,
+PROBE_TEST_FUNC | PROBE_TEST_DATA);
}
void
-unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops)
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
-__unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC);
+__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}
-void unregister_ftrace_function_hook_all(char *glob)
+void unregister_ftrace_function_probe_all(char *glob)
{
-__unregister_ftrace_function_hook(glob, NULL, NULL, 0);
+__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
}
static LIST_HEAD(ftrace_commands);
@@ -1623,8 +1626,8 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd)
static int ftrace_process_regex(char *buff, int len, int enable)
{
-struct ftrace_func_command *p;
char *func, *command, *next = buff;
+struct ftrace_func_command *p;
int ret = -EINVAL;
func = strsep(&next, ":");
@@ -2392,7 +2395,6 @@ static __init int ftrace_init_debugfs(void)
"'set_ftrace_pid' entry\n");
return 0;
}
fs_initcall(ftrace_init_debugfs);
/**

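For background on the command dispatch in ftrace_process_regex() above: commands such as traceon/traceoff are plugged in at boot through register_ftrace_command(). A hypothetical sketch follows; the mycmd names are invented, while the callback signature and registration call are the ones visible in this diff.

/* Hypothetical: wire a custom ":mycmd" suffix into set_ftrace_filter. */
static int
mycmd_callback(char *glob, char *cmd, char *param, int enable)
{
	/* glob: function pattern before the first ':', cmd: "mycmd",
	 * param: optional text after the second ':' (may be NULL) */
	pr_info("mycmd: glob=%s param=%s enable=%d\n",
		glob, param ? param : "(none)", enable);
	return 0;
}

static struct ftrace_func_command mycmd = {
	.name	= "mycmd",
	.func	= mycmd_callback,
};

static int __init mycmd_init(void)
{
	/* afterwards, "echo 'schedule:mycmd:42' > set_ftrace_filter"
	 * is routed to mycmd_callback() by ftrace_process_regex() */
	return register_ftrace_command(&mycmd);
}
device_initcall(mycmd_init);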
kernel/trace/trace.c

@@ -336,7 +336,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
data->rt_priority = tsk->rt_priority;
/* record this tasks comm */
-tracing_record_cmdline(current);
+tracing_record_cmdline(tsk);
}
static void
@@ -499,6 +499,9 @@ __acquires(kernel_lock)
else
if (!type->flags->opts)
type->flags->opts = dummy_tracer_opt;
+if (!type->wait_pipe)
+type->wait_pipe = default_wait_pipe;
#ifdef CONFIG_FTRACE_STARTUP_TEST
if (type->selftest && !tracing_selftest_disabled) {
@@ -1064,7 +1067,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_prio = wakee->prio;
entry->next_state = wakee->state;
entry->next_cpu = task_cpu(wakee);
-trace_buffer_unlock_commit(tr, event, flags, pc);
+ring_buffer_unlock_commit(tr->buffer, event);
+ftrace_trace_stack(tr, flags, 6, pc);
+ftrace_trace_userstack(tr, flags, pc);
}
void
@@ -2392,6 +2398,38 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}
+void default_wait_pipe(struct trace_iterator *iter)
+{
+DEFINE_WAIT(wait);
+prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+if (trace_empty(iter))
+schedule();
+finish_wait(&trace_wait, &wait);
+}
+/*
+ * This is a make-shift waitqueue.
+ * A tracer might use this callback on some rare cases:
+ *
+ * 1) the current tracer might hold the runqueue lock when it wakes up
+ *    a reader, hence a deadlock (sched, function, and function graph tracers)
+ * 2) the function tracers, trace all functions, we don't want
+ *    the overhead of calling wake_up and friends
+ *    (and tracing them too)
+ *
+ * Anyway, this is really very primitive wakeup.
+ */
+void poll_wait_pipe(struct trace_iterator *iter)
+{
+set_current_state(TASK_INTERRUPTIBLE);
+/* sleep for 100 msecs, and try again. */
+schedule_timeout(HZ / 10);
+}
/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
@@ -2403,30 +2441,14 @@ static int tracing_wait_pipe(struct file *filp)
return -EAGAIN;
}
-/*
- * This is a make-shift waitqueue. The reason we don't use
- * an actual wait queue is because:
- * 1) we only ever have one waiter
- * 2) the tracing, traces all functions, we don't want
- *    the overhead of calling wake_up and friends
- *    (and tracing them too)
- * Anyway, this is really very primitive wakeup.
- */
-set_current_state(TASK_INTERRUPTIBLE);
-iter->tr->waiter = current;
mutex_unlock(&trace_types_lock);
-/* sleep for 100 msecs, and try again. */
-schedule_timeout(HZ/10);
+iter->trace->wait_pipe(iter);
mutex_lock(&trace_types_lock);
-iter->tr->waiter = NULL;
-if (signal_pending(current)) {
+if (signal_pending(current))
return -EINTR;
-}
if (iter->trace != current_trace)
return 0;
@@ -2442,8 +2464,6 @@ static int tracing_wait_pipe(struct file *filp)
 */
if (!tracer_enabled && iter->pos)
break;
-continue;
}
return 1;
@@ -2551,8 +2571,7 @@ static struct pipe_buf_operations tracing_pipe_buf_ops = {
};
static size_t
-tracing_fill_pipe_page(struct page *pages, size_t rem,
-struct trace_iterator *iter)
+tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
size_t count;
int ret;
@@ -2629,7 +2648,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
if (!pages[i])
break;
-rem = tracing_fill_pipe_page(pages[i], rem, iter);
+rem = tracing_fill_pipe_page(rem, iter);
/* Copy the data into the page, so we can start over. */
ret = trace_seq_to_buffer(&iter->seq,

kernel/trace/trace.h

@@ -337,18 +337,34 @@ struct tracer_flags {
#define TRACER_OPT(s, b) .name = #s, .bit = b
-/*
- * A specific tracer, represented by methods that operate on a trace array:
+/**
+ * struct tracer - a specific tracer and its callbacks to interact with debugfs
+ * @name: the name chosen to select it on the available_tracers file
+ * @init: called when one switches to this tracer (echo name > current_tracer)
+ * @reset: called when one switches to another tracer
+ * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
+ * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @open: called when the trace file is opened
+ * @pipe_open: called when the trace_pipe file is opened
+ * @wait_pipe: override how the user waits for traces on trace_pipe
+ * @close: called when the trace file is released
+ * @read: override the default read callback on trace_pipe
+ * @splice_read: override the default splice_read callback on trace_pipe
+ * @selftest: selftest to run on boot (see trace_selftest.c)
+ * @print_headers: override the first lines that describe your columns
+ * @print_line: callback that prints a trace
+ * @set_flag: signals one of your private flags changed (trace_options file)
+ * @flags: your private flags
*/
struct tracer {
const char *name;
-/* Your tracer should raise a warning if init fails */
int (*init)(struct trace_array *tr);
void (*reset)(struct trace_array *tr);
void (*start)(struct trace_array *tr);
void (*stop)(struct trace_array *tr);
void (*open)(struct trace_iterator *iter);
void (*pipe_open)(struct trace_iterator *iter);
+void (*wait_pipe)(struct trace_iterator *iter);
void (*close)(struct trace_iterator *iter);
ssize_t (*read)(struct trace_iterator *iter,
struct file *filp, char __user *ubuf,
@@ -432,6 +448,9 @@ void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
+void default_wait_pipe(struct trace_iterator *iter);
+void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
struct trace_array_cpu *data,
unsigned long ip,

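To make the new wait_pipe callback concrete, here is a minimal, hypothetical tracer skeleton; the mini_* names are invented, while the fields, poll_wait_pipe, and register_tracer() are the ones shown in this commit. A tracer that leaves .wait_pipe unset gets default_wait_pipe assigned at registration time, per the trace.c hunk above.

static int mini_trace_init(struct trace_array *tr)
{
	/* a real tracer should raise a warning itself if init fails */
	return 0;
}

static void mini_trace_reset(struct trace_array *tr)
{
}

static struct tracer mini_tracer __read_mostly = {
	.name		= "mini",
	.init		= mini_trace_init,
	.reset		= mini_trace_reset,
	/* sleep-poll instead of waking readers: avoids calling wake_up
	 * from contexts where it could deadlock or get traced itself */
	.wait_pipe	= poll_wait_pipe,
};

static __init int init_mini_tracer(void)
{
	return register_tracer(&mini_tracer);
}
device_initcall(init_mini_tracer);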
kernel/trace/trace_functions.c

@@ -225,6 +225,7 @@ static struct tracer function_trace __read_mostly =
.init = function_trace_init,
.reset = function_trace_reset,
.start = function_trace_start,
+.wait_pipe = poll_wait_pipe,
.flags = &func_flags,
.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
@@ -269,21 +270,21 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
-struct ftrace_hook_ops *ops, void *data);
+struct ftrace_probe_ops *ops, void *data);
-static struct ftrace_hook_ops traceon_hook_ops = {
+static struct ftrace_probe_ops traceon_probe_ops = {
.func = ftrace_traceon,
.print = ftrace_trace_onoff_print,
};
-static struct ftrace_hook_ops traceoff_hook_ops = {
+static struct ftrace_probe_ops traceoff_probe_ops = {
.func = ftrace_traceoff,
.print = ftrace_trace_onoff_print,
};
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
-struct ftrace_hook_ops *ops, void *data)
+struct ftrace_probe_ops *ops, void *data)
{
char str[KSYM_SYMBOL_LEN];
long count = (long)data;
@@ -291,12 +292,14 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
kallsyms_lookup(ip, NULL, NULL, NULL, str);
seq_printf(m, "%s:", str);
-if (ops == &traceon_hook_ops)
+if (ops == &traceon_probe_ops)
seq_printf(m, "traceon");
else
seq_printf(m, "traceoff");
-if (count != -1)
+if (count == -1)
+seq_printf(m, ":unlimited\n");
+else
seq_printf(m, ":count=%ld", count);
seq_putc(m, '\n');
@@ -306,15 +309,15 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
-struct ftrace_hook_ops *ops;
+struct ftrace_probe_ops *ops;
/* we register both traceon and traceoff to this callback */
if (strcmp(cmd, "traceon") == 0)
-ops = &traceon_hook_ops;
+ops = &traceon_probe_ops;
else
-ops = &traceoff_hook_ops;
+ops = &traceoff_probe_ops;
-unregister_ftrace_function_hook_func(glob, ops);
+unregister_ftrace_function_probe_func(glob, ops);
return 0;
}
@@ -322,7 +325,7 @@ ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
-struct ftrace_hook_ops *ops;
+struct ftrace_probe_ops *ops;
void *count = (void *)-1;
char *number;
int ret;
@@ -336,9 +339,9 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
/* we register both traceon and traceoff to this callback */
if (strcmp(cmd, "traceon") == 0)
-ops = &traceon_hook_ops;
+ops = &traceon_probe_ops;
else
-ops = &traceoff_hook_ops;
+ops = &traceoff_probe_ops;
if (!param)
goto out_reg;
@@ -357,7 +360,7 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
return ret;
 out_reg:
-ret = register_ftrace_function_hook(glob, ops, count);
+ret = register_ftrace_function_probe(glob, ops, count);
return ret;
}
@@ -397,6 +400,5 @@ static __init int init_function_trace(void)
init_func_cmd_traceon();
return register_tracer(&function_trace);
}
device_initcall(init_function_trace);

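For completeness, the user-visible side of the probes above: a hedged userspace sketch that arms the traceoff probe through the set_ftrace_filter file, using the glob:command[:count] syntax parsed by ftrace_process_regex() and ftrace_trace_onoff_callback(). The debugfs mount point is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* disable tracing the first five times schedule() is hit */
	const char *cmd = "schedule:traceoff:5";
	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter",
		      O_WRONLY);

	if (fd < 0) {
		perror("open set_ftrace_filter");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}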
kernel/trace/trace_functions_graph.c

@@ -757,6 +757,7 @@ static struct tracer graph_trace __read_mostly = {
.name = "function_graph",
.open = graph_trace_open,
.close = graph_trace_close,
+.wait_pipe = poll_wait_pipe,
.init = graph_trace_init,
.reset = graph_trace_reset,
.print_line = print_graph_function,

kernel/trace/trace_irqsoff.c

@@ -1,5 +1,5 @@
/*
- * trace irqs off criticall timings
+ * trace irqs off critical timings
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>

kernel/trace/trace_sched_switch.c

@@ -93,7 +93,7 @@ static int tracing_sched_register(void)
ret = register_trace_sched_switch(probe_sched_switch);
if (ret) {
pr_info("sched trace: Couldn't activate tracepoint"
-" probe to kernel_sched_schedule\n");
+" probe to kernel_sched_switch\n");
goto fail_deprobe_wake_new;
}
@@ -221,6 +221,7 @@ static struct tracer sched_switch_trace __read_mostly =
.reset = sched_switch_trace_reset,
.start = sched_switch_trace_start,
.stop = sched_switch_trace_stop,
+.wait_pipe = poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_sched_switch,
#endif

kernel/trace/trace_sched_wakeup.c

@@ -284,7 +284,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
ret = register_trace_sched_switch(probe_wakeup_sched_switch);
if (ret) {
pr_info("sched trace: Couldn't activate tracepoint"
-" probe to kernel_sched_schedule\n");
+" probe to kernel_sched_switch\n");
goto fail_deprobe_wake_new;
}
@@ -380,6 +380,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
+.wait_pipe = poll_wait_pipe,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,

kernel/trace/trace_selftest.c

@@ -24,10 +24,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
struct ring_buffer_event *event;
struct trace_entry *entry;
+unsigned int loops = 0;
while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
entry = ring_buffer_event_data(event);
+/*
+ * The ring buffer is a size of trace_buf_size, if
+ * we loop more than the size, there's something wrong
+ * with the ring buffer.
+ */
+if (loops++ > trace_buf_size) {
+printk(KERN_CONT ".. bad ring buffer ");
+goto failed;
+}
if (!trace_valid_entry(entry)) {
printk(KERN_CONT ".. invalid entry %d ",
entry->type);
@@ -58,11 +68,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
cnt = ring_buffer_entries(tr->buffer);
+/*
+ * The trace_test_buffer_cpu runs a while loop to consume all data.
+ * If the calling tracer is broken, and is constantly filling
+ * the buffer, this will run forever, and hard lock the box.
+ * We disable the ring buffer while we do this test to prevent
+ * a hard lock up.
+ */
+tracing_off();
for_each_possible_cpu(cpu) {
ret = trace_test_buffer_cpu(tr, cpu);
if (ret)
break;
}
+tracing_on();
__raw_spin_unlock(&ftrace_max_lock);
local_irq_restore(flags);
@@ -107,9 +126,9 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
func();
/*
- * Some archs *cough*PowerPC*cough* add charachters to the
+ * Some archs *cough*PowerPC*cough* add characters to the
 * start of the function names. We simply put a '*' to
- * accomodate them.
+ * accommodate them.
*/
func_name = "*" STR(DYN_FTRACE_TEST_NAME);
@@ -622,7 +641,7 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
-return 0;
+return ret;
}
/* Sleep for a 1/10 of a second */
@@ -634,6 +653,11 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
trace->reset(tr);
tracing_start();
+if (!ret && !count) {
+printk(KERN_CONT ".. no entries found ..");
+ret = -1;
+}
return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
@@ -661,6 +685,11 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
trace->reset(tr);
tracing_start();
+if (!ret && !count) {
+printk(KERN_CONT ".. no entries found ..");
+ret = -1;
+}
return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

kernel/trace/trace_stat.c

@@ -30,7 +30,7 @@ struct tracer_stat_session {
struct dentry *file;
};
-/* All of the sessions currently in use. Each stat file embeed one session */
+/* All of the sessions currently in use. Each stat file embed one session */
static LIST_HEAD(all_stat_sessions);
static DEFINE_MUTEX(all_stat_sessions_mutex);

kernel/trace/trace_sysprof.c

@@ -327,5 +327,5 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
d_tracer, NULL, &sysprof_sample_fops);
if (entry)
return;
-pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n");
+pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
}