/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/debugfs.h>
|
|
|
|
#include <linux/ftrace.h>
|
2008-10-02 13:26:05 +02:00
|
|
|
#include <linux/kallsyms.h>
|
2008-09-23 12:32:08 +02:00
|
|
|
|
|
|
|
#include "trace.h"
|
|
|
|
|
|
|
|
/* Trace array used by the boot tracer; set by boot_trace_init(). */
static struct trace_array *boot_trace;
/*
 * Set (by start_boot_trace()) once the pre-SMP initcalls have finished;
 * trace_boot() records nothing until then.
 */
static bool pre_initcalls_finished;
|
2008-09-23 12:32:08 +02:00
|
|
|
|
2008-10-31 12:57:20 +01:00
|
|
|
/*
 * Tells the boot tracer that the pre_smp_initcalls are finished.
 * From this point on, trace_boot() will record initcall events.
 * It doesn't enable sched events tracing however.
 * You have to call enable_boot_trace to do so.
 */
void start_boot_trace(void)
{
	pre_initcalls_finished = true;
}
|
|
|
|
|
|
|
|
/*
 * Start recording cmdline/sched-switch events for the boot tracer.
 * No-op until start_boot_trace() has flagged the pre-SMP initcalls
 * as finished.
 */
void enable_boot_trace(void)
{
	if (pre_initcalls_finished)
		tracing_start_cmdline_record();
}
|
|
|
|
|
2008-10-31 12:57:20 +01:00
|
|
|
/*
 * Stop recording cmdline/sched-switch events. Mirrors
 * enable_boot_trace(): only acts once the pre-SMP initcalls are done,
 * so start/stop calls stay balanced.
 */
void disable_boot_trace(void)
{
	if (pre_initcalls_finished)
		tracing_stop_cmdline_record();
}
|
|
|
|
|
2008-10-31 13:34:45 +01:00
|
|
|
/* Reset the boot tracer by delegating to the sched_switch tracer. */
static void reset_boot_trace(struct trace_array *tr)
{
	sched_switch_trace.reset(tr);
}
|
|
|
|
|
2008-09-23 12:32:08 +02:00
|
|
|
/*
 * Initialize the boot tracer: remember the trace array, reset the
 * buffer of every possible cpu, then chain into the sched_switch
 * tracer so context switches can be recorded alongside initcalls.
 */
static void boot_trace_init(struct trace_array *tr)
{
	int cpu;
	boot_trace = tr;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);

	sched_switch_trace.init(tr);
}
|
|
|
|
|
|
|
|
static void boot_trace_ctrl_update(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
if (tr->ctrl)
|
2008-10-31 12:57:20 +01:00
|
|
|
enable_boot_trace();
|
2008-09-23 12:32:08 +02:00
|
|
|
else
|
2008-10-31 12:57:20 +01:00
|
|
|
disable_boot_trace();
|
2008-09-23 12:32:08 +02:00
|
|
|
}
|
|
|
|
|
2008-09-29 20:31:58 +02:00
|
|
|
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
|
2008-09-23 12:32:08 +02:00
|
|
|
{
|
2008-09-29 20:31:58 +02:00
|
|
|
int ret;
|
2008-09-23 12:32:08 +02:00
|
|
|
struct trace_entry *entry = iter->ent;
|
2008-09-30 05:02:42 +02:00
|
|
|
struct trace_boot *field = (struct trace_boot *)entry;
|
|
|
|
struct boot_trace *it = &field->initcall;
|
2008-09-23 12:32:08 +02:00
|
|
|
struct trace_seq *s = &iter->seq;
|
2008-10-02 12:59:20 +02:00
|
|
|
struct timespec calltime = ktime_to_timespec(it->calltime);
|
|
|
|
struct timespec rettime = ktime_to_timespec(it->rettime);
|
2008-09-23 12:32:08 +02:00
|
|
|
|
2008-09-29 20:31:58 +02:00
|
|
|
if (entry->type == TRACE_BOOT) {
|
2008-10-04 22:42:27 +02:00
|
|
|
ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
|
2008-10-02 12:59:20 +02:00
|
|
|
calltime.tv_sec,
|
|
|
|
calltime.tv_nsec,
|
|
|
|
it->func, it->caller);
|
|
|
|
if (!ret)
|
2008-09-29 20:31:58 +02:00
|
|
|
return TRACE_TYPE_PARTIAL_LINE;
|
2008-10-02 13:26:05 +02:00
|
|
|
|
2008-10-04 22:42:27 +02:00
|
|
|
ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
|
2008-10-02 12:59:20 +02:00
|
|
|
"returned %d after %lld msecs\n",
|
|
|
|
rettime.tv_sec,
|
|
|
|
rettime.tv_nsec,
|
|
|
|
it->func, it->result, it->duration);
|
2008-10-02 13:26:05 +02:00
|
|
|
|
2008-10-02 12:59:20 +02:00
|
|
|
if (!ret)
|
|
|
|
return TRACE_TYPE_PARTIAL_LINE;
|
|
|
|
return TRACE_TYPE_HANDLED;
|
2008-09-29 20:31:58 +02:00
|
|
|
}
|
|
|
|
return TRACE_TYPE_UNHANDLED;
|
2008-09-23 12:32:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Registration record for the boot ("initcall") tracer plugin. */
struct tracer boot_tracer __read_mostly =
{
	.name =		"initcall",
	.init =		boot_trace_init,
	.reset =	reset_boot_trace,
	.ctrl_update =	boot_trace_ctrl_update,
	.print_line =	initcall_print_line,
};
|
|
|
|
|
2008-10-02 13:26:05 +02:00
|
|
|
/*
 * Record one initcall event into the boot trace ring buffer.
 * @it holds the call/return timing and result data gathered by the
 * caller; @fn is the initcall itself, whose symbol name is resolved
 * here. Silently does nothing before the pre-SMP initcalls finish
 * (see start_boot_trace()).
 *
 * Fix: the local 'data' (tr->data[smp_processor_id()]) was assigned
 * but never used; the dead read has been removed.
 */
void trace_boot(struct boot_trace *it, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot *entry;
	unsigned long irq_flags;
	struct trace_array *tr = boot_trace;

	if (!pre_initcalls_finished)
		return;

	/* Get its name now since this function could
	 * disappear because it is in the .init section.
	 */
	sprint_symbol(it->func, (unsigned long)fn);
	/* Keep the event on the local cpu while we write it out. */
	preempt_disable();

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_BOOT;
	entry->initcall = *it;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	/* Wake up any reader blocked on the trace pipe. */
	trace_wake_up();

 out:
	preempt_enable();
}
|