Merge branch 'tracing/ftrace' into tracing/core

Merge reason: this mini-topic had outstanding problems that delayed
              its merge, so it does not fast-forward.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 2009-06-04 13:59:26 +02:00
commit 64edbc5620
9 changed files with 207 additions and 88 deletions


@@ -751,12 +751,25 @@ and is between 256 and 4096 characters. It is defined in the file
ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
ftrace=[tracer]
[ftrace] will set and start the specified tracer
[FTRACE] will set and start the specified tracer
as early as possible in order to facilitate early
boot debugging.
ftrace_dump_on_oops
[ftrace] will dump the trace buffers on oops.
[FTRACE] will dump the trace buffers on oops.
ftrace_filter=[function-list]
[FTRACE] Limit the functions traced by the function
tracer at boot up. function-list is a comma separated
list of functions. This list can be changed at run
time by the set_ftrace_filter file in the debugfs
tracing directory.
ftrace_notrace=[function-list]
[FTRACE] Do not trace the functions specified in
function-list. This list can be changed at run time
by the set_ftrace_notrace file in the debugfs
tracing directory.
gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
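
For illustration only (the function names are hypothetical, and debugfs is assumed to be mounted at /sys/kernel/debug), the two new parameters combine on a boot command line like:

	ftrace=function ftrace_filter=hrtimer_*,do_IRQ ftrace_notrace=rcu_*

and the same lists can be rewritten later through the debugfs files, e.g.:

	echo 'hrtimer_*' > /sys/kernel/debug/tracing/set_ftrace_filter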


@@ -51,6 +51,7 @@ struct trace_iterator {
int cpu_file;
struct mutex mutex;
struct ring_buffer_iter *buffer_iter[NR_CPUS];
unsigned long iter_flags;
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
@@ -58,7 +59,6 @@ struct trace_iterator {
int cpu;
u64 ts;
unsigned long iter_flags;
loff_t pos;
long idx;


@@ -7,18 +7,18 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq
#define softirq_name(sirq) { sirq, #sirq }
#define show_softirq_name(val) \
__print_symbolic(val, \
softirq_name(HI_SOFTIRQ), \
softirq_name(TIMER_SOFTIRQ), \
softirq_name(NET_TX_SOFTIRQ), \
softirq_name(NET_RX_SOFTIRQ), \
softirq_name(BLOCK_SOFTIRQ), \
softirq_name(TASKLET_SOFTIRQ), \
softirq_name(SCHED_SOFTIRQ), \
softirq_name(HRTIMER_SOFTIRQ), \
softirq_name(RCU_SOFTIRQ))
#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val) \
__print_symbolic(val, \
softirq_name(HI), \
softirq_name(TIMER), \
softirq_name(NET_TX), \
softirq_name(NET_RX), \
softirq_name(BLOCK), \
softirq_name(TASKLET), \
softirq_name(SCHED), \
softirq_name(HRTIMER), \
softirq_name(RCU))
/**
* irq_handler_entry - called immediately before the irq action handler
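
The rewritten softirq_name() pastes the _SOFTIRQ suffix onto the short name with the ## operator, so the table handed to __print_symbolic() pairs each enum value with a compact label while spelling the suffix only once. A stand-alone sketch of the same token-pasting trick (plain userspace C, names invented for illustration, not kernel code):

	#include <stdio.h>

	/* Same idea as the patch: HI expands to { HI_SOFTIRQ, "HI" }. */
	#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }

	enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ };

	static const struct { int val; const char *name; } softirqs[] = {
		softirq_name(HI),
		softirq_name(TIMER),
		softirq_name(NET_TX),
	};

	int main(void)
	{
		/* A minimal __print_symbolic()-style lookup. */
		int val = TIMER_SOFTIRQ;
		unsigned int i;

		for (i = 0; i < sizeof(softirqs) / sizeof(softirqs[0]); i++)
			if (softirqs[i].val == val)
				printf("%s\n", softirqs[i].name);	/* prints TIMER */
		return 0;
	}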


@@ -18,14 +18,17 @@
#include <linux/ftrace_event.h>
#undef __array
#define __array(type, item, len) type item[len];
#undef __field
#define __field(type, item) type item;
#undef __array
#define __array(type, item, len) type item[len];
#undef __dynamic_array
#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;
#undef __string
#define __string(item, src) unsigned short __str_loc_##item;
#define __string(item, src) __dynamic_array(char, item, -1)
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
@@ -35,7 +38,7 @@
struct ftrace_raw_##name { \
struct trace_entry ent; \
tstruct \
char __str_data[0]; \
char __data[0]; \
}; \
static struct ftrace_event_call event_##name
@@ -47,30 +50,31 @@
*
* Include the following:
*
* struct ftrace_str_offsets_<call> {
* int <str1>;
* int <str2>;
* struct ftrace_data_offsets_<call> {
* int <item1>;
* int <item2>;
* [...]
* };
*
* The __string() macro will create each int <str>, this is to
* keep the offset of each string from the beginning of the event
* once we perform the strlen() of the src strings.
*
* The __dynamic_array() macro will create each int <item>, this is
* to keep the offset of each array from the beginning of the event.
*/
#undef __array
#define __array(type, item, len)
#undef __field
#define __field(type, item);
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) int item;
#undef __string
#define __string(item, src) int item;
#define __string(item, src) __dynamic_array(char, item, -1)
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
struct ftrace_str_offsets_##call { \
struct ftrace_data_offsets_##call { \
tstruct; \
};
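
For a sense of what this stage generates: given a hypothetical event whose TP_STRUCT__entry contains __field(int, irq) and __string(name, ...), __string() forwards to __dynamic_array() while __field()/__array() expand to nothing here, so the struct would come out roughly as:

	struct ftrace_data_offsets_my_event {
		int name;	/* offset of the string data within the record */
	};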
@@ -119,8 +123,12 @@
#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args
#undef __get_dynamic_array
#define __get_dynamic_array(field) \
((void *)__entry + __entry->__data_loc_##field)
#undef __get_str
#define __get_str(field) ((char *)__entry + __entry->__str_loc_##field)
#define __get_str(field) (char *)__get_dynamic_array(field)
#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
@@ -207,16 +215,19 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
if (!ret) \
return 0;
#undef __string
#define __string(item, src) \
ret = trace_seq_printf(s, "\tfield: __str_loc " #item ";\t" \
"offset:%u;tsize:%u;\n", \
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \
"offset:%u;\tsize:%u;\n", \
(unsigned int)offsetof(typeof(field), \
__str_loc_##item), \
(unsigned int)sizeof(field.__str_loc_##item)); \
__data_loc_##item), \
(unsigned int)sizeof(field.__data_loc_##item)); \
if (!ret) \
return 0;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __entry
#define __entry REC
@@ -260,11 +271,14 @@ ftrace_format_##call(struct trace_seq *s) \
if (ret) \
return ret;
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
offsetof(typeof(field), __data_loc_##item), \
sizeof(field.__data_loc_##item), 0);
#undef __string
#define __string(item, src) \
ret = trace_define_field(event_call, "__str_loc", #item, \
offsetof(typeof(field), __str_loc_##item), \
sizeof(field.__str_loc_##item), 0);
#define __string(item, src) __dynamic_array(char, item, -1)
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
@@ -288,6 +302,43 @@ ftrace_define_fields_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* remember the offset of each array from the beginning of the event.
*/
#undef __entry
#define __entry entry
#undef __field
#define __field(type, item)
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
__data_offsets->item = __data_size + \
offsetof(typeof(*entry), __data); \
__data_size += (len) * sizeof(type);
#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
int __data_size = 0; \
struct ftrace_raw_##call __maybe_unused *entry; \
\
tstruct; \
\
return __data_size; \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
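
Expanding the same hypothetical event by hand (a sketch in kernel-style GNU C, not literal preprocessor output): __string(name, src) becomes __dynamic_array(char, name, strlen(src) + 1), so the generated helper records the string's offset and adds its size to the running total:

	static inline int ftrace_get_offsets_my_event(
		struct ftrace_data_offsets_my_event *__data_offsets,
		const char *src)
	{
		int __data_size = 0;
		struct ftrace_raw_my_event __maybe_unused *entry;

		/* from __dynamic_array(char, name, strlen(src) + 1): */
		__data_offsets->name = __data_size +
				       offsetof(typeof(*entry), __data);
		__data_size += (strlen(src) + 1) * sizeof(char);

		return __data_size;
	}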
/*
* Stage 4 of the trace events.
*
@@ -432,15 +483,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
__entry->__data_loc_##item = __data_offsets.item;
#undef __string
#define __string(item, src) \
__str_offsets.item = __str_size + \
offsetof(typeof(*entry), __str_data); \
__str_size += strlen(src) + 1;
#define __string(item, src) __dynamic_array(char, item, -1) \
#undef __assign_str
#define __assign_str(dst, src) \
__entry->__str_loc_##dst = __str_offsets.dst; \
strcpy(__get_str(dst), src);
#undef TRACE_EVENT
@@ -451,27 +502,30 @@ static struct ftrace_event_call event_##call; \
\
static void ftrace_raw_event_##call(proto) \
{ \
struct ftrace_str_offsets_##call __maybe_unused __str_offsets; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_event_call *event_call = &event_##call; \
struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \
unsigned long irq_flags; \
int __str_size = 0; \
int __data_size; \
int pc; \
\
local_save_flags(irq_flags); \
pc = preempt_count(); \
\
tstruct; \
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
event = trace_current_buffer_lock_reserve(event_##call.id, \
sizeof(struct ftrace_raw_##call) + __str_size,\
sizeof(*entry) + __data_size, \
irq_flags, pc); \
if (!event) \
return; \
entry = ring_buffer_event_data(event); \
\
assign; \
\
tstruct \
\
{ assign; } \
\
if (!filter_current_check_discard(event_call, entry, event)) \
trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
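
To see the whole dance outside the kernel, here is a small userspace analogue (illustrative names only, not kernel API; any C99 compiler): the record keeps its fixed-size fields plus a small __data_loc offset, and the variable-sized string is packed into the same allocation right behind them.

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Userspace stand-in for a generated ftrace_raw_<call> record. */
	struct raw_event {
		int		irq;			/* __field(int, irq)   */
		unsigned short	__data_loc_name;	/* __string(name, src) */
		char		__data[];		/* dynamic payload     */
	};

	int main(void)
	{
		const char *src = "eth0";

		/* "get_offsets" step: offset of the string, total extra size. */
		int data_size = 0;
		unsigned short name_loc = data_size +
					  offsetof(struct raw_event, __data);
		data_size += strlen(src) + 1;

		/* One reservation covers header plus payload, as in the
		 * ring buffer; then assign fields and copy the string in. */
		struct raw_event *entry = malloc(sizeof(*entry) + data_size);
		if (!entry)
			return 1;
		entry->irq = 19;
		entry->__data_loc_name = name_loc;		     /* tstruct      */
		strcpy((char *)entry + entry->__data_loc_name, src); /* __assign_str */

		/* Reader side, the __get_str() analogue: */
		printf("irq=%d name=%s\n", entry->irq,
		       (char *)entry + entry->__data_loc_name);
		free(entry);
		return 0;
	}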


@@ -56,6 +56,13 @@ config CONTEXT_SWITCH_TRACER
select MARKERS
bool
# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (context switch and event tracer) select TRACING instead.
# This allows those options to appear when no other tracer is selected, but
# the options do not appear when something else selects them. We need the two
# options GENERIC_TRACER and TRACING to avoid circular dependencies while
# still hiding the automatic options.
config TRACING
bool
select DEBUG_FS
@@ -66,6 +73,10 @@ config TRACING
select BINARY_PRINTF
select EVENT_TRACING
config GENERIC_TRACER
bool
select TRACING
#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
@@ -95,7 +106,7 @@ config FUNCTION_TRACER
depends on HAVE_FUNCTION_TRACER
select FRAME_POINTER
select KALLSYMS
select TRACING
select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
help
Enable the kernel to trace every kernel function. This is done
@@ -126,7 +137,7 @@ config IRQSOFF_TRACER
depends on TRACE_IRQFLAGS_SUPPORT
depends on GENERIC_TIME
select TRACE_IRQFLAGS
select TRACING
select GENERIC_TRACER
select TRACER_MAX_TRACE
help
This option measures the time spent in irqs-off critical
@@ -147,7 +158,7 @@ config PREEMPT_TRACER
default n
depends on GENERIC_TIME
depends on PREEMPT
select TRACING
select GENERIC_TRACER
select TRACER_MAX_TRACE
help
This option measures the time spent in preemption off critical
@@ -166,7 +177,7 @@ config PREEMPT_TRACER
config SYSPROF_TRACER
bool "Sysprof Tracer"
depends on X86
select TRACING
select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
help
This tracer provides the trace needed by the 'Sysprof' userspace
@@ -174,44 +185,33 @@ config SYSPROF_TRACER
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select TRACING
select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
select TRACER_MAX_TRACE
help
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
config ENABLE_CONTEXT_SWITCH_TRACER
bool "Trace process context switches"
select TRACING
select CONTEXT_SWITCH_TRACER
help
This tracer gets called from the context switch and records
all switching of tasks.
config ENABLE_EVENT_TRACING
bool "Trace various events in the kernel"
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
select TRACING
help
This tracer hooks to various trace points in the kernel
allowing the user to pick and choose which trace point they
want to trace.
want to trace. It also includes the sched_switch tracer plugin.
Note, all tracers enable event tracing. This option is
only a convenience to enable event tracing when no other
tracers are selected.
config FTRACE_SYSCALLS
bool "Trace syscalls"
depends on HAVE_FTRACE_SYSCALLS
select TRACING
select GENERIC_TRACER
select KALLSYMS
help
Basic tracer to catch the syscall entry and exit events.
config BOOT_TRACER
bool "Trace boot initcalls"
select TRACING
select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
help
This tracer helps developers to optimize boot times: it records
@@ -228,7 +228,7 @@ config BOOT_TRACER
config TRACE_BRANCH_PROFILING
bool
select TRACING
select GENERIC_TRACER
choice
prompt "Branch Profiling"
@@ -308,7 +308,7 @@ config BRANCH_TRACER
config POWER_TRACER
bool "Trace power consumption behavior"
depends on X86
select TRACING
select GENERIC_TRACER
help
This tracer helps developers to analyze and optimize the kernels
power management decisions, specifically the C-state and P-state
@@ -342,14 +342,14 @@ config STACK_TRACER
config HW_BRANCH_TRACER
depends on HAVE_HW_BRANCH_TRACER
bool "Trace hw branches"
select TRACING
select GENERIC_TRACER
help
This tracer records all branches on the system in a circular
buffer giving access to the last N branches for each cpu.
config KMEMTRACE
bool "Trace SLAB allocations"
select TRACING
select GENERIC_TRACER
help
kmemtrace provides tracing for slab allocator functions, such as
kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
@@ -369,7 +369,7 @@ config KMEMTRACE
config WORKQUEUE_TRACER
bool "Trace workqueues"
select TRACING
select GENERIC_TRACER
help
The workqueue tracer provides some statistical information
about each cpu workqueue thread such as the number of the
@@ -385,7 +385,7 @@ config BLK_DEV_IO_TRACE
select RELAY
select DEBUG_FS
select TRACEPOINTS
select TRACING
select GENERIC_TRACER
select STACKTRACE
help
Say Y here if you want to be able to trace the block layer actions
@@ -446,7 +446,7 @@ config FTRACE_SELFTEST
config FTRACE_STARTUP_TEST
bool "Perform a startup test on ftrace"
depends on TRACING
depends on GENERIC_TRACER
select FTRACE_SELFTEST
help
This option performs a series of startup tests on ftrace. On bootup
@@ -457,7 +457,7 @@ config FTRACE_STARTUP_TEST
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI
select TRACING
select GENERIC_TRACER
help
Mmiotrace traces Memory Mapped I/O access and is meant for
debugging and reverse engineering. It is called from the ioremap


@@ -32,6 +32,7 @@
#include <trace/events/sched.h>
#include <asm/ftrace.h>
#include <asm/setup.h>
#include "trace_output.h"
#include "trace_stat.h"
@@ -598,7 +599,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
local_irq_save(flags);
stat = &__get_cpu_var(ftrace_profile_stats);
if (!stat->hash)
if (!stat->hash || !ftrace_profile_enabled)
goto out;
rec = ftrace_find_profiled_func(stat, ip);
@@ -629,7 +630,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
local_irq_save(flags);
stat = &__get_cpu_var(ftrace_profile_stats);
if (!stat->hash)
if (!stat->hash || !ftrace_profile_enabled)
goto out;
calltime = trace->rettime - trace->calltime;
@@ -723,6 +724,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
ftrace_profile_enabled = 1;
} else {
ftrace_profile_enabled = 0;
/*
* unregister_ftrace_profiler() calls stop_machine(),
* so this acts like a synchronize_sched().
*/
unregister_ftrace_profiler();
}
}
@@ -2369,6 +2374,45 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
ftrace_set_regex(buf, len, reset, 0);
}
/*
* command line interface to allow users to set filters on boot up.
*/
#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
static int __init set_ftrace_notrace(char *str)
{
strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);
static int __init set_ftrace_filter(char *str)
{
strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
static void __init set_ftrace_early_filter(char *buf, int enable)
{
char *func;
while (buf) {
func = strsep(&buf, ",");
ftrace_set_regex(func, strlen(func), 0, enable);
}
}
static void __init set_ftrace_early_filters(void)
{
if (ftrace_filter_buf[0])
set_ftrace_early_filter(ftrace_filter_buf, 1);
if (ftrace_notrace_buf[0])
set_ftrace_early_filter(ftrace_notrace_buf, 0);
}
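
The early-filter walk leans on strsep(), which chops the buffer in place and NULLs the cursor after the last token. The same loop as a stand-alone userspace sketch (apply() is a hypothetical stand-in for ftrace_set_regex()):

	#define _DEFAULT_SOURCE		/* strsep() on glibc */
	#include <stdio.h>
	#include <string.h>

	static void apply(const char *func, int enable)
	{
		printf("%s: %s\n", enable ? "filter" : "notrace", func);
	}

	int main(void)
	{
		char buf[] = "hrtimer_interrupt,do_IRQ,vfs_read";
		char *list = buf;

		while (list) {
			/* Returns the next token; sets list to NULL at the end. */
			char *func = strsep(&list, ",");
			apply(func, 1);
		}
		return 0;
	}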
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
@@ -2829,6 +2873,8 @@ void __init ftrace_init(void)
if (ret)
pr_warning("Failed to register trace ftrace module notifier\n");
set_ftrace_early_filters();
return;
failed:
ftrace_disabled = 1;


@@ -2826,6 +2826,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
/* trace pipe does not show start of buffer */
cpumask_setall(iter->started);
if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
iter->cpu_file = cpu_file;
iter->tr = &global_trace;
mutex_init(&iter->mutex);


@@ -478,12 +478,12 @@ enum {
static int is_string_field(const char *type)
{
if (strstr(type, "__data_loc") && strstr(type, "char"))
return FILTER_DYN_STRING;
if (strchr(type, '[') && strstr(type, "char"))
return FILTER_STATIC_STRING;
if (!strcmp(type, "__str_loc"))
return FILTER_DYN_STRING;
return 0;
}
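
Because the filter code now recognizes dynamic strings by the "__data_loc"/"char" markers in a field's type string (instead of the old "__str_loc" name), the check is easy to exercise on its own. A stand-alone rendering of the new logic, with the filter constants stubbed out for illustration:

	#include <stdio.h>
	#include <string.h>

	enum { FILTER_NONE, FILTER_STATIC_STRING, FILTER_DYN_STRING };

	static int is_string_field(const char *type)
	{
		if (strstr(type, "__data_loc") && strstr(type, "char"))
			return FILTER_DYN_STRING;
		if (strchr(type, '[') && strstr(type, "char"))
			return FILTER_STATIC_STRING;
		return FILTER_NONE;
	}

	int main(void)
	{
		printf("%d\n", is_string_field("__data_loc char[]"));	/* 2: dynamic */
		printf("%d\n", is_string_field("char[16]"));		/* 1: static  */
		printf("%d\n", is_string_field("unsigned long"));	/* 0: neither */
		return 0;
	}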


@@ -17,6 +17,7 @@
static DECLARE_RWSEM(trace_event_mutex);
DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
@@ -250,6 +251,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
return p->buffer;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
@@ -275,6 +277,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
return p->buffer;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)