Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull tracing updates from Steve Rostedt.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar, 2012-07-06 11:12:17 +02:00
commit 35c2f48c66
7 changed files with 36 additions and 22 deletions

arch/x86/kvm/trace.h

@@ -710,16 +710,6 @@ TRACE_EVENT(kvm_skinit,
 		  __entry->rip, __entry->slb)
 );
 
-#define __print_insn(insn, ilen) ({				\
-	int i;							\
-	const char *ret = p->buffer + p->len;			\
-								\
-	for (i = 0; i < ilen; ++i)				\
-		trace_seq_printf(p, " %02x", insn[i]);		\
-	trace_seq_printf(p, "%c", 0);				\
-	ret;							\
-	})
-
 #define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
 #define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
 #define KVM_EMUL_INSN_F_CS_D   (1 << 2)
@@ -786,7 +776,7 @@ TRACE_EVENT(kvm_emulate_insn,
 
 	TP_printk("%x:%llx:%s (%s)%s",
 		  __entry->csbase, __entry->rip,
-		  __print_insn(__entry->insn, __entry->len),
+		  __print_hex(__entry->insn, __entry->len),
 		  __print_symbolic(__entry->flags,
 				   kvm_trace_symbol_emul_flags),
 		  __entry->failed ? " failed" : ""
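The deleted __print_insn macro open-coded a hex dump of the instruction bytes straight into the trace_seq; the generic __print_hex() helper now produces an equivalent space-separated dump without poking at trace_seq internals. A minimal user-space sketch of that formatting (hypothetical names, not the kernel implementation):

#include <stdio.h>

/* Render each byte as " %02x", as the removed macro did for the insn bytes. */
static void print_hex(const unsigned char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		printf(" %02x", buf[i]);
	putchar('\n');
}

int main(void)
{
	/* mov $0x1,%eax on x86 */
	const unsigned char insn[] = { 0xb8, 0x01, 0x00, 0x00, 0x00 };

	print_hex(insn, (int)sizeof(insn));	/* prints " b8 01 00 00 00" */
	return 0;
}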

include/linux/ftrace_event.h

@@ -65,7 +65,7 @@ struct trace_iterator {
 	void			*private;
 	int			cpu_file;
 	struct mutex		mutex;
-	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
+	struct ring_buffer_iter	**buffer_iter;
 	unsigned long		iter_flags;
 
 	/* trace_seq for __print_flags() and __print_symbolic() etc. */

include/trace/ftrace.h

@@ -571,6 +571,7 @@ static inline void ftrace_test_probe_##call(void) \
 
 #undef __print_flags
 #undef __print_symbolic
+#undef __print_hex
 #undef __get_dynamic_array
 #undef __get_str

kernel/trace/ring_buffer.c

@@ -3239,6 +3239,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
 		goto out;
 
+	/* Don't bother swapping if the ring buffer is empty */
+	if (rb_num_of_entries(cpu_buffer) == 0)
+		goto out;
+
 	/*
 	 * Reset the reader page to size zero.
 	 */
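The added check makes rb_get_reader_page() bail out before swapping the reader page with the writer's pages when there are no unread entries. A stand-alone sketch of the same guard-clause idea (hypothetical names and a heavily simplified buffer, not the kernel code):

#include <stdio.h>

struct demo_cpu_buffer {
	unsigned long entries;	/* entries written */
	unsigned long read;	/* entries already consumed */
	int reader_page;
	int head_page;
};

/* Roughly what rb_num_of_entries() reports: entries not yet read. */
static unsigned long demo_num_entries(struct demo_cpu_buffer *b)
{
	return b->entries - b->read;
}

/* Return the page to read from, or -1 without swapping when the buffer is empty. */
static int demo_get_reader_page(struct demo_cpu_buffer *b)
{
	if (demo_num_entries(b) == 0)
		return -1;

	/* swap the reader page with the head page (greatly simplified) */
	int tmp = b->reader_page;
	b->reader_page = b->head_page;
	b->head_page = tmp;
	return b->reader_page;
}

int main(void)
{
	struct demo_cpu_buffer b = { .reader_page = 0, .head_page = 1 };

	printf("empty buffer: page %d\n", demo_get_reader_page(&b));	/* -1 */
	b.entries = 3;
	printf("after writes: page %d\n", demo_get_reader_page(&b));	/* 1 */
	return 0;
}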

kernel/trace/trace.c

@@ -830,6 +830,8 @@ int register_tracer(struct tracer *type)
 		current_trace = saved_tracer;
 		if (ret) {
 			printk(KERN_CONT "FAILED!\n");
+			/* Add the warning after printing 'FAILED' */
+			WARN_ON(1);
 			goto out;
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
@@ -1708,9 +1710,11 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
+	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
+
 	iter->idx++;
-	if (iter->buffer_iter[iter->cpu])
-		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+	if (buf_iter)
+		ring_buffer_read(buf_iter, NULL);
 }
 
 static struct trace_entry *
@@ -1718,7 +1722,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 		unsigned long *lost_events)
 {
 	struct ring_buffer_event *event;
-	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
+	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
 
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
@@ -1856,10 +1860,10 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 
 	tr->data[cpu]->skipped_entries = 0;
 
-	if (!iter->buffer_iter[cpu])
+	buf_iter = trace_buffer_iter(iter, cpu);
+	if (!buf_iter)
 		return;
 
-	buf_iter = iter->buffer_iter[cpu];
 	ring_buffer_iter_reset(buf_iter);
 
 	/*
@@ -2205,13 +2209,15 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 
 int trace_empty(struct trace_iterator *iter)
 {
+	struct ring_buffer_iter *buf_iter;
 	int cpu;
 
 	/* If we are looking at one CPU buffer, only check that one */
 	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
 		cpu = iter->cpu_file;
-		if (iter->buffer_iter[cpu]) {
-			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+		buf_iter = trace_buffer_iter(iter, cpu);
+		if (buf_iter) {
+			if (!ring_buffer_iter_empty(buf_iter))
 				return 0;
 		} else {
 			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2221,8 +2227,9 @@ int trace_empty(struct trace_iterator *iter)
 	}
 
 	for_each_tracing_cpu(cpu) {
-		if (iter->buffer_iter[cpu]) {
-			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+		buf_iter = trace_buffer_iter(iter, cpu);
+		if (buf_iter) {
+			if (!ring_buffer_iter_empty(buf_iter))
 				return 0;
 		} else {
 			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2381,6 +2388,8 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (!iter)
 		return ERR_PTR(-ENOMEM);
 
+	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
+				    GFP_KERNEL);
 	/*
 	 * We make a copy of the current tracer to avoid concurrent
 	 * changes on it while we are reading.
@@ -2441,6 +2450,7 @@ __tracing_open(struct inode *inode, struct file *file)
  fail:
 	mutex_unlock(&trace_types_lock);
 	kfree(iter->trace);
+	kfree(iter->buffer_iter);
 	seq_release_private(inode, file);
 	return ERR_PTR(-ENOMEM);
 }
@@ -2481,6 +2491,7 @@ static int tracing_release(struct inode *inode, struct file *file)
 	mutex_destroy(&iter->mutex);
 	free_cpumask_var(iter->started);
 	kfree(iter->trace);
+	kfree(iter->buffer_iter);
 	seq_release_private(inode, file);
 	return 0;
 }

kernel/trace/trace.h

@@ -317,6 +317,14 @@ struct tracer {
 
 #define TRACE_PIPE_ALL_CPU	-1
 
+static inline struct ring_buffer_iter *
+trace_buffer_iter(struct trace_iterator *iter, int cpu)
+{
+	if (iter->buffer_iter && iter->buffer_iter[cpu])
+		return iter->buffer_iter[cpu];
+	return NULL;
+}
+
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
 void trace_wake_up(void);
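trace_buffer_iter() is the NULL-safe accessor the rest of the series converts callers to: buffer_iter is now an array allocated at open time and sized by num_possible_cpus() rather than a fixed NR_CPUS array embedded in struct trace_iterator, so callers have to tolerate both a missing array and an empty slot. A stand-alone user-space sketch of the pattern (hypothetical names, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct demo_iter {
	int ncpus;
	void **buffer_iter;	/* models struct ring_buffer_iter **buffer_iter */
};

/* Same idea as trace_buffer_iter(): never dereference a missing array or slot. */
static void *demo_buffer_iter(struct demo_iter *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int main(void)
{
	struct demo_iter iter = { .ncpus = 4 };

	/* models kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(), GFP_KERNEL) */
	iter.buffer_iter = calloc(iter.ncpus, sizeof(*iter.buffer_iter));
	if (iter.buffer_iter)
		iter.buffer_iter[2] = &iter;	/* pretend only CPU 2 got an iterator */

	for (int cpu = 0; cpu < iter.ncpus; cpu++)
		printf("cpu %d: %s\n", cpu,
		       demo_buffer_iter(&iter, cpu) ? "has iterator" : "none");

	free(iter.buffer_iter);
	return 0;
}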

kernel/trace/trace_functions_graph.c

@@ -538,7 +538,7 @@ get_return_for_leaf(struct trace_iterator *iter,
 		next = &data->ret;
 	} else {
 
-		ring_iter = iter->buffer_iter[iter->cpu];
+		ring_iter = trace_buffer_iter(iter, iter->cpu);
 
 		/* First peek to compare current entry and the next one */
 		if (ring_iter)