perf kvm: Option to print events that exceed a duration

This is useful for spotting high-latency blips. It is normal for HLT exits
to have long exit times, so those are excluded from the duration check.
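
The option applies to 'perf kvm stat live' and takes the duration in usecs.
As an illustrative invocation (the 500 usec value below is arbitrary), this
prints only non-HLT exits that took longer than 500 usecs:

  # perf kvm stat live --duration 500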

v2: renamed the option from 'threshold' to 'duration' per acme's request

Signed-off-by: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Runzhen Wang <runzhen@linux.vnet.ibm.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1375926999-75129-1-git-send-email-dsahern@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 70f7b4a7f3 (parent 0a3d23a256)
Author:    David Ahern <dsahern@gmail.com>  2013-08-07 21:56:38 -04:00
Committer: Arnaldo Carvalho de Melo <acme@redhat.com>
2 changed files with 24 additions and 4 deletions

tools/perf/builtin-kvm.c

@@ -106,6 +106,7 @@ struct perf_kvm_stat {
 	u64 total_time;
 	u64 total_count;
 	u64 lost_events;
+	u64 duration;

 	const char *pid_str;
 	struct intlist *pid_list;
@@ -473,7 +474,7 @@ static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
 static bool handle_end_event(struct perf_kvm_stat *kvm,
 			     struct vcpu_event_record *vcpu_record,
 			     struct event_key *key,
-			     u64 timestamp)
+			     struct perf_sample *sample)
 {
 	struct kvm_event *event;
 	u64 time_begin, time_diff;
@@ -510,12 +511,24 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
 	vcpu_record->start_time = 0;

 	/* seems to happen once in a while during live mode */
-	if (timestamp < time_begin) {
+	if (sample->time < time_begin) {
 		pr_debug("End time before begin time; skipping event.\n");
 		return true;
 	}

-	time_diff = timestamp - time_begin;
+	time_diff = sample->time - time_begin;
+
+	if (kvm->duration && time_diff > kvm->duration) {
+		char decode[32];
+
+		kvm->events_ops->decode_key(kvm, &event->key, decode);
+		if (strcmp(decode, "HLT")) {
+			pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
+				sample->time, sample->pid, vcpu_record->vcpu_id,
+				decode, time_diff/1000);
+		}
+	}

 	return update_kvm_event(event, vcpu, time_diff);
 }
@@ -562,7 +575,7 @@ static bool handle_kvm_event(struct perf_kvm_stat *kvm,
 		return handle_begin_event(kvm, vcpu_record, &key, sample->time);

 	if (kvm->events_ops->is_end_event(evsel, sample, &key))
-		return handle_end_event(kvm, vcpu_record, &key, sample->time);
+		return handle_end_event(kvm, vcpu_record, &key, sample);

 	return true;
 }
@@ -1429,6 +1442,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
 	OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
 		   "key for sorting: sample(sort by samples number)"
 		   " time (sort by avg time)"),
+	OPT_U64(0, "duration", &kvm->duration,
+		"show events other than HALT that take longer than duration usecs"),
 	OPT_END()
 	};
 	const char * const live_usage[] = {
@@ -1467,6 +1482,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
 		usage_with_options(live_usage, live_options);
 	}

+	kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */
+
 	/*
 	 * target related setups
 	 */

tools/perf/util/util.h

@@ -125,6 +125,9 @@
 #ifndef NSEC_PER_SEC
 # define NSEC_PER_SEC 1000000000ULL
 #endif
+#ifndef NSEC_PER_USEC
+# define NSEC_PER_USEC 1000ULL
+#endif

 static inline unsigned long long rdclock(void)
 {