bpf: perf event change needed for subsequent bpf helpers

This patch does not change any existing functionality. It contains
the perf event changes needed by the subsequent
bpf_perf_event_read_value and bpf_perf_prog_read_value helpers.

Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
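
For context: the heart of this patch is extending perf_event_read_local()
so a kernel-side caller can optionally retrieve the event's enabled and
running times along with the counter value; either pointer may be NULL.
A minimal sketch of a caller (the call site is illustrative, not part of
this patch):

	/* Illustrative use of the extended API; assumes a valid event. */
	u64 value, enabled, running;
	int err;

	err = perf_event_read_local(event, &value, &enabled, &running);
	if (err)
		return err;	/* e.g. -EOPNOTSUPP or -EINVAL */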

--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h

@@ -806,6 +806,7 @@ struct perf_output_handle {
 struct bpf_perf_event_data_kern {
 	struct pt_regs *regs;
 	struct perf_sample_data *data;
+	struct perf_event *event;
 };
 
 #ifdef CONFIG_CGROUP_PERF
@@ -884,7 +885,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
 				int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value);
+int perf_event_read_local(struct perf_event *event, u64 *value,
+			  u64 *enabled, u64 *running);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
@@ -1286,7 +1288,8 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+static inline int perf_event_read_local(struct perf_event *event, u64 *value,
+					u64 *enabled, u64 *running)
 {
 	return -EINVAL;
 }

--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c

@@ -492,7 +492,7 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
 
 	ee = ERR_PTR(-EOPNOTSUPP);
 	event = perf_file->private_data;
-	if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
+	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
 		goto err_out;
 
 	ee = bpf_event_entry_gen(perf_file, map_file);

--- a/kernel/events/core.c
+++ b/kernel/events/core.c

@@ -3684,10 +3684,12 @@ static inline u64 perf_event_count(struct perf_event *event)
  *     will not be local and we cannot read them atomically
  *   - must not have a pmu::count method
  */
-int perf_event_read_local(struct perf_event *event, u64 *value)
+int perf_event_read_local(struct perf_event *event, u64 *value,
+			  u64 *enabled, u64 *running)
 {
 	unsigned long flags;
 	int ret = 0;
+	u64 now;
 
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
@@ -3718,13 +3720,21 @@ int perf_event_read_local(struct perf_event *event, u64 *value)
 		goto out;
 	}
 
+	now = event->shadow_ctx_time + perf_clock();
+	if (enabled)
+		*enabled = now - event->tstamp_enabled;
 	/*
 	 * If the event is currently on this CPU, its either a per-task event,
 	 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
 	 * oncpu == -1).
 	 */
-	if (event->oncpu == smp_processor_id())
+	if (event->oncpu == smp_processor_id()) {
 		event->pmu->read(event);
+		if (running)
+			*running = now - event->tstamp_running;
+	} else if (running) {
+		*running = event->total_time_running;
+	}
 
 	*value = local64_read(&event->count);
 out:
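
The enabled/running pair exists because perf may multiplex counters: an
event can be scheduled on the PMU ("running") for only part of the time
it is enabled. Consumers conventionally extrapolate the raw count,
roughly as in this illustrative sketch (not code from this patch; a real
user would also guard the multiplication against overflow):

	u64 value, enabled, running;

	if (!perf_event_read_local(event, &value, &enabled, &running) &&
	    running)
		value = value * enabled / running;	/* scale to full window */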
@@ -8072,6 +8082,7 @@ static void bpf_overflow_handler(struct perf_event *event,
 	struct bpf_perf_event_data_kern ctx = {
 		.data = data,
 		.regs = regs,
+		.event = event,
 	};
 	int ret = 0;
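
Stashing the struct perf_event pointer in bpf_perf_event_data_kern is
what lets a perf-event BPF program read its own counter later on: the
program's kernel context now carries the event. A rough sketch of how
the planned bpf_perf_prog_read_value helper can build on this plumbing
(helper body and buffer layout are assumptions about the follow-up
patches, not part of this one):

	/* Hypothetical follow-up helper body; buffer layout assumed. */
	struct value_buf { u64 counter, enabled, running; };

	static int prog_read_value_sketch(struct bpf_perf_event_data_kern *kctx,
					  struct value_buf *buf)
	{
		return perf_event_read_local(kctx->event, &buf->counter,
					     &buf->enabled, &buf->running);
	}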

--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c

@@ -275,7 +275,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 	if (!ee)
 		return -ENOENT;
 
-	err = perf_event_read_local(ee->event, &value);
+	err = perf_event_read_local(ee->event, &value, NULL, NULL);
 	/*
 	 * this api is ugly since we miss [-22..-2] range of valid
 	 * counter values, but that's uapi
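
The "ugly" uapi noted above is that bpf_perf_event_read() returns errors
and counter values through the same 64-bit return, so raw counts that
happen to land in the [-22..-2] errno range are indistinguishable from
failures. A BPF-program-side sketch of the problem ("counters" is an
assumed BPF_MAP_TYPE_PERF_EVENT_ARRAY map; illustrative only); the
subsequent bpf_perf_event_read_value helper avoids it by filling a
caller-supplied buffer and returning only 0 or -errno:

	static __always_inline u64 read_counter_or_zero(void *counters)
	{
		u64 val = bpf_perf_event_read(counters, BPF_F_CURRENT_CPU);

		/* in-band errors: a genuine count with the same bit
		 * pattern as -22..-2 cannot be told apart */
		if ((s64)val < 0 && (s64)val >= -22)
			return 0;
		return val;
	}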