libperf: Add perf_thread_map__nr/perf_thread_map__pid functions

Move thread_map__nr() and thread_map__pid() to libperf as
perf_thread_map__nr() and perf_thread_map__pid(), so they become part of
the libperf library as basic functions operating on perf_thread_map
objects.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190822111141.25823-6-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
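
For illustration, a minimal usage sketch of the two newly exported calls
(not part of this patch). It assumes the libperf header is reachable as
<perf/threadmap.h> and that the program is linked against libperf; those
build details are assumptions, not something this commit adds.

/* Sketch only: exercises perf_thread_map__nr()/perf_thread_map__pid(). */
#include <stdio.h>
#include <unistd.h>
#include <perf/threadmap.h>

int main(void)
{
	struct perf_thread_map *threads = perf_thread_map__new_dummy();

	if (!threads)
		return 1;

	/* A freshly created dummy map has a single entry with pid -1. */
	printf("nr: %d, pid: %d\n",
	       perf_thread_map__nr(threads),
	       (int) perf_thread_map__pid(threads, 0));

	/* Point the single entry at the current process. */
	perf_thread_map__set_pid(threads, 0, getpid());
	printf("pid after set: %d\n", (int) perf_thread_map__pid(threads, 0));

	perf_thread_map__put(threads);
	return 0;
}

Note that perf_thread_map__nr() tolerates a NULL map and reports one
thread in that case, while perf_thread_map__pid() expects a valid map and
thread index.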

@@ -158,7 +158,7 @@ static int set_tracing_pid(struct perf_ftrace *ftrace)
if (target__has_cpu(&ftrace->target))
return 0;
-for (i = 0; i < thread_map__nr(ftrace->evlist->core.threads); i++) {
+for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
scnprintf(buf, sizeof(buf), "%d",
ftrace->evlist->core.threads->map[i]);
if (append_tracing_file("set_ftrace_pid", buf) < 0)

@@ -1906,7 +1906,7 @@ static struct scripting_ops *scripting_ops;
static void __process_stat(struct evsel *counter, u64 tstamp)
{
-int nthreads = thread_map__nr(counter->core.threads);
+int nthreads = perf_thread_map__nr(counter->core.threads);
int ncpus = perf_evsel__nr_cpus(counter);
int cpu, thread;
static int header_printed;
@@ -1928,7 +1928,7 @@ static void __process_stat(struct evsel *counter, u64 tstamp)
printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n",
counter->core.cpus->map[cpu],
-thread_map__pid(counter->core.threads, thread),
+perf_thread_map__pid(counter->core.threads, thread),
counts->val,
counts->ena,
counts->run,

@@ -264,7 +264,7 @@ static int read_single_counter(struct evsel *counter, int cpu,
*/
static int read_counter(struct evsel *counter, struct timespec *rs)
{
-int nthreads = thread_map__nr(evsel_list->core.threads);
+int nthreads = perf_thread_map__nr(evsel_list->core.threads);
int ncpus, cpu, thread;
if (target__has_cpu(&target) && !target__has_per_thread(&target))
@@ -1893,7 +1893,7 @@ int cmd_stat(int argc, const char **argv)
thread_map__read_comms(evsel_list->core.threads);
if (target.system_wide) {
if (runtime_stat_new(&stat_config,
-thread_map__nr(evsel_list->core.threads))) {
+perf_thread_map__nr(evsel_list->core.threads))) {
goto out;
}
}

@@ -3188,7 +3188,7 @@ static int trace__set_filter_pids(struct trace *trace)
err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
trace->filter_pids.entries);
}
-} else if (thread_map__pid(trace->evlist->core.threads, 0) == -1) {
+} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
err = trace__set_filter_loop_pids(trace);
}
@@ -3417,7 +3417,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
evlist__enable(evlist);
}
-trace->multiple_threads = thread_map__pid(evlist->core.threads, 0) == -1 ||
+trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
evlist->core.threads->nr > 1 ||
perf_evlist__first(evlist)->core.attr.inherit;

@@ -11,6 +11,8 @@ LIBPERF_API struct perf_thread_map *perf_thread_map__new_dummy(void);
LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid);
LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int thread);
+LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads);
+LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread);
LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);

@@ -12,6 +12,8 @@ LIBPERF_0.0.1 {
perf_thread_map__new_dummy;
perf_thread_map__set_pid;
perf_thread_map__comm;
+perf_thread_map__nr;
+perf_thread_map__pid;
perf_thread_map__get;
perf_thread_map__put;
perf_evsel__new;

@@ -79,3 +79,13 @@ void perf_thread_map__put(struct perf_thread_map *map)
if (map && refcount_dec_and_test(&map->refcnt))
perf_thread_map__delete(map);
}
+int perf_thread_map__nr(struct perf_thread_map *threads)
+{
+return threads ? threads->nr : 1;
+}
+pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread)
+{
+return map->map[thread].pid;
+}

@@ -26,7 +26,7 @@ int test__thread_map(struct test *test __maybe_unused, int subtest __maybe_unuse
TEST_ASSERT_VAL("wrong nr", map->nr == 1);
TEST_ASSERT_VAL("wrong pid",
-thread_map__pid(map, 0) == getpid());
+perf_thread_map__pid(map, 0) == getpid());
TEST_ASSERT_VAL("wrong comm",
perf_thread_map__comm(map, 0) &&
!strcmp(perf_thread_map__comm(map, 0), NAME));
@@ -41,7 +41,7 @@ int test__thread_map(struct test *test __maybe_unused, int subtest __maybe_unuse
thread_map__read_comms(map);
TEST_ASSERT_VAL("wrong nr", map->nr == 1);
TEST_ASSERT_VAL("wrong pid", thread_map__pid(map, 0) == -1);
TEST_ASSERT_VAL("wrong pid", perf_thread_map__pid(map, 0) == -1);
TEST_ASSERT_VAL("wrong comm",
perf_thread_map__comm(map, 0) &&
!strcmp(perf_thread_map__comm(map, 0), "dummy"));
@@ -68,7 +68,7 @@ static int process_event(struct perf_tool *tool __maybe_unused,
TEST_ASSERT_VAL("wrong nr", threads->nr == 1);
TEST_ASSERT_VAL("wrong pid",
-thread_map__pid(threads, 0) == getpid());
+perf_thread_map__pid(threads, 0) == getpid());
TEST_ASSERT_VAL("wrong comm",
perf_thread_map__comm(threads, 0) &&
!strcmp(perf_thread_map__comm(threads, 0), NAME));

@@ -132,12 +132,12 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
if (per_cpu) {
mp->cpu = evlist->core.cpus->map[idx];
if (evlist->core.threads)
-mp->tid = thread_map__pid(evlist->core.threads, 0);
+mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
else
mp->tid = -1;
} else {
mp->cpu = -1;
-mp->tid = thread_map__pid(evlist->core.threads, idx);
+mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
}
}

@@ -647,7 +647,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
for (thread = 0; thread < threads->nr; ++thread) {
if (__event__synthesize_thread(comm_event, mmap_event,
fork_event, namespaces_event,
-thread_map__pid(threads, thread), 0,
+perf_thread_map__pid(threads, thread), 0,
process, tool, machine,
mmap_data)) {
err = -1;
@@ -658,12 +658,12 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
* comm.pid is set to thread group id by
* perf_event__synthesize_comm
*/
-if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
+if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
bool need_leader = true;
/* is thread group leader in thread_map? */
for (j = 0; j < threads->nr; ++j) {
-if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
+if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
need_leader = false;
break;
}
@@ -997,7 +997,7 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
if (!comm)
comm = (char *) "";
-entry->pid = thread_map__pid(threads, i);
+entry->pid = perf_thread_map__pid(threads, i);
strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
}

@@ -316,7 +316,7 @@ static int perf_evlist__nr_threads(struct evlist *evlist,
if (evsel->system_wide)
return 1;
else
-return thread_map__nr(evlist->core.threads);
+return perf_thread_map__nr(evlist->core.threads);
}
void evlist__disable(struct evlist *evlist)
@@ -399,7 +399,7 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
int perf_evlist__alloc_pollfd(struct evlist *evlist)
{
int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
-int nr_threads = thread_map__nr(evlist->core.threads);
+int nr_threads = perf_thread_map__nr(evlist->core.threads);
int nfds = 0;
struct evsel *evsel;
@@ -531,7 +531,7 @@ static void perf_evlist__set_sid_idx(struct evlist *evlist,
else
sid->cpu = -1;
if (!evsel->system_wide && evlist->core.threads && thread >= 0)
-sid->tid = thread_map__pid(evlist->core.threads, thread);
+sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
else
sid->tid = -1;
}
@@ -696,7 +696,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
if (perf_cpu_map__empty(evlist->core.cpus))
-evlist->nr_mmaps = thread_map__nr(evlist->core.threads);
+evlist->nr_mmaps = perf_thread_map__nr(evlist->core.threads);
map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
if (!map)
return NULL;
@@ -810,7 +810,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
{
int cpu, thread;
int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
-int nr_threads = thread_map__nr(evlist->core.threads);
+int nr_threads = perf_thread_map__nr(evlist->core.threads);
pr_debug2("perf event ring buffer mmapped per cpu\n");
for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -838,7 +838,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
struct mmap_params *mp)
{
int thread;
-int nr_threads = thread_map__nr(evlist->core.threads);
+int nr_threads = perf_thread_map__nr(evlist->core.threads);
pr_debug2("perf event ring buffer mmapped per thread\n");
for (thread = 0; thread < nr_threads; thread++) {

@@ -1653,7 +1653,7 @@ static bool ignore_missing_thread(struct evsel *evsel,
struct perf_thread_map *threads,
int thread, int err)
{
-pid_t ignore_pid = thread_map__pid(threads, thread);
+pid_t ignore_pid = perf_thread_map__pid(threads, thread);
if (!evsel->ignore_missing_thread)
return false;
@@ -1816,7 +1816,7 @@ retry_sample_id:
int fd, group_fd;
if (!evsel->cgrp && !evsel->system_wide)
-pid = thread_map__pid(threads, thread);
+pid = perf_thread_map__pid(threads, thread);
group_fd = get_group_fd(evsel, cpu, thread);
retry_open:

@@ -1406,7 +1406,7 @@ static void python_process_stat(struct perf_stat_config *config,
for (thread = 0; thread < threads->nr; thread++) {
for (cpu = 0; cpu < cpus->nr; cpu++) {
process_stat(counter, cpus->map[cpu],
-thread_map__pid(threads, thread), tstamp,
+perf_thread_map__pid(threads, thread), tstamp,
perf_counts(counter->counts, cpu, thread));
}
}

@@ -119,7 +119,7 @@ static void aggr_printout(struct perf_stat_config *config,
config->csv_output ? 0 : 16,
perf_thread_map__comm(evsel->core.threads, id),
config->csv_output ? 0 : -8,
-thread_map__pid(evsel->core.threads, id),
+perf_thread_map__pid(evsel->core.threads, id),
config->csv_sep);
break;
case AGGR_GLOBAL:
@@ -745,7 +745,7 @@ static void print_aggr_thread(struct perf_stat_config *config,
struct evsel *counter, char *prefix)
{
FILE *output = config->output;
-int nthreads = thread_map__nr(counter->core.threads);
+int nthreads = perf_thread_map__nr(counter->core.threads);
int ncpus = perf_cpu_map__nr(counter->core.cpus);
int thread, sorted_threads, id;
struct perf_aggr_thread_value *buf;

@@ -159,7 +159,7 @@ static void perf_evsel__free_prev_raw_counts(struct evsel *evsel)
static int perf_evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
int ncpus = perf_evsel__nr_cpus(evsel);
-int nthreads = thread_map__nr(evsel->core.threads);
+int nthreads = perf_thread_map__nr(evsel->core.threads);
if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
@@ -309,7 +309,7 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
static int process_counter_maps(struct perf_stat_config *config,
struct evsel *counter)
{
-int nthreads = thread_map__nr(counter->core.threads);
+int nthreads = perf_thread_map__nr(counter->core.threads);
int ncpus = perf_evsel__nr_cpus(counter);
int cpu, thread;

@@ -310,7 +310,7 @@ size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp)
size_t printed = fprintf(fp, "%d thread%s: ",
threads->nr, threads->nr > 1 ? "s" : "");
for (i = 0; i < threads->nr; ++i)
-printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));
+printed += fprintf(fp, "%s%d", i ? ", " : "", perf_thread_map__pid(threads, i));
return printed + fprintf(fp, "\n");
}
@@ -341,7 +341,7 @@ static int get_comm(char **comm, pid_t pid)
static void comm_init(struct perf_thread_map *map, int i)
{
-pid_t pid = thread_map__pid(map, i);
+pid_t pid = perf_thread_map__pid(map, i);
char *comm = NULL;
/* dummy pid comm initialization */

@@ -25,16 +25,6 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str);
size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp);
-static inline int thread_map__nr(struct perf_thread_map *threads)
-{
-return threads ? threads->nr : 1;
-}
-static inline pid_t thread_map__pid(struct perf_thread_map *map, int thread)
-{
-return map->map[thread].pid;
-}
void thread_map__read_comms(struct perf_thread_map *threads);
bool thread_map__has(struct perf_thread_map *threads, pid_t pid);
int thread_map__remove(struct perf_thread_map *threads, int idx);