2009-11-20 08:53:25 +01:00
|
|
|
#include "builtin.h"
|
|
|
|
#include "perf.h"
|
|
|
|
|
2012-09-24 15:46:54 +02:00
|
|
|
#include "util/evlist.h"
|
2012-08-07 14:58:03 +02:00
|
|
|
#include "util/evsel.h"
|
2009-11-20 08:53:25 +01:00
|
|
|
#include "util/util.h"
|
|
|
|
#include "util/cache.h"
|
|
|
|
#include "util/symbol.h"
|
|
|
|
#include "util/thread.h"
|
|
|
|
#include "util/header.h"
|
2009-12-12 00:24:02 +01:00
|
|
|
#include "util/session.h"
|
2011-11-28 11:30:20 +01:00
|
|
|
#include "util/tool.h"
|
2009-11-20 08:53:25 +01:00
|
|
|
|
|
|
|
#include "util/parse-options.h"
|
|
|
|
#include "util/trace-event.h"
|
2013-10-15 16:27:32 +02:00
|
|
|
#include "util/data.h"
|
2014-04-07 20:55:23 +02:00
|
|
|
#include "util/cpumap.h"
|
2009-11-20 08:53:25 +01:00
|
|
|
|
|
|
|
#include "util/debug.h"
|
|
|
|
|
|
|
|
#include <linux/rbtree.h>
|
2013-01-25 02:24:57 +01:00
|
|
|
#include <linux/string.h>
|
2015-03-23 07:30:40 +01:00
|
|
|
#include <locale.h>
|
2009-11-20 08:53:25 +01:00
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
/* Which allocator(s) to analyze; set before processing starts. */
static int kmem_slab;
static int kmem_page;

/* Page size used to convert an allocation order into bytes. */
static long kmem_page_size;

struct alloc_stat;
/* Comparator over two slab allocation records; <0, 0, >0 convention. */
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

/* Which slab views to print: per-pointer and/or per-callsite. */
static int alloc_flag;
static int caller_flag;

/* Max output lines per view; -1 prints everything (see __print_slab_result). */
static int alloc_lines = -1;
static int caller_lines = -1;

/* Print raw call-site addresses instead of resolving them to symbols. */
static bool raw_ip;
|
|
|
|
|
2009-11-20 08:53:25 +01:00
|
|
|
/* One slab allocation record, keyed either by pointer or by call site. */
struct alloc_stat {
	u64	call_site;	/* most recent allocation call site */
	u64	ptr;		/* allocated object address */
	u64	bytes_req;	/* total bytes requested */
	u64	bytes_alloc;	/* total bytes actually allocated */
	u32	hit;		/* number of allocations accumulated */
	u32	pingpong;	/* frees seen on a CPU != alloc_cpu */
	short	alloc_cpu;	/* CPU of last allocation; -1 after free */

	struct rb_node node;
};

/* Per-pointer and per-callsite trees, plus their sorted counterparts. */
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

/* Global slab totals for the summary. */
static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;
|
2009-11-20 08:53:25 +01:00
|
|
|
|
2012-09-09 03:53:06 +02:00
|
|
|
/*
 * Account one slab allocation under its pointer value in root_alloc_stat.
 * An existing node for @ptr is updated in place; otherwise a new node is
 * allocated and inserted.  Returns 0 on success, -1 on allocation failure.
 */
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	/* Standard rbtree descent to find the slot (or existing node) for @ptr. */
	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		/* Same pointer allocated again: accumulate into existing stats. */
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	/* Always remember the most recent call site and allocating CPU. */
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	return 0;
}
|
|
|
|
|
2012-09-09 03:53:06 +02:00
|
|
|
static int insert_caller_stat(unsigned long call_site,
|
2009-11-20 08:53:25 +01:00
|
|
|
int bytes_req, int bytes_alloc)
|
|
|
|
{
|
|
|
|
struct rb_node **node = &root_caller_stat.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct alloc_stat *data = NULL;
|
|
|
|
|
|
|
|
while (*node) {
|
|
|
|
parent = *node;
|
|
|
|
data = rb_entry(*node, struct alloc_stat, node);
|
|
|
|
|
|
|
|
if (call_site > data->call_site)
|
|
|
|
node = &(*node)->rb_right;
|
|
|
|
else if (call_site < data->call_site)
|
|
|
|
node = &(*node)->rb_left;
|
|
|
|
else
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (data && data->call_site == call_site) {
|
|
|
|
data->hit++;
|
|
|
|
data->bytes_req += bytes_req;
|
2009-12-21 10:52:55 +01:00
|
|
|
data->bytes_alloc += bytes_alloc;
|
2009-11-20 08:53:25 +01:00
|
|
|
} else {
|
|
|
|
data = malloc(sizeof(*data));
|
2012-09-09 03:53:06 +02:00
|
|
|
if (!data) {
|
|
|
|
pr_err("%s: malloc failed\n", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
2009-11-20 08:53:25 +01:00
|
|
|
data->call_site = call_site;
|
2009-11-24 06:26:55 +01:00
|
|
|
data->pingpong = 0;
|
2009-11-20 08:53:25 +01:00
|
|
|
data->hit = 1;
|
|
|
|
data->bytes_req = bytes_req;
|
|
|
|
data->bytes_alloc = bytes_alloc;
|
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, node);
|
|
|
|
rb_insert_color(&data->node, &root_caller_stat);
|
|
|
|
}
|
2012-09-09 03:53:06 +02:00
|
|
|
|
|
|
|
return 0;
|
2009-11-20 08:53:25 +01:00
|
|
|
}
|
|
|
|
|
2012-09-09 03:53:06 +02:00
|
|
|
/*
 * Handle one kmalloc/kmem_cache_alloc sample: account it under both the
 * returned pointer and the call site, and update the global totals.
 * Returns 0 on success, -1 when either insertion fails.
 */
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}
|
|
|
|
|
|
|
|
static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
|
|
|
|
struct perf_sample *sample)
|
|
|
|
{
|
|
|
|
int ret = perf_evsel__process_alloc_event(evsel, sample);
|
|
|
|
|
|
|
|
if (!ret) {
|
2014-04-07 20:55:23 +02:00
|
|
|
int node1 = cpu__get_node(sample->cpu),
|
2012-09-24 15:46:54 +02:00
|
|
|
node2 = perf_evsel__intval(evsel, sample, "node");
|
|
|
|
|
2009-11-24 06:26:31 +01:00
|
|
|
if (node1 != node2)
|
|
|
|
nr_cross_allocs++;
|
|
|
|
}
|
2012-09-24 15:46:54 +02:00
|
|
|
|
|
|
|
return ret;
|
2009-11-20 08:53:25 +01:00
|
|
|
}
|
|
|
|
|
2009-11-24 06:26:55 +01:00
|
|
|
/* Comparators (defined later) reused as search keys below. */
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

/*
 * Find the alloc_stat matching @ptr / @call_site in @root, using @sort_fn
 * as the comparator; only the field @sort_fn examines needs to be set.
 * Returns NULL when no node matches.
 */
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
|
|
|
|
|
2012-09-09 03:53:06 +02:00
|
|
|
/*
 * Handle one kfree/kmem_cache_free sample.  A free that happens on a
 * different CPU than the matching allocation is counted as a "pingpong"
 * on both the per-pointer and per-callsite records.
 */
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;	/* free without a recorded allocation: ignore */

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	/* Mark as freed so a future free on another CPU isn't a pingpong. */
	s_alloc->alloc_cpu = -1;

	return 0;
}
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
/* Page allocator totals, accumulated from mm_page_alloc/mm_page_free. */
static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;	/* frees without a matching alloc */
static u64 total_page_fail_bytes;	/* failed allocation requests */
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

/* true when the mm_page_alloc tracepoint exposes a "pfn" field. */
static bool use_pfn;

#define MAX_MIGRATE_TYPES 6
#define MAX_PAGE_ORDER 11

/* Per-(order, migrate type) allocation counts for the summary table. */
static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
|
|
|
|
|
|
|
|
/* Statistics node for one page (or one allocation signature). */
struct page_stat {
	struct rb_node	node;
	u64		page;		/* pfn or struct page address */
	int		order;
	unsigned	gfp_flags;
	unsigned	migrate_type;
	u64		alloc_bytes;
	u64		free_bytes;
	int		nr_alloc;
	int		nr_free;
};

/* Currently-live allocations, keyed by page only. */
static struct rb_root page_tree;
/* Aggregated stats keyed by (page, order, migrate type, gfp flags). */
static struct rb_root page_alloc_tree;
/* page_alloc_tree re-sorted for output. */
static struct rb_root page_alloc_sorted;
|
|
|
|
|
|
|
|
/*
 * Look up the live-allocation node for @page in page_tree.  When @create
 * is true, a zero-initialized node is inserted on a miss.  Returns NULL
 * when not found (and !create) or when allocation fails.
 */
static struct page_stat *search_page(unsigned long page, bool create)
{
	struct rb_node **node = &page_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		/*
		 * Compare the keys explicitly: the previous
		 * "s64 cmp = data->page - page" can overflow for keys
		 * more than 2^63 apart, yielding an ordering that is not
		 * transitive and corrupting the rbtree.
		 */
		if (data->page < page)
			node = &parent->rb_left;
		else if (data->page > page)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = page;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_tree);
	}

	return data;
}
|
|
|
|
|
|
|
|
/*
 * Total order over (page, order, migrate_type, gfp_flags).  Note the
 * inverted return values (-1 when a's field is the larger): the tree
 * built with this comparator ends up in descending key order.  It is
 * used for both insert and lookup in page_alloc_tree, so only internal
 * consistency matters.
 */
static int page_stat_cmp(struct page_stat *a, struct page_stat *b)
{
	if (a->page > b->page)
		return -1;
	if (a->page < b->page)
		return 1;
	if (a->order > b->order)
		return -1;
	if (a->order < b->order)
		return 1;
	if (a->migrate_type > b->migrate_type)
		return -1;
	if (a->migrate_type < b->migrate_type)
		return 1;
	if (a->gfp_flags > b->gfp_flags)
		return -1;
	if (a->gfp_flags < b->gfp_flags)
		return 1;
	return 0;
}
|
|
|
|
|
2015-04-14 19:49:33 +02:00
|
|
|
/*
 * Look up the aggregate node matching @pstat's full key (page, order,
 * migrate type, gfp flags) in page_alloc_tree.  When @create is true,
 * a new node is inserted on a miss with the key fields copied from
 * @pstat and all counters zeroed.  Returns NULL on miss (and !create)
 * or allocation failure.
 */
static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_alloc_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;

	while (*node) {
		s64 cmp;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		cmp = page_stat_cmp(data, pstat);
		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_alloc_tree);
	}

	return data;
}
|
|
|
|
|
|
|
|
/*
 * A pfn of all-ones (pfn mode) or a NULL struct page pointer marks a
 * failed allocation in the tracepoint data.
 */
static bool valid_page(u64 pfn_or_page)
{
	/*
	 * Compare against (u64)-1, not -1UL: on 32-bit hosts -1UL is only
	 * 0xffffffff and would never match a 64-bit all-ones pfn, so
	 * failed allocations would be miscounted as valid.
	 */
	if (use_pfn && pfn_or_page == (u64)-1)
		return false;
	if (!use_pfn && pfn_or_page == 0)
		return false;
	return true;
}
|
|
|
|
|
|
|
|
/*
 * Handle one mm_page_alloc sample: update the global totals, remember
 * the page's allocation parameters in page_tree (so the matching free
 * can recover them), and accumulate into the aggregated stats tree.
 * Returns 0 on success (including failed-allocation samples), or
 * -ENOMEM when a tree node cannot be allocated.
 */
static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
						       "migratetype");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
		.gfp_flags = gfp_flags,
		.migrate_type = migrate_type,
	};

	/* Field name depends on the kernel's tracepoint format. */
	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_allocs++;
	total_page_alloc_bytes += bytes;

	if (!valid_page(page)) {
		/* Allocation failed: count it and stop here. */
		nr_page_fails++;
		total_page_fail_bytes += bytes;

		return 0;
	}

	/*
	 * This is to find the current page (with correct gfp flags and
	 * migrate type) at free event.
	 */
	pstat = search_page(page, true);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->order = order;
	pstat->gfp_flags = gfp_flags;
	pstat->migrate_type = migrate_type;

	this.page = page;
	pstat = search_page_alloc_stat(&this, true);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;

	order_stats[order][migrate_type]++;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Handle one mm_page_free sample: update the totals, recover the page's
 * allocation parameters from page_tree (dropping its live entry), and
 * accumulate the free into the aggregated stats tree.  Frees without a
 * matching recorded allocation are counted separately and ignored.
 */
static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
					       struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
	};

	/* Field name depends on the kernel's tracepoint format. */
	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_frees++;
	total_page_free_bytes += bytes;

	pstat = search_page(page, false);
	if (pstat == NULL) {
		/* Allocated before tracing started (or lost): no match. */
		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
			  page, order);

		nr_page_nomatch++;
		total_page_nomatch_bytes += bytes;

		return 0;
	}

	/* Rebuild the full key using the parameters saved at alloc time. */
	this.page = page;
	this.gfp_flags = pstat->gfp_flags;
	this.migrate_type = pstat->migrate_type;

	/* The page is no longer live: drop its tracking node. */
	rb_erase(&pstat->node, &page_tree);
	free(pstat);

	pstat = search_page_alloc_stat(&this, false);
	if (pstat == NULL)
		return -ENOENT;

	pstat->nr_free++;
	pstat->free_bytes += bytes;

	return 0;
}
|
|
|
|
|
2012-09-24 15:46:54 +02:00
|
|
|
/* Per-tracepoint handler, stashed in evsel->handler. */
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);
|
2009-11-20 08:53:25 +01:00
|
|
|
|
2012-09-11 00:15:03 +02:00
|
|
|
/*
 * Generic sample callback: resolve the thread for bookkeeping, then
 * dispatch to the tracepoint-specific handler stored on the evsel.
 */
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(evsel, sample);
	}

	/* Events without a registered handler are silently ignored. */
	return 0;
}
|
|
|
|
|
2012-08-07 14:58:03 +02:00
|
|
|
/* perf_tool callbacks for the report pass; samples are kept in order. */
static struct perf_tool perf_kmem = {
	.sample		 = process_sample_event,
	.comm		 = perf_event__process_comm,
	.mmap		 = perf_event__process_mmap,
	.mmap2		 = perf_event__process_mmap2,
	.ordered_events	 = true,
};
|
|
|
|
|
|
|
|
/*
 * Internal fragmentation as a percentage: the share of allocated bytes
 * that was not actually requested.  Returns 0.0 when nothing was
 * allocated, avoiding a division by zero.
 */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	return n_alloc ? 100.0 - (100.0 * n_req / n_alloc) : 0.0;
}
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
/*
 * Print up to @n_lines rows of a sorted slab tree: one row per call
 * site (@is_caller) or per allocation pointer.  Call-site addresses are
 * resolved to kernel symbols unless raw_ip was requested.
 */
static void __print_slab_result(struct rb_root *root,
				struct perf_session *session,
				int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.105s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.105s\n", graph_dotted_line);

	next = rb_first(root);

	/* n_lines == -1 stays truthy while decrementing: print everything. */
	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		/* Symbol+offset when resolved, otherwise the raw address. */
		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	/* The limit was hit exactly when n_lines underflowed to -1. */
	if (n_lines == -1)
		printf(" ...                                | ...             | ...           | ...    | ...       | ...   \n");

	printf("%.105s\n", graph_dotted_line);
}
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
/* Short labels for migrate types, indexed by the tracepoint value. */
static const char * const migrate_type_str[] = {
	"UNMOVABL",
	"RECLAIM",
	"MOVABLE",
	"RESERVED",
	"CMA/ISLT",
	"UNKNOWN",
};
|
|
|
|
|
|
|
|
/*
 * Print up to @n_lines rows of the sorted page allocation tree.  The
 * first column is a decimal pfn or a hex struct page address depending
 * on what the tracepoint provided.
 */
static void __print_page_result(struct rb_root *root,
				struct perf_session *session __maybe_unused,
				int n_lines)
{
	struct rb_node *next = rb_first(root);
	const char *format;

	printf("\n%.80s\n", graph_dotted_line);
	printf(" %-16s | Total alloc (KB) | Hits      | Order | Mig.type | GFP flags\n",
	       use_pfn ? "PFN" : "Page");
	printf("%.80s\n", graph_dotted_line);

	if (use_pfn)
		format = " %16llu | %'16llu | %'9d | %5d | %8s |  %08lx\n";
	else
		format = " %016llx | %'16llu | %'9d | %5d | %8s |  %08lx\n";

	/* n_lines == -1 stays truthy while decrementing: print everything. */
	while (next && n_lines--) {
		struct page_stat *data;

		data = rb_entry(next, struct page_stat, node);

		printf(format, (unsigned long long)data->page,
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       (unsigned long)data->gfp_flags);

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ...              | ...              | ...       | ...   | ...      | ...     \n");

	printf("%.80s\n", graph_dotted_line);
}
|
|
|
|
|
|
|
|
/* Print the slab allocator totals gathered during event processing. */
static void print_slab_summary(void)
{
	printf("\nSUMMARY (SLAB allocator)");
	printf("\n========================\n");
	printf("Total bytes requested: %'lu\n", total_requested);
	printf("Total bytes allocated: %'lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %'lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
/*
 * Print the page allocator totals plus a per-order breakdown of
 * allocations by migrate type.
 */
static void print_page_summary(void)
{
	int o, m;
	/* Frees with a matching alloc = allocations fully observed. */
	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

	printf("\nSUMMARY (page allocator)");
	printf("\n========================\n");
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
	       nr_page_allocs, total_page_alloc_bytes / 1024);
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
	       nr_page_frees, total_page_free_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
	       nr_page_allocs - nr_alloc_freed,
	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
	       nr_page_fails, total_page_fail_bytes / 1024);
	printf("\n");

	printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
	printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line, graph_dotted_line);

	for (o = 0; o < MAX_PAGE_ORDER; o++) {
		printf("%5d", o);
		/* Last migrate type ("UNKNOWN") has no column of its own. */
		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
			if (order_stats[o][m])
				printf("  %'12d", order_stats[o][m]);
			else
				printf("  %12c", '.');
		}
		printf("\n");
	}
}
|
|
|
|
|
|
|
|
/* Print the requested slab views followed by the slab summary. */
static void print_slab_result(struct perf_session *session)
{
	if (caller_flag)
		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_slab_summary();
}
|
|
|
|
|
|
|
|
/* Print the page allocation view followed by the page summary. */
static void print_page_result(struct perf_session *session)
{
	if (alloc_flag)
		__print_page_result(&page_alloc_sorted, session, alloc_lines);
	print_page_summary();
}
|
|
|
|
|
|
|
|
/* Print results for whichever allocator(s) were analyzed. */
static void print_result(struct perf_session *session)
{
	if (kmem_slab)
		print_slab_result(session);
	if (kmem_page)
		print_page_result(session);
}
|
|
|
|
|
2009-11-24 06:26:10 +01:00
|
|
|
/* One sort key: its name plus the comparator implementing it. */
struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

/* Active sort keys for the caller and alloc views, in priority order. */
static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
/*
 * Insert @data into the sorted output tree @root, comparing with each
 * sort key from @sort_list in priority order; the first non-zero
 * comparison decides the placement (cmp > 0 sorts @data earlier).
 */
static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
|
|
|
|
struct list_head *sort_list)
|
2009-11-20 08:53:25 +01:00
|
|
|
{
|
|
|
|
struct rb_node *node;
|
|
|
|
struct alloc_stat *data;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
node = rb_first(root);
|
|
|
|
if (!node)
|
|
|
|
break;
|
|
|
|
|
|
|
|
rb_erase(node, root);
|
|
|
|
data = rb_entry(node, struct alloc_stat, node);
|
2015-04-06 07:36:10 +02:00
|
|
|
sort_slab_insert(root_sorted, data, sort_list);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sort_page_insert(struct rb_root *root, struct page_stat *data)
|
|
|
|
{
|
|
|
|
struct rb_node **new = &root->rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
|
|
|
|
while (*new) {
|
|
|
|
struct page_stat *this;
|
|
|
|
int cmp = 0;
|
|
|
|
|
|
|
|
this = rb_entry(*new, struct page_stat, node);
|
|
|
|
parent = *new;
|
|
|
|
|
|
|
|
/* TODO: support more sort key */
|
|
|
|
cmp = data->alloc_bytes - this->alloc_bytes;
|
|
|
|
|
|
|
|
if (cmp > 0)
|
|
|
|
new = &parent->rb_left;
|
|
|
|
else
|
|
|
|
new = &parent->rb_right;
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, new);
|
|
|
|
rb_insert_color(&data->node, root);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted)
|
|
|
|
{
|
|
|
|
struct rb_node *node;
|
|
|
|
struct page_stat *data;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
node = rb_first(root);
|
|
|
|
if (!node)
|
|
|
|
break;
|
|
|
|
|
|
|
|
rb_erase(node, root);
|
|
|
|
data = rb_entry(node, struct page_stat, node);
|
|
|
|
sort_page_insert(root_sorted, data);
|
2009-11-20 08:53:25 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Build the sorted output trees for whichever allocators were analyzed. */
static void sort_result(void)
{
	if (kmem_slab) {
		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
				   &alloc_sort);
		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
				   &caller_sort);
	}
	if (kmem_page) {
		__sort_page_result(&page_alloc_tree, &page_alloc_sorted);
	}
}
|
|
|
|
|
2014-08-12 08:40:38 +02:00
|
|
|
static int __cmd_kmem(struct perf_session *session)
|
2009-11-20 08:53:25 +01:00
|
|
|
{
|
2009-12-28 00:37:02 +01:00
|
|
|
int err = -EINVAL;
|
2015-04-06 07:36:10 +02:00
|
|
|
struct perf_evsel *evsel;
|
2012-09-24 15:46:54 +02:00
|
|
|
const struct perf_evsel_str_handler kmem_tracepoints[] = {
|
2015-04-06 07:36:10 +02:00
|
|
|
/* slab allocator */
|
2012-09-24 15:46:54 +02:00
|
|
|
{ "kmem:kmalloc", perf_evsel__process_alloc_event, },
|
|
|
|
{ "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
|
|
|
|
{ "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
|
|
|
|
{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
|
|
|
|
{ "kmem:kfree", perf_evsel__process_free_event, },
|
|
|
|
{ "kmem:kmem_cache_free", perf_evsel__process_free_event, },
|
2015-04-06 07:36:10 +02:00
|
|
|
/* page allocator */
|
|
|
|
{ "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, },
|
|
|
|
{ "kmem:mm_page_free", perf_evsel__process_page_free_event, },
|
2012-09-24 15:46:54 +02:00
|
|
|
};
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 22:50:29 +01:00
|
|
|
|
2009-12-28 00:37:02 +01:00
|
|
|
if (!perf_session__has_traces(session, "kmem record"))
|
2014-08-12 08:40:38 +02:00
|
|
|
goto out;
|
2009-12-28 00:37:02 +01:00
|
|
|
|
2012-09-24 15:46:54 +02:00
|
|
|
if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
|
|
|
|
pr_err("Initializing perf session tracepoint handlers failed\n");
|
2014-08-12 08:40:38 +02:00
|
|
|
goto out;
|
2012-09-24 15:46:54 +02:00
|
|
|
}
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
evlist__for_each(session->evlist, evsel) {
|
|
|
|
if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
|
|
|
|
perf_evsel__field(evsel, "pfn")) {
|
|
|
|
use_pfn = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-11-20 08:53:25 +01:00
|
|
|
setup_pager();
|
2015-03-03 15:58:45 +01:00
|
|
|
err = perf_session__process_events(session);
|
2015-04-06 07:36:10 +02:00
|
|
|
if (err != 0) {
|
|
|
|
pr_err("error during process events: %d\n", err);
|
2014-08-12 08:40:38 +02:00
|
|
|
goto out;
|
2015-04-06 07:36:10 +02:00
|
|
|
}
|
2009-11-20 08:53:25 +01:00
|
|
|
sort_result();
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 22:50:29 +01:00
|
|
|
print_result(session);
|
2014-08-12 08:40:38 +02:00
|
|
|
out:
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 22:50:29 +01:00
|
|
|
return err;
|
2009-11-20 08:53:25 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->ptr < r->ptr)
|
|
|
|
return -1;
|
|
|
|
else if (l->ptr > r->ptr)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 06:26:10 +01:00
|
|
|
static struct sort_dimension ptr_sort_dimension = {
|
|
|
|
.name = "ptr",
|
|
|
|
.cmp = ptr_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-20 08:53:25 +01:00
|
|
|
static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->call_site < r->call_site)
|
|
|
|
return -1;
|
|
|
|
else if (l->call_site > r->call_site)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 06:26:10 +01:00
|
|
|
static struct sort_dimension callsite_sort_dimension = {
|
|
|
|
.name = "callsite",
|
|
|
|
.cmp = callsite_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-22 10:58:00 +01:00
|
|
|
static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->hit < r->hit)
|
|
|
|
return -1;
|
|
|
|
else if (l->hit > r->hit)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 06:26:10 +01:00
|
|
|
static struct sort_dimension hit_sort_dimension = {
|
|
|
|
.name = "hit",
|
|
|
|
.cmp = hit_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-20 08:53:25 +01:00
|
|
|
static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->bytes_alloc < r->bytes_alloc)
|
|
|
|
return -1;
|
|
|
|
else if (l->bytes_alloc > r->bytes_alloc)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 06:26:10 +01:00
|
|
|
static struct sort_dimension bytes_sort_dimension = {
|
|
|
|
.name = "bytes",
|
|
|
|
.cmp = bytes_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-22 10:58:00 +01:00
|
|
|
static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
double x, y;
|
|
|
|
|
|
|
|
x = fragmentation(l->bytes_req, l->bytes_alloc);
|
|
|
|
y = fragmentation(r->bytes_req, r->bytes_alloc);
|
|
|
|
|
|
|
|
if (x < y)
|
|
|
|
return -1;
|
|
|
|
else if (x > y)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 06:26:10 +01:00
|
|
|
static struct sort_dimension frag_sort_dimension = {
|
|
|
|
.name = "frag",
|
|
|
|
.cmp = frag_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-24 06:26:55 +01:00
|
|
|
static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->pingpong < r->pingpong)
|
|
|
|
return -1;
|
|
|
|
else if (l->pingpong > r->pingpong)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension pingpong_sort_dimension = {
|
|
|
|
.name = "pingpong",
|
|
|
|
.cmp = pingpong_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-24 06:26:10 +01:00
|
|
|
static struct sort_dimension *avail_sorts[] = {
|
|
|
|
&ptr_sort_dimension,
|
|
|
|
&callsite_sort_dimension,
|
|
|
|
&hit_sort_dimension,
|
|
|
|
&bytes_sort_dimension,
|
|
|
|
&frag_sort_dimension,
|
2009-11-24 06:26:55 +01:00
|
|
|
&pingpong_sort_dimension,
|
2009-11-24 06:26:10 +01:00
|
|
|
};
|
|
|
|
|
2012-12-20 20:11:16 +01:00
|
|
|
#define NUM_AVAIL_SORTS ((int)ARRAY_SIZE(avail_sorts))
|
2009-11-24 06:26:10 +01:00
|
|
|
|
|
|
|
static int sort_dimension__add(const char *tok, struct list_head *list)
|
|
|
|
{
|
|
|
|
struct sort_dimension *sort;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NUM_AVAIL_SORTS; i++) {
|
|
|
|
if (!strcmp(avail_sorts[i]->name, tok)) {
|
2013-01-25 02:24:57 +01:00
|
|
|
sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
|
2012-09-09 03:53:06 +02:00
|
|
|
if (!sort) {
|
2013-01-25 02:24:57 +01:00
|
|
|
pr_err("%s: memdup failed\n", __func__);
|
2012-09-09 03:53:06 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2009-11-24 06:26:10 +01:00
|
|
|
list_add_tail(&sort->list, list);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Parse the comma-separated sort-key string @arg and append the
 * corresponding dimensions to @sort_list.
 *
 * Returns 0 on success, -1 on allocation failure or when an unknown
 * key is encountered (reported via error()).
 */
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *str = strdup(arg);	/* strsep() mutates its input */
	char *pos = str;
	char *tok;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while ((tok = strsep(&pos, ",")) != NULL) {
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}
|
|
|
|
|
2012-09-11 00:15:03 +02:00
|
|
|
/*
 * -s/--sort callback: apply @arg to whichever statistics view was
 * requested last (--caller vs. --alloc; the larger flag wins).
 * Returns setup_sorting()'s result; -1 when no argument was given.
 *
 * The original trailing "return 0;" after the if/else was unreachable
 * (both branches return) and has been dropped.
 */
static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}
|
|
|
|
|
2012-09-11 00:15:03 +02:00
|
|
|
/* --caller callback: select the per-callsite view (last flag wins). */
static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = alloc_flag + 1;
	return 0;
}
|
2009-11-20 08:53:25 +01:00
|
|
|
|
2012-09-11 00:15:03 +02:00
|
|
|
/* --alloc callback: select the per-allocation view (last flag wins). */
static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = caller_flag + 1;
	return 0;
}
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
/* --slab callback: analyze the slab allocator (last flag wins). */
static int parse_slab_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_slab = kmem_page + 1;
	return 0;
}
|
|
|
|
|
|
|
|
/* --page callback: analyze the page allocator (last flag wins). */
static int parse_page_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_page = kmem_slab + 1;
	return 0;
}
|
|
|
|
|
2012-09-11 00:15:03 +02:00
|
|
|
/*
 * -l/--line callback: limit how many result lines are printed for the
 * most recently requested view (--caller vs. --alloc).
 *
 * Returns 0 on success, -1 when @arg is missing or not a number.
 * The original used strtoul(arg, NULL, 10), which silently treated
 * garbage input as 0; the value is now validated via the end pointer.
 */
static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	char *end;
	long lines;

	if (!arg)
		return -1;

	lines = strtol(arg, &end, 10);
	if (end == arg || *end != '\0') {
		pr_err("invalid --line value: '%s'\n", arg);
		return -1;
	}

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
|
|
|
|
|
2012-10-01 20:20:58 +02:00
|
|
|
static int __cmd_record(int argc, const char **argv)
|
|
|
|
{
|
|
|
|
const char * const record_args[] = {
|
2013-06-05 13:37:21 +02:00
|
|
|
"record", "-a", "-R", "-c", "1",
|
2015-04-06 07:36:10 +02:00
|
|
|
};
|
|
|
|
const char * const slab_events[] = {
|
2009-11-20 08:53:25 +01:00
|
|
|
"-e", "kmem:kmalloc",
|
|
|
|
"-e", "kmem:kmalloc_node",
|
|
|
|
"-e", "kmem:kfree",
|
|
|
|
"-e", "kmem:kmem_cache_alloc",
|
|
|
|
"-e", "kmem:kmem_cache_alloc_node",
|
|
|
|
"-e", "kmem:kmem_cache_free",
|
2012-10-01 20:20:58 +02:00
|
|
|
};
|
2015-04-06 07:36:10 +02:00
|
|
|
const char * const page_events[] = {
|
|
|
|
"-e", "kmem:mm_page_alloc",
|
|
|
|
"-e", "kmem:mm_page_free",
|
|
|
|
};
|
2009-11-20 08:53:25 +01:00
|
|
|
unsigned int rec_argc, i, j;
|
|
|
|
const char **rec_argv;
|
|
|
|
|
|
|
|
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
|
2015-04-06 07:36:10 +02:00
|
|
|
if (kmem_slab)
|
|
|
|
rec_argc += ARRAY_SIZE(slab_events);
|
|
|
|
if (kmem_page)
|
|
|
|
rec_argc += ARRAY_SIZE(page_events);
|
|
|
|
|
2009-11-20 08:53:25 +01:00
|
|
|
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
|
|
|
|
2010-11-13 03:35:06 +01:00
|
|
|
if (rec_argv == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2009-11-20 08:53:25 +01:00
|
|
|
for (i = 0; i < ARRAY_SIZE(record_args); i++)
|
|
|
|
rec_argv[i] = strdup(record_args[i]);
|
|
|
|
|
2015-04-06 07:36:10 +02:00
|
|
|
if (kmem_slab) {
|
|
|
|
for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
|
|
|
|
rec_argv[i] = strdup(slab_events[j]);
|
|
|
|
}
|
|
|
|
if (kmem_page) {
|
|
|
|
for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
|
|
|
|
rec_argv[i] = strdup(page_events[j]);
|
|
|
|
}
|
|
|
|
|
2009-11-20 08:53:25 +01:00
|
|
|
for (j = 1; j < (unsigned int)argc; j++, i++)
|
|
|
|
rec_argv[i] = argv[j];
|
|
|
|
|
|
|
|
return cmd_record(i, rec_argv, NULL);
|
|
|
|
}
|
|
|
|
|
2012-09-11 00:15:03 +02:00
|
|
|
/*
 * Entry point for "perf kmem {record|stat}".
 *
 * "record" forwards to __cmd_record(); "stat" opens the perf.data
 * file, sets up sorting defaults, and runs __cmd_kmem().
 *
 * Fix: the kmem_page "invalid event" error path used to "return -1"
 * directly, leaking the freshly created session; it now unwinds
 * through out_delete like the other error paths.
 */
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const default_sort_order = "frag,hit,bytes";
	struct perf_data_file file = {
		.mode = PERF_DATA_MODE_READ,
	};
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
			   parse_slab_opt),
	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
			   parse_page_opt),
	OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	struct perf_session *session;
	int ret = -1;

	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (kmem_slab == 0 && kmem_page == 0)
		kmem_slab = 1; /* for backward compatibility */

	if (!strncmp(argv[0], "rec", 3)) {
		symbol__init(NULL);
		return __cmd_record(argc, argv);
	}

	file.path = input_name;

	session = perf_session__new(&file, false, &perf_kmem);
	if (session == NULL)
		return -1;

	if (kmem_page) {
		struct perf_evsel *evsel = perf_evlist__first(session->evlist);

		if (evsel == NULL || evsel->tp_format == NULL) {
			pr_err("invalid event found.. aborting\n");
			/* was "return -1" — leaked the session */
			goto out_delete;
		}

		kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
	}

	symbol__init(&session->header.env);

	if (!strcmp(argv[0], "stat")) {
		setlocale(LC_ALL, "");

		if (cpu__setup_cpunode_map())
			goto out_delete;

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		ret = __cmd_kmem(session);
	} else
		usage_with_options(kmem_usage, kmem_options);

out_delete:
	perf_session__delete(session);

	return ret;
}
|
|
|
|
|