2009-11-20 07:53:25 +00:00
|
|
|
#include "builtin.h"
|
|
|
|
#include "perf.h"
|
|
|
|
|
2012-09-24 13:46:54 +00:00
|
|
|
#include "util/evlist.h"
|
2012-08-07 12:58:03 +00:00
|
|
|
#include "util/evsel.h"
|
2009-11-20 07:53:25 +00:00
|
|
|
#include "util/util.h"
|
2016-06-23 08:55:17 +00:00
|
|
|
#include "util/config.h"
|
2009-11-20 07:53:25 +00:00
|
|
|
#include "util/symbol.h"
|
|
|
|
#include "util/thread.h"
|
|
|
|
#include "util/header.h"
|
2009-12-11 23:24:02 +00:00
|
|
|
#include "util/session.h"
|
2011-11-28 10:30:20 +00:00
|
|
|
#include "util/tool.h"
|
2015-04-21 04:55:02 +00:00
|
|
|
#include "util/callchain.h"
|
2016-11-29 17:15:45 +00:00
|
|
|
#include "util/time-utils.h"
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2015-12-15 15:39:39 +00:00
|
|
|
#include <subcmd/parse-options.h>
|
2009-11-20 07:53:25 +00:00
|
|
|
#include "util/trace-event.h"
|
2013-10-15 14:27:32 +00:00
|
|
|
#include "util/data.h"
|
2014-04-07 18:55:23 +00:00
|
|
|
#include "util/cpumap.h"
|
2009-11-20 07:53:25 +00:00
|
|
|
|
|
|
|
#include "util/debug.h"
|
|
|
|
|
|
|
|
#include <linux/rbtree.h>
|
2013-01-25 01:24:57 +00:00
|
|
|
#include <linux/string.h>
|
2015-03-23 06:30:40 +00:00
|
|
|
#include <locale.h>
|
2015-04-21 04:55:02 +00:00
|
|
|
#include <regex.h>
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
/* Which allocator is being analyzed: slab and/or page (set from options). */
static int kmem_slab;
static int kmem_page;

/* System page size in bytes, used to convert page orders into sizes. */
static long kmem_page_size;

/* Analysis mode used when neither --slab nor --page is given. */
static enum {
	KMEM_SLAB,
	KMEM_PAGE,
} kmem_default = KMEM_SLAB;  /* for backward compatibility */
|
2015-04-06 05:36:10 +00:00
|
|
|
|
2009-11-20 07:53:25 +00:00
|
|
|
struct alloc_stat;
/* Comparator ordering two alloc_stat entries; used for sorting and lookup. */
typedef int (*sort_fn_t)(void *, void *);

/* Non-zero when the allocation / caller report was requested. */
static int alloc_flag;
static int caller_flag;

/* Maximum number of result lines to print; -1 means unlimited. */
static int alloc_lines = -1;
static int caller_lines = -1;

/* Print raw instruction pointers instead of resolved symbol names. */
static bool raw_ip;
|
|
|
|
|
2009-11-20 07:53:25 +00:00
|
|
|
/*
 * Per-key slab allocation statistics.  The same struct is keyed either by
 * the allocated pointer (root_alloc_stat) or by the allocation call site
 * (root_caller_stat).
 */
struct alloc_stat {
	u64	call_site;	/* ip of the allocation call site */
	u64	ptr;		/* pointer returned by the allocator */
	u64	bytes_req;	/* total bytes requested */
	u64	bytes_alloc;	/* total bytes actually allocated */
	u64	last_alloc;	/* size of the most recent allocation, credited on free */
	u32	hit;		/* number of allocations recorded */
	u32	pingpong;	/* frees issued from a cpu other than the allocating one */
	short	alloc_cpu;	/* cpu of the last allocation; -1 once freed */

	struct rb_node node;
};
|
|
|
|
|
|
|
|
static struct rb_root root_alloc_stat;
|
|
|
|
static struct rb_root root_alloc_sorted;
|
|
|
|
static struct rb_root root_caller_stat;
|
|
|
|
static struct rb_root root_caller_sorted;
|
|
|
|
|
2016-11-25 21:42:13 +00:00
|
|
|
static unsigned long total_requested, total_allocated, total_freed;
|
2009-11-24 05:26:31 +00:00
|
|
|
static unsigned long nr_allocs, nr_cross_allocs;
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2016-11-29 17:15:45 +00:00
|
|
|
/* filters for controlling start and stop of time of analysis */
|
|
|
|
static struct perf_time_interval ptime;
|
|
|
|
const char *time_str;
|
|
|
|
|
2012-09-09 01:53:06 +00:00
|
|
|
/*
 * Record one slab allocation keyed by the returned pointer.  If the pointer
 * is already in root_alloc_stat, its counters are accumulated; otherwise a
 * new node is inserted.  Returns 0 on success, -1 on allocation failure.
 */
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	/* Standard rbtree descent; 'node' ends at the insertion slot. */
	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		/* Existing entry: accumulate. */
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	/* Always refresh per-allocation fields, even for an existing entry. */
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	data->last_alloc = bytes_alloc;

	return 0;
}
|
|
|
|
|
2012-09-09 01:53:06 +00:00
|
|
|
/*
 * Record one slab allocation keyed by its call site in root_caller_stat,
 * accumulating into an existing entry or inserting a new one.  Mirrors
 * insert_alloc_stat().  Returns 0 on success, -1 on allocation failure.
 */
static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	/* Standard rbtree descent; 'node' ends at the insertion slot. */
	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		/* Existing entry: accumulate. */
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}
|
|
|
|
|
2012-09-09 01:53:06 +00:00
|
|
|
/*
 * Handle one kmalloc/kmem_cache_alloc tracepoint sample: record it in both
 * the per-pointer and per-callsite trees and update the global totals.
 * Returns 0 on success, -1 when either insertion fails.
 */
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}
|
|
|
|
|
|
|
|
static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
|
|
|
|
struct perf_sample *sample)
|
|
|
|
{
|
|
|
|
int ret = perf_evsel__process_alloc_event(evsel, sample);
|
|
|
|
|
|
|
|
if (!ret) {
|
2014-04-07 18:55:23 +00:00
|
|
|
int node1 = cpu__get_node(sample->cpu),
|
2012-09-24 13:46:54 +00:00
|
|
|
node2 = perf_evsel__intval(evsel, sample, "node");
|
|
|
|
|
2009-11-24 05:26:31 +00:00
|
|
|
if (node1 != node2)
|
|
|
|
nr_cross_allocs++;
|
|
|
|
}
|
2012-09-24 13:46:54 +00:00
|
|
|
|
|
|
|
return ret;
|
2009-11-20 07:53:25 +00:00
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
/* Comparators defined later in this file; needed by the free-event path. */
static int ptr_cmp(void *, void *);
static int slab_callsite_cmp(void *, void *);
|
2009-11-24 05:26:55 +00:00
|
|
|
|
|
|
|
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
|
|
|
|
unsigned long call_site,
|
|
|
|
struct rb_root *root,
|
|
|
|
sort_fn_t sort_fn)
|
|
|
|
{
|
|
|
|
struct rb_node *node = root->rb_node;
|
|
|
|
struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
|
|
|
|
|
|
|
|
while (node) {
|
|
|
|
struct alloc_stat *data;
|
|
|
|
int cmp;
|
|
|
|
|
|
|
|
data = rb_entry(node, struct alloc_stat, node);
|
|
|
|
|
|
|
|
cmp = sort_fn(&key, data);
|
|
|
|
if (cmp < 0)
|
|
|
|
node = node->rb_left;
|
|
|
|
else if (cmp > 0)
|
|
|
|
node = node->rb_right;
|
|
|
|
else
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-09-09 01:53:06 +00:00
|
|
|
/*
 * Handle one kfree/kmem_cache_free sample: credit the freed bytes, and when
 * the free happens on a different cpu than the allocation, bump the
 * "pingpong" counters on both the pointer and the call-site entry.
 * Frees for pointers we never saw allocated are silently ignored.
 */
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;	/* alloc happened before tracing started */

	total_freed += s_alloc->last_alloc;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat,
					     slab_callsite_cmp);
		if (!s_caller)
			return -1;	/* inconsistent trees: should not happen */
		s_caller->pingpong++;
	}
	/* Mark the entry freed so a later free won't double-count pingpong. */
	s_alloc->alloc_cpu = -1;

	return 0;
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
/* Global page-allocator accounting across all processed samples. */
static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;	/* freed pages with no matching alloc */
static u64 total_page_fail_bytes;	/* failed allocation requests */
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

/* Show page frame numbers instead of page addresses. */
static bool use_pfn;
/* --live: report only allocations not yet freed (freed ones are dropped). */
static bool live_page;
static struct perf_session *kmem_session;

#define MAX_MIGRATE_TYPES 6
#define MAX_PAGE_ORDER 11

/* Histogram of page allocations per order and migrate type. */
static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
|
|
|
|
|
|
|
|
/*
 * Per-key page allocation statistics; keyed by page (or pfn) in the
 * page/live trees, or by call site in the caller tree.
 */
struct page_stat {
	struct rb_node 	node;
	u64 		page;		/* page address or pfn */
	u64 		callsite;	/* allocation call site */
	int 		order;		/* allocation order */
	unsigned 	gfp_flags;	/* gfp flags of the allocation */
	unsigned 	migrate_type;	/* migrate type of the allocation */
	u64		alloc_bytes;	/* total bytes allocated under this key */
	u64 		free_bytes;	/* total bytes freed under this key */
	int 		nr_alloc;	/* number of allocations */
	int 		nr_free;	/* number of frees */
};
|
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
/* Pages currently allocated and not yet freed, keyed by page. */
static struct rb_root page_live_tree;
/* All allocations keyed by page and by call site, plus sorted copies. */
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;
static struct rb_root page_caller_tree;
static struct rb_root page_caller_sorted;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
/* Address range and name of one kernel memory-allocation function. */
struct alloc_func {
	u64 start;
	u64 end;
	char *name;
};

/* Array of page-allocating kernel functions, sorted by start address. */
static int nr_alloc_funcs;
static struct alloc_func *alloc_func_list;
|
|
|
|
|
|
|
|
static int funcmp(const void *a, const void *b)
|
|
|
|
{
|
|
|
|
const struct alloc_func *fa = a;
|
|
|
|
const struct alloc_func *fb = b;
|
|
|
|
|
|
|
|
if (fa->start > fb->start)
|
|
|
|
return 1;
|
|
|
|
else
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * bsearch() comparator used by find_callsite(): 'a' is the key, an
 * alloc_func with start == end == the callchain ip; 'b' is an entry of
 * alloc_func_list.  Matches when the ip lies inside the entry's
 * [start, end) range, otherwise orders by start address.
 */
static int callcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	/* ip falls inside this allocation function? */
	if (fb->start <= fa->start && fa->end < fb->end)
		return 0;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}
|
|
|
|
|
|
|
|
static int build_alloc_func_list(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct map *kernel_map;
|
|
|
|
struct symbol *sym;
|
|
|
|
struct rb_node *node;
|
|
|
|
struct alloc_func *func;
|
|
|
|
struct machine *machine = &kmem_session->machines.host;
|
|
|
|
regex_t alloc_func_regex;
|
|
|
|
const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
|
|
|
|
|
|
|
|
ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
|
|
|
|
if (ret) {
|
|
|
|
char err[BUFSIZ];
|
|
|
|
|
|
|
|
regerror(ret, &alloc_func_regex, err, sizeof(err));
|
|
|
|
pr_err("Invalid regex: %s\n%s", pattern, err);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-09-30 14:54:04 +00:00
|
|
|
kernel_map = machine__kernel_map(machine);
|
2016-09-01 22:25:52 +00:00
|
|
|
if (map__load(kernel_map) < 0) {
|
2015-04-21 04:55:02 +00:00
|
|
|
pr_err("cannot load kernel map\n");
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
map__for_each_symbol(kernel_map, sym, node) {
|
|
|
|
if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
func = realloc(alloc_func_list,
|
|
|
|
(nr_alloc_funcs + 1) * sizeof(*func));
|
|
|
|
if (func == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pr_debug("alloc func: %s\n", sym->name);
|
|
|
|
func[nr_alloc_funcs].start = sym->start;
|
|
|
|
func[nr_alloc_funcs].end = sym->end;
|
|
|
|
func[nr_alloc_funcs].name = sym->name;
|
|
|
|
|
|
|
|
alloc_func_list = func;
|
|
|
|
nr_alloc_funcs++;
|
|
|
|
}
|
|
|
|
|
|
|
|
qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
|
|
|
|
|
|
|
|
regfree(&alloc_func_regex);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Find first non-memory allocation function from callchain.
 * The allocation functions are in the 'alloc_func_list'.
 */
static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
{
	struct addr_location al;
	struct machine *machine = &kmem_session->machines.host;
	struct callchain_cursor_node *node;

	/* Lazily build the allocator symbol table on first use. */
	if (alloc_func_list == NULL) {
		if (build_alloc_func_list() < 0)
			goto out;
	}

	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);

	/* Walk the resolved callchain from the innermost frame outwards. */
	callchain_cursor_commit(&callchain_cursor);
	while (true) {
		struct alloc_func key, *caller;
		u64 addr;

		node = callchain_cursor_current(&callchain_cursor);
		if (node == NULL)
			break;

		/* callcmp() treats a start==end key as "is ip inside entry?" */
		key.start = key.end = node->ip;
		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
				 sizeof(key), callcmp);
		if (!caller) {
			/* found: first frame that is not an allocator itself */
			if (node->map)
				addr = map__unmap_ip(node->map, node->ip);
			else
				addr = node->ip;

			return addr;
		} else
			pr_debug3("skipping alloc function: %s\n", caller->name);

		callchain_cursor_advance(&callchain_cursor);
	}

out:
	/* No usable callchain: fall back to the sample ip. */
	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
	return sample->ip;
}
|
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
/* One sort key: its command-line name plus the comparator implementing it. */
struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

/* Available sort keys for the page allocation and caller reports. */
static LIST_HEAD(page_alloc_sort_input);
static LIST_HEAD(page_caller_sort_input);
2015-04-21 04:55:02 +00:00
|
|
|
static struct page_stat *
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
__page_stat__findnew_page(struct page_stat *pstat, bool create)
|
2015-04-06 05:36:10 +00:00
|
|
|
{
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
struct rb_node **node = &page_live_tree.rb_node;
|
2015-04-06 05:36:10 +00:00
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct page_stat *data;
|
|
|
|
|
|
|
|
while (*node) {
|
|
|
|
s64 cmp;
|
|
|
|
|
|
|
|
parent = *node;
|
|
|
|
data = rb_entry(*node, struct page_stat, node);
|
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
cmp = data->page - pstat->page;
|
2015-04-06 05:36:10 +00:00
|
|
|
if (cmp < 0)
|
|
|
|
node = &parent->rb_left;
|
|
|
|
else if (cmp > 0)
|
|
|
|
node = &parent->rb_right;
|
|
|
|
else
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!create)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
data = zalloc(sizeof(*data));
|
|
|
|
if (data != NULL) {
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
data->page = pstat->page;
|
|
|
|
data->order = pstat->order;
|
|
|
|
data->gfp_flags = pstat->gfp_flags;
|
|
|
|
data->migrate_type = pstat->migrate_type;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, node);
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
rb_insert_color(&data->node, &page_live_tree);
|
2015-04-06 05:36:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
static struct page_stat *page_stat__find_page(struct page_stat *pstat)
|
2015-04-21 04:55:02 +00:00
|
|
|
{
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
return __page_stat__findnew_page(pstat, false);
|
2015-04-21 04:55:02 +00:00
|
|
|
}
|
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
|
2015-04-21 04:55:02 +00:00
|
|
|
{
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
return __page_stat__findnew_page(pstat, true);
|
2015-04-21 04:55:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct page_stat *
|
|
|
|
__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
|
2015-04-06 05:36:10 +00:00
|
|
|
{
|
|
|
|
struct rb_node **node = &page_alloc_tree.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct page_stat *data;
|
2015-04-21 04:55:03 +00:00
|
|
|
struct sort_dimension *sort;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
while (*node) {
|
2015-04-21 04:55:03 +00:00
|
|
|
int cmp = 0;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
parent = *node;
|
|
|
|
data = rb_entry(*node, struct page_stat, node);
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
list_for_each_entry(sort, &page_alloc_sort_input, list) {
|
|
|
|
cmp = sort->cmp(pstat, data);
|
|
|
|
if (cmp)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
if (cmp < 0)
|
|
|
|
node = &parent->rb_left;
|
|
|
|
else if (cmp > 0)
|
|
|
|
node = &parent->rb_right;
|
|
|
|
else
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!create)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
data = zalloc(sizeof(*data));
|
|
|
|
if (data != NULL) {
|
2015-04-14 17:49:33 +00:00
|
|
|
data->page = pstat->page;
|
|
|
|
data->order = pstat->order;
|
|
|
|
data->gfp_flags = pstat->gfp_flags;
|
|
|
|
data->migrate_type = pstat->migrate_type;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, node);
|
|
|
|
rb_insert_color(&data->node, &page_alloc_tree);
|
|
|
|
}
|
|
|
|
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
|
|
|
|
{
|
|
|
|
return __page_stat__findnew_alloc(pstat, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
|
|
|
|
{
|
|
|
|
return __page_stat__findnew_alloc(pstat, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct page_stat *
|
2015-04-21 04:55:03 +00:00
|
|
|
__page_stat__findnew_caller(struct page_stat *pstat, bool create)
|
2015-04-21 04:55:02 +00:00
|
|
|
{
|
|
|
|
struct rb_node **node = &page_caller_tree.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct page_stat *data;
|
2015-04-21 04:55:03 +00:00
|
|
|
struct sort_dimension *sort;
|
2015-04-21 04:55:02 +00:00
|
|
|
|
|
|
|
while (*node) {
|
2015-04-21 04:55:03 +00:00
|
|
|
int cmp = 0;
|
2015-04-21 04:55:02 +00:00
|
|
|
|
|
|
|
parent = *node;
|
|
|
|
data = rb_entry(*node, struct page_stat, node);
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
list_for_each_entry(sort, &page_caller_sort_input, list) {
|
|
|
|
cmp = sort->cmp(pstat, data);
|
|
|
|
if (cmp)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
if (cmp < 0)
|
|
|
|
node = &parent->rb_left;
|
|
|
|
else if (cmp > 0)
|
|
|
|
node = &parent->rb_right;
|
|
|
|
else
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!create)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
data = zalloc(sizeof(*data));
|
|
|
|
if (data != NULL) {
|
2015-04-21 04:55:03 +00:00
|
|
|
data->callsite = pstat->callsite;
|
|
|
|
data->order = pstat->order;
|
|
|
|
data->gfp_flags = pstat->gfp_flags;
|
|
|
|
data->migrate_type = pstat->migrate_type;
|
2015-04-21 04:55:02 +00:00
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, node);
|
|
|
|
rb_insert_color(&data->node, &page_caller_tree);
|
|
|
|
}
|
|
|
|
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
|
2015-04-21 04:55:02 +00:00
|
|
|
{
|
2015-04-21 04:55:03 +00:00
|
|
|
return __page_stat__findnew_caller(pstat, false);
|
2015-04-21 04:55:02 +00:00
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
|
2015-04-21 04:55:02 +00:00
|
|
|
{
|
2015-04-21 04:55:03 +00:00
|
|
|
return __page_stat__findnew_caller(pstat, true);
|
2015-04-21 04:55:02 +00:00
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
/*
 * Tell whether a tracepoint-reported page identifier denotes a real
 * page.  A failed allocation is reported as pfn == (u64)-1 or
 * page == NULL depending on which field the kernel exposes (use_pfn).
 */
static bool valid_page(u64 pfn_or_page)
{
	/*
	 * Fix: compare against a 64-bit all-ones constant.  The old
	 * '-1UL' is only 0xFFFFFFFF when 'unsigned long' is 32 bits, so
	 * failed allocations were not recognized on 32-bit builds.
	 */
	if (use_pfn && pfn_or_page == (u64)-1)
		return false;
	if (!use_pfn && pfn_or_page == 0)
		return false;
	return true;
}
|
|
|
|
|
2015-04-21 04:55:05 +00:00
|
|
|
/*
 * One observed gfp_flags bitmask together with its string forms:
 * @compact_str is the abbreviated rendering (see gfp_compact_table),
 * @human_readable is the full name as printed by the tracepoint.
 * Both strings are heap-allocated (strdup/realloc) and owned here.
 */
struct gfp_flag {
	unsigned int flags;
	char *compact_str;
	char *human_readable;
};

/* Dynamically grown array of all gfp_flags values seen so far,
 * kept sorted by ->flags so bsearch() works (see gfpcmp). */
static struct gfp_flag *gfps;
static int nr_gfps;
|
|
|
|
|
|
|
|
static int gfpcmp(const void *a, const void *b)
|
|
|
|
{
|
|
|
|
const struct gfp_flag *fa = a;
|
|
|
|
const struct gfp_flag *fb = b;
|
|
|
|
|
|
|
|
return fa->flags - fb->flags;
|
|
|
|
}
|
|
|
|
|
mm, tracing: unify mm flags handling in tracepoints and printk
In tracepoints, it's possible to print gfp flags in a human-friendly
format through a macro show_gfp_flags(), which defines a translation
array and passes is to __print_flags(). Since the following patch will
introduce support for gfp flags printing in printk(), it would be nice
to reuse the array. This is not straightforward, since __print_flags()
can't simply reference an array defined in a .c file such as mm/debug.c
- it has to be a macro to allow the macro magic to communicate the
format to userspace tools such as trace-cmd.
The solution is to create a macro __def_gfpflag_names which is used both
in show_gfp_flags(), and to define the gfpflag_names[] array in
mm/debug.c.
On the other hand, mm/debug.c also defines translation tables for page
flags and vma flags, and desire was expressed (but not implemented in
this series) to use these also from tracepoints. Thus, this patch also
renames the events/gfpflags.h file to events/mmflags.h and moves the
table definitions there, using the same macro approach as for gfpflags.
This allows translating all three kinds of mm-specific flags both in
tracepoints and printk.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Michal Hocko <mhocko@suse.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-03-15 21:55:52 +00:00
|
|
|
/* see include/trace/events/mmflags.h */
|
2015-04-21 04:55:05 +00:00
|
|
|
static const struct {
|
|
|
|
const char *original;
|
|
|
|
const char *compact;
|
|
|
|
} gfp_compact_table[] = {
|
|
|
|
{ "GFP_TRANSHUGE", "THP" },
|
mm, thp: remove __GFP_NORETRY from khugepaged and madvised allocations
After the previous patch, we can distinguish costly allocations that
should be really lightweight, such as THP page faults, with
__GFP_NORETRY. This means we don't need to recognize khugepaged
allocations via PF_KTHREAD anymore. We can also change THP page faults
in areas where madvise(MADV_HUGEPAGE) was used to try as hard as
khugepaged, as the process has indicated that it benefits from THP's and
is willing to pay some initial latency costs.
We can also make the flags handling less cryptic by distinguishing
GFP_TRANSHUGE_LIGHT (no reclaim at all, default mode in page fault) from
GFP_TRANSHUGE (only direct reclaim, khugepaged default). Adding
__GFP_NORETRY or __GFP_KSWAPD_RECLAIM is done where needed.
The patch effectively changes the current GFP_TRANSHUGE users as
follows:
* get_huge_zero_page() - the zero page lifetime should be relatively
long and it's shared by multiple users, so it's worth spending some
effort on it. We use GFP_TRANSHUGE, and __GFP_NORETRY is not added.
This also restores direct reclaim to this allocation, which was
unintentionally removed by commit e4a49efe4e7e ("mm: thp: set THP defrag
by default to madvise and add a stall-free defrag option")
* alloc_hugepage_khugepaged_gfpmask() - this is khugepaged, so latency
is not an issue. So if khugepaged "defrag" is enabled (the default), do
reclaim via GFP_TRANSHUGE without __GFP_NORETRY. We can remove the
PF_KTHREAD check from page alloc.
As a side-effect, khugepaged will now no longer check if the initial
compaction was deferred or contended. This is OK, as khugepaged sleep
times between collapsion attempts are long enough to prevent noticeable
disruption, so we should allow it to spend some effort.
* migrate_misplaced_transhuge_page() - already was masking out
__GFP_RECLAIM, so just convert to GFP_TRANSHUGE_LIGHT which is
equivalent.
* alloc_hugepage_direct_gfpmask() - vma's with VM_HUGEPAGE (via madvise)
are now allocating without __GFP_NORETRY. Other vma's keep using
__GFP_NORETRY if direct reclaim/compaction is at all allowed (by default
it's allowed only for madvised vma's). The rest is conversion to
GFP_TRANSHUGE(_LIGHT).
[mhocko@suse.com: suggested GFP_TRANSHUGE_LIGHT]
Link: http://lkml.kernel.org/r/20160721073614.24395-7-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-07-28 22:49:25 +00:00
|
|
|
{ "GFP_TRANSHUGE_LIGHT", "THL" },
|
2015-04-21 04:55:05 +00:00
|
|
|
{ "GFP_HIGHUSER_MOVABLE", "HUM" },
|
|
|
|
{ "GFP_HIGHUSER", "HU" },
|
|
|
|
{ "GFP_USER", "U" },
|
|
|
|
{ "GFP_TEMPORARY", "TMP" },
|
2016-03-15 21:55:49 +00:00
|
|
|
{ "GFP_KERNEL_ACCOUNT", "KAC" },
|
2015-04-21 04:55:05 +00:00
|
|
|
{ "GFP_KERNEL", "K" },
|
|
|
|
{ "GFP_NOFS", "NF" },
|
|
|
|
{ "GFP_ATOMIC", "A" },
|
|
|
|
{ "GFP_NOIO", "NI" },
|
|
|
|
{ "GFP_NOWAIT", "NW" },
|
2016-03-15 21:55:49 +00:00
|
|
|
{ "GFP_DMA", "D" },
|
|
|
|
{ "__GFP_HIGHMEM", "HM" },
|
|
|
|
{ "GFP_DMA32", "D32" },
|
|
|
|
{ "__GFP_HIGH", "H" },
|
|
|
|
{ "__GFP_ATOMIC", "_A" },
|
|
|
|
{ "__GFP_IO", "I" },
|
|
|
|
{ "__GFP_FS", "F" },
|
|
|
|
{ "__GFP_COLD", "CO" },
|
|
|
|
{ "__GFP_NOWARN", "NWR" },
|
|
|
|
{ "__GFP_REPEAT", "R" },
|
|
|
|
{ "__GFP_NOFAIL", "NF" },
|
|
|
|
{ "__GFP_NORETRY", "NR" },
|
|
|
|
{ "__GFP_COMP", "C" },
|
|
|
|
{ "__GFP_ZERO", "Z" },
|
|
|
|
{ "__GFP_NOMEMALLOC", "NMA" },
|
|
|
|
{ "__GFP_MEMALLOC", "MA" },
|
|
|
|
{ "__GFP_HARDWALL", "HW" },
|
|
|
|
{ "__GFP_THISNODE", "TN" },
|
|
|
|
{ "__GFP_RECLAIMABLE", "RC" },
|
|
|
|
{ "__GFP_MOVABLE", "M" },
|
|
|
|
{ "__GFP_ACCOUNT", "AC" },
|
|
|
|
{ "__GFP_NOTRACK", "NT" },
|
|
|
|
{ "__GFP_WRITE", "WR" },
|
|
|
|
{ "__GFP_RECLAIM", "R" },
|
|
|
|
{ "__GFP_DIRECT_RECLAIM", "DR" },
|
|
|
|
{ "__GFP_KSWAPD_RECLAIM", "KR" },
|
2015-04-21 04:55:05 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static size_t max_gfp_len;
|
|
|
|
|
|
|
|
static char *compact_gfp_flags(char *gfp_flags)
|
|
|
|
{
|
|
|
|
char *orig_flags = strdup(gfp_flags);
|
|
|
|
char *new_flags = NULL;
|
2015-05-29 12:48:13 +00:00
|
|
|
char *str, *pos = NULL;
|
2015-04-21 04:55:05 +00:00
|
|
|
size_t len = 0;
|
|
|
|
|
|
|
|
if (orig_flags == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
str = strtok_r(orig_flags, "|", &pos);
|
|
|
|
while (str) {
|
|
|
|
size_t i;
|
|
|
|
char *new;
|
|
|
|
const char *cpt;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
|
|
|
|
if (strcmp(gfp_compact_table[i].original, str))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
cpt = gfp_compact_table[i].compact;
|
|
|
|
new = realloc(new_flags, len + strlen(cpt) + 2);
|
|
|
|
if (new == NULL) {
|
|
|
|
free(new_flags);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
new_flags = new;
|
|
|
|
|
|
|
|
if (!len) {
|
|
|
|
strcpy(new_flags, cpt);
|
|
|
|
} else {
|
|
|
|
strcat(new_flags, "|");
|
|
|
|
strcat(new_flags, cpt);
|
|
|
|
len++;
|
|
|
|
}
|
|
|
|
|
|
|
|
len += strlen(cpt);
|
|
|
|
}
|
|
|
|
|
|
|
|
str = strtok_r(NULL, "|", &pos);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (max_gfp_len < len)
|
|
|
|
max_gfp_len = len;
|
|
|
|
|
|
|
|
free(orig_flags);
|
|
|
|
return new_flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static char *compact_gfp_string(unsigned long gfp_flags)
|
|
|
|
{
|
|
|
|
struct gfp_flag key = {
|
|
|
|
.flags = gfp_flags,
|
|
|
|
};
|
|
|
|
struct gfp_flag *gfp;
|
|
|
|
|
|
|
|
gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
|
|
|
|
if (gfp)
|
|
|
|
return gfp->compact_str;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
|
|
|
|
unsigned int gfp_flags)
|
|
|
|
{
|
|
|
|
struct pevent_record record = {
|
|
|
|
.cpu = sample->cpu,
|
|
|
|
.data = sample->raw_data,
|
|
|
|
.size = sample->raw_size,
|
|
|
|
};
|
|
|
|
struct trace_seq seq;
|
2015-05-11 14:41:17 +00:00
|
|
|
char *str, *pos = NULL;
|
2015-04-21 04:55:05 +00:00
|
|
|
|
|
|
|
if (nr_gfps) {
|
|
|
|
struct gfp_flag key = {
|
|
|
|
.flags = gfp_flags,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_seq_init(&seq);
|
|
|
|
pevent_event_info(&seq, evsel->tp_format, &record);
|
|
|
|
|
|
|
|
str = strtok_r(seq.buffer, " ", &pos);
|
|
|
|
while (str) {
|
|
|
|
if (!strncmp(str, "gfp_flags=", 10)) {
|
|
|
|
struct gfp_flag *new;
|
|
|
|
|
|
|
|
new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
|
|
|
|
if (new == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
gfps = new;
|
|
|
|
new += nr_gfps++;
|
|
|
|
|
|
|
|
new->flags = gfp_flags;
|
|
|
|
new->human_readable = strdup(str + 10);
|
|
|
|
new->compact_str = compact_gfp_flags(str + 10);
|
|
|
|
if (!new->human_readable || !new->compact_str)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
str = strtok_r(NULL, " ", &pos);
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_seq_destroy(&seq);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
|
|
|
|
struct perf_sample *sample)
|
|
|
|
{
|
|
|
|
u64 page;
|
|
|
|
unsigned int order = perf_evsel__intval(evsel, sample, "order");
|
|
|
|
unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
|
|
|
|
unsigned int migrate_type = perf_evsel__intval(evsel, sample,
|
|
|
|
"migratetype");
|
|
|
|
u64 bytes = kmem_page_size << order;
|
2015-04-21 04:55:02 +00:00
|
|
|
u64 callsite;
|
2015-04-14 17:49:33 +00:00
|
|
|
struct page_stat *pstat;
|
2015-04-06 05:36:10 +00:00
|
|
|
struct page_stat this = {
|
|
|
|
.order = order,
|
|
|
|
.gfp_flags = gfp_flags,
|
|
|
|
.migrate_type = migrate_type,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (use_pfn)
|
|
|
|
page = perf_evsel__intval(evsel, sample, "pfn");
|
|
|
|
else
|
|
|
|
page = perf_evsel__intval(evsel, sample, "page");
|
|
|
|
|
|
|
|
nr_page_allocs++;
|
|
|
|
total_page_alloc_bytes += bytes;
|
|
|
|
|
|
|
|
if (!valid_page(page)) {
|
|
|
|
nr_page_fails++;
|
|
|
|
total_page_fail_bytes += bytes;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:05 +00:00
|
|
|
if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
callsite = find_callsite(evsel, sample);
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
/*
|
|
|
|
* This is to find the current page (with correct gfp flags and
|
|
|
|
* migrate type) at free event.
|
|
|
|
*/
|
|
|
|
this.page = page;
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
pstat = page_stat__findnew_page(&this);
|
2015-04-14 17:49:33 +00:00
|
|
|
if (pstat == NULL)
|
2015-04-06 05:36:10 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
pstat->nr_alloc++;
|
|
|
|
pstat->alloc_bytes += bytes;
|
|
|
|
pstat->callsite = callsite;
|
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
if (!live_page) {
|
|
|
|
pstat = page_stat__findnew_alloc(&this);
|
|
|
|
if (pstat == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pstat->nr_alloc++;
|
|
|
|
pstat->alloc_bytes += bytes;
|
|
|
|
pstat->callsite = callsite;
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
this.callsite = callsite;
|
|
|
|
pstat = page_stat__findnew_caller(&this);
|
2015-04-21 04:55:02 +00:00
|
|
|
if (pstat == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2015-04-14 17:49:33 +00:00
|
|
|
pstat->nr_alloc++;
|
|
|
|
pstat->alloc_bytes += bytes;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
order_stats[order][migrate_type]++;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
|
|
|
|
struct perf_sample *sample)
|
|
|
|
{
|
|
|
|
u64 page;
|
|
|
|
unsigned int order = perf_evsel__intval(evsel, sample, "order");
|
|
|
|
u64 bytes = kmem_page_size << order;
|
2015-04-14 17:49:33 +00:00
|
|
|
struct page_stat *pstat;
|
2015-04-06 05:36:10 +00:00
|
|
|
struct page_stat this = {
|
|
|
|
.order = order,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (use_pfn)
|
|
|
|
page = perf_evsel__intval(evsel, sample, "pfn");
|
|
|
|
else
|
|
|
|
page = perf_evsel__intval(evsel, sample, "page");
|
|
|
|
|
|
|
|
nr_page_frees++;
|
|
|
|
total_page_free_bytes += bytes;
|
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
this.page = page;
|
|
|
|
pstat = page_stat__find_page(&this);
|
2015-04-14 17:49:33 +00:00
|
|
|
if (pstat == NULL) {
|
2015-04-06 05:36:10 +00:00
|
|
|
pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
|
|
|
|
page, order);
|
|
|
|
|
|
|
|
nr_page_nomatch++;
|
|
|
|
total_page_nomatch_bytes += bytes;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-14 17:49:33 +00:00
|
|
|
this.gfp_flags = pstat->gfp_flags;
|
|
|
|
this.migrate_type = pstat->migrate_type;
|
2015-04-21 04:55:02 +00:00
|
|
|
this.callsite = pstat->callsite;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
rb_erase(&pstat->node, &page_live_tree);
|
2015-04-14 17:49:33 +00:00
|
|
|
free(pstat);
|
2015-04-06 05:36:10 +00:00
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
if (live_page) {
|
|
|
|
order_stats[this.order][this.migrate_type]--;
|
|
|
|
} else {
|
|
|
|
pstat = page_stat__find_alloc(&this);
|
|
|
|
if (pstat == NULL)
|
|
|
|
return -ENOMEM;
|
2015-04-21 04:55:02 +00:00
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
pstat->nr_free++;
|
|
|
|
pstat->free_bytes += bytes;
|
|
|
|
}
|
2015-04-21 04:55:02 +00:00
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
pstat = page_stat__find_caller(&this);
|
2015-04-14 17:49:33 +00:00
|
|
|
if (pstat == NULL)
|
2015-04-06 05:36:10 +00:00
|
|
|
return -ENOENT;
|
|
|
|
|
2015-04-14 17:49:33 +00:00
|
|
|
pstat->nr_free++;
|
|
|
|
pstat->free_bytes += bytes;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
if (live_page) {
|
|
|
|
pstat->nr_alloc--;
|
|
|
|
pstat->alloc_bytes -= bytes;
|
|
|
|
|
|
|
|
if (pstat->nr_alloc == 0) {
|
|
|
|
rb_erase(&pstat->node, &page_caller_tree);
|
|
|
|
free(pstat);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-29 17:15:45 +00:00
|
|
|
static bool perf_kmem__skip_sample(struct perf_sample *sample)
|
|
|
|
{
|
|
|
|
/* skip sample based on time? */
|
|
|
|
if (perf_time__skip_sample(&ptime, sample->time))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-09-24 13:46:54 +00:00
|
|
|
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
|
|
|
|
struct perf_sample *sample);
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2012-09-10 22:15:03 +00:00
|
|
|
static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
2011-11-25 10:19:45 +00:00
|
|
|
union perf_event *event,
|
2011-01-29 16:01:45 +00:00
|
|
|
struct perf_sample *sample,
|
2012-08-07 12:58:03 +00:00
|
|
|
struct perf_evsel *evsel,
|
2011-11-28 09:56:39 +00:00
|
|
|
struct machine *machine)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
perf machine: Protect the machine->threads with a rwlock
In addition to using refcounts for the struct thread lifetime
management, we need to protect access to machine->threads from
concurrent access.
That happens in 'perf top', where a thread processes events, inserting
and deleting entries from that rb_tree while another thread decays
hist_entries, that end up dropping references and ultimately deleting
threads from the rb_tree and releasing its resources when no further
hist_entry (or other data structures, like in 'perf sched') references
it.
So the rule is the same for refcounts + protected trees in the kernel,
get the tree lock, find object, bump the refcount, drop the tree lock,
return, use object, drop the refcount if no more use of it is needed,
keep it if storing it in some other data structure, drop when releasing
that data structure.
I.e. pair "t = machine__find(new)_thread()" with a "thread__put(t)", and
"perf_event__preprocess_sample(&al)" with "addr_location__put(&al)".
The addr_location__put() one is because as we return references to
several data structures, we may end up adding more reference counting
for the other data structures and then we'll drop it at
addr_location__put() time.
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bs9rt4n0jw3hi9f3zxyy3xln@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-06 23:43:22 +00:00
|
|
|
int err = 0;
|
2013-08-27 08:23:06 +00:00
|
|
|
struct thread *thread = machine__findnew_thread(machine, sample->pid,
|
2014-05-12 00:56:42 +00:00
|
|
|
sample->tid);
|
2009-11-20 07:53:25 +00:00
|
|
|
|
|
|
|
if (thread == NULL) {
|
|
|
|
pr_debug("problem processing %d event, skipping it.\n",
|
|
|
|
event->header.type);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-11-29 17:15:45 +00:00
|
|
|
if (perf_kmem__skip_sample(sample))
|
|
|
|
return 0;
|
|
|
|
|
2013-09-11 12:46:56 +00:00
|
|
|
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2013-11-06 13:17:38 +00:00
|
|
|
if (evsel->handler != NULL) {
|
|
|
|
tracepoint_handler f = evsel->handler;
|
perf machine: Protect the machine->threads with a rwlock
In addition to using refcounts for the struct thread lifetime
management, we need to protect access to machine->threads from
concurrent access.
That happens in 'perf top', where a thread processes events, inserting
and deleting entries from that rb_tree while another thread decays
hist_entries, that end up dropping references and ultimately deleting
threads from the rb_tree and releasing its resources when no further
hist_entry (or other data structures, like in 'perf sched') references
it.
So the rule is the same for refcounts + protected trees in the kernel,
get the tree lock, find object, bump the refcount, drop the tree lock,
return, use object, drop the refcount if no more use of it is needed,
keep it if storing it in some other data structure, drop when releasing
that data structure.
I.e. pair "t = machine__find(new)_thread()" with a "thread__put(t)", and
"perf_event__preprocess_sample(&al)" with "addr_location__put(&al)".
The addr_location__put() one is because as we return references to
several data structures, we may end up adding more reference counting
for the other data structures and then we'll drop it at
addr_location__put() time.
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bs9rt4n0jw3hi9f3zxyy3xln@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-06 23:43:22 +00:00
|
|
|
err = f(evsel, sample);
|
2012-09-24 13:46:54 +00:00
|
|
|
}
|
|
|
|
|
perf machine: Protect the machine->threads with a rwlock
In addition to using refcounts for the struct thread lifetime
management, we need to protect access to machine->threads from
concurrent access.
That happens in 'perf top', where a thread processes events, inserting
and deleting entries from that rb_tree while another thread decays
hist_entries, that end up dropping references and ultimately deleting
threads from the rb_tree and releasing its resources when no further
hist_entry (or other data structures, like in 'perf sched') references
it.
So the rule is the same for refcounts + protected trees in the kernel,
get the tree lock, find object, bump the refcount, drop the tree lock,
return, use object, drop the refcount if no more use of it is needed,
keep it if storing it in some other data structure, drop when releasing
that data structure.
I.e. pair "t = machine__find(new)_thread()" with a "thread__put(t)", and
"perf_event__preprocess_sample(&al)" with "addr_location__put(&al)".
The addr_location__put() one is because as we return references to
several data structures, we may end up adding more reference counting
for the other data structures and then we'll drop it at
addr_location__put() time.
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bs9rt4n0jw3hi9f3zxyy3xln@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-06 23:43:22 +00:00
|
|
|
thread__put(thread);
|
|
|
|
|
|
|
|
return err;
|
2009-11-20 07:53:25 +00:00
|
|
|
}
|
|
|
|
|
2012-08-07 12:58:03 +00:00
|
|
|
static struct perf_tool perf_kmem = {
|
|
|
|
.sample = process_sample_event,
|
|
|
|
.comm = perf_event__process_comm,
|
2014-08-01 05:59:31 +00:00
|
|
|
.mmap = perf_event__process_mmap,
|
|
|
|
.mmap2 = perf_event__process_mmap2,
|
perf tools: Add PERF_RECORD_NAMESPACES to include namespaces related info
Introduce a new option to record PERF_RECORD_NAMESPACES events emitted
by the kernel when fork, clone, setns or unshare are invoked. And update
perf-record documentation with the new option to record namespace
events.
Committer notes:
Combined it with a later patch to allow printing it via 'perf report -D'
and be able to test the feature introduced in this patch. Had to move
here also perf_ns__name(), that was introduced in another later patch.
Also used PRIu64 and PRIx64 to fix the build in some enfironments wrt:
util/event.c:1129:39: error: format '%lx' expects argument of type 'long unsigned int', but argument 6 has type 'long long unsigned int' [-Werror=format=]
ret += fprintf(fp, "%u/%s: %lu/0x%lx%s", idx
^
Testing it:
# perf record --namespaces -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 1.083 MB perf.data (423 samples) ]
#
# perf report -D
<SNIP>
3 2028902078892 0x115140 [0xa0]: PERF_RECORD_NAMESPACES 14783/14783 - nr_namespaces: 7
[0/net: 3/0xf0000081, 1/uts: 3/0xeffffffe, 2/ipc: 3/0xefffffff, 3/pid: 3/0xeffffffc,
4/user: 3/0xeffffffd, 5/mnt: 3/0xf0000000, 6/cgroup: 3/0xeffffffb]
0x1151e0 [0x30]: event: 9
.
. ... raw event: size 48 bytes
. 0000: 09 00 00 00 02 00 30 00 c4 71 82 68 0c 7f 00 00 ......0..q.h....
. 0010: a9 39 00 00 a9 39 00 00 94 28 fe 63 d8 01 00 00 .9...9...(.c....
. 0020: 03 00 00 00 00 00 00 00 ce c4 02 00 00 00 00 00 ................
<SNIP>
NAMESPACES events: 1
<SNIP>
#
Signed-off-by: Hari Bathini <hbathini@linux.vnet.ibm.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@fb.com>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Aravinda Prasad <aravinda@linux.vnet.ibm.com>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sargun Dhillon <sargun@sargun.me>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/148891930386.25309.18412039920746995488.stgit@hbathini.in.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-03-07 20:41:43 +00:00
|
|
|
.namespaces = perf_event__process_namespaces,
|
2014-07-06 12:18:21 +00:00
|
|
|
.ordered_events = true,
|
2009-11-20 07:53:25 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
|
|
|
|
{
|
|
|
|
if (n_alloc == 0)
|
|
|
|
return 0.0;
|
|
|
|
else
|
|
|
|
return 100.0 - (100.0 * n_req / n_alloc);
|
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
static void __print_slab_result(struct rb_root *root,
|
|
|
|
struct perf_session *session,
|
|
|
|
int n_lines, int is_caller)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
|
|
|
struct rb_node *next;
|
2012-12-19 12:04:24 +00:00
|
|
|
struct machine *machine = &session->machines.host;
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2015-03-12 07:32:48 +00:00
|
|
|
printf("%.105s\n", graph_dotted_line);
|
2009-11-24 05:26:55 +00:00
|
|
|
printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
|
2010-01-19 17:23:23 +00:00
|
|
|
printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
|
2015-03-12 07:32:48 +00:00
|
|
|
printf("%.105s\n", graph_dotted_line);
|
2009-11-20 07:53:25 +00:00
|
|
|
|
|
|
|
next = rb_first(root);
|
|
|
|
|
|
|
|
while (next && n_lines--) {
|
2009-11-23 19:51:09 +00:00
|
|
|
struct alloc_stat *data = rb_entry(next, struct alloc_stat,
|
|
|
|
node);
|
|
|
|
struct symbol *sym = NULL;
|
2010-04-02 00:24:38 +00:00
|
|
|
struct map *map;
|
2009-11-24 05:26:55 +00:00
|
|
|
char buf[BUFSIZ];
|
2009-11-23 19:51:09 +00:00
|
|
|
u64 addr;
|
|
|
|
|
|
|
|
if (is_caller) {
|
|
|
|
addr = data->call_site;
|
2009-11-24 05:25:48 +00:00
|
|
|
if (!raw_ip)
|
2016-09-01 22:25:52 +00:00
|
|
|
sym = machine__find_kernel_function(machine, addr, &map);
|
2009-11-23 19:51:09 +00:00
|
|
|
} else
|
|
|
|
addr = data->ptr;
|
|
|
|
|
|
|
|
if (sym != NULL)
|
2011-01-22 22:37:02 +00:00
|
|
|
snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
|
2010-04-02 00:24:38 +00:00
|
|
|
addr - map->unmap_ip(map, sym->start));
|
2009-11-23 19:51:09 +00:00
|
|
|
else
|
2011-01-22 22:37:02 +00:00
|
|
|
snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
|
2009-11-24 05:26:55 +00:00
|
|
|
printf(" %-34s |", buf);
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2015-03-12 07:32:48 +00:00
|
|
|
printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
|
2009-11-24 05:26:55 +00:00
|
|
|
(unsigned long long)data->bytes_alloc,
|
2009-11-20 07:53:25 +00:00
|
|
|
(unsigned long)data->bytes_alloc / data->hit,
|
|
|
|
(unsigned long long)data->bytes_req,
|
|
|
|
(unsigned long)data->bytes_req / data->hit,
|
|
|
|
(unsigned long)data->hit,
|
2009-11-24 05:26:55 +00:00
|
|
|
(unsigned long)data->pingpong,
|
2009-11-20 07:53:25 +00:00
|
|
|
fragmentation(data->bytes_req, data->bytes_alloc));
|
|
|
|
|
|
|
|
next = rb_next(next);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (n_lines == -1)
|
2015-03-12 07:32:48 +00:00
|
|
|
printf(" ... | ... | ... | ... | ... | ... \n");
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2015-03-12 07:32:48 +00:00
|
|
|
printf("%.105s\n", graph_dotted_line);
|
2009-11-20 07:53:25 +00:00
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
static const char * const migrate_type_str[] = {
|
|
|
|
"UNMOVABL",
|
|
|
|
"RECLAIM",
|
|
|
|
"MOVABLE",
|
|
|
|
"RESERVED",
|
|
|
|
"CMA/ISLT",
|
|
|
|
"UNKNOWN",
|
|
|
|
};
|
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
static void __print_page_alloc_result(struct perf_session *session, int n_lines)
|
2015-04-06 05:36:10 +00:00
|
|
|
{
|
2015-04-21 04:55:02 +00:00
|
|
|
struct rb_node *next = rb_first(&page_alloc_sorted);
|
|
|
|
struct machine *machine = &session->machines.host;
|
2015-04-06 05:36:10 +00:00
|
|
|
const char *format;
|
2015-04-21 04:55:05 +00:00
|
|
|
int gfp_len = max(strlen("GFP flags"), max_gfp_len);
|
2015-04-06 05:36:10 +00:00
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
printf("\n%.105s\n", graph_dotted_line);
|
2015-04-21 04:55:05 +00:00
|
|
|
printf(" %-16s | %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
|
|
|
|
use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
|
|
|
|
gfp_len, "GFP flags");
|
2015-04-21 04:55:02 +00:00
|
|
|
printf("%.105s\n", graph_dotted_line);
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
if (use_pfn)
|
2015-04-21 04:55:05 +00:00
|
|
|
format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
|
2015-04-06 05:36:10 +00:00
|
|
|
else
|
2015-04-21 04:55:05 +00:00
|
|
|
format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
while (next && n_lines--) {
|
|
|
|
struct page_stat *data;
|
2015-04-21 04:55:02 +00:00
|
|
|
struct symbol *sym;
|
|
|
|
struct map *map;
|
|
|
|
char buf[32];
|
|
|
|
char *caller = buf;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
data = rb_entry(next, struct page_stat, node);
|
2016-09-01 22:25:52 +00:00
|
|
|
sym = machine__find_kernel_function(machine, data->callsite, &map);
|
2017-02-13 19:52:15 +00:00
|
|
|
if (sym)
|
2015-04-21 04:55:02 +00:00
|
|
|
caller = sym->name;
|
|
|
|
else
|
|
|
|
scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
printf(format, (unsigned long long)data->page,
|
|
|
|
(unsigned long long)data->alloc_bytes / 1024,
|
|
|
|
data->nr_alloc, data->order,
|
|
|
|
migrate_type_str[data->migrate_type],
|
2015-04-21 04:55:05 +00:00
|
|
|
gfp_len, compact_gfp_string(data->gfp_flags), caller);
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
next = rb_next(next);
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:05 +00:00
|
|
|
if (n_lines == -1) {
|
|
|
|
printf(" ... | ... | ... | ... | ... | %-*s | ...\n",
|
|
|
|
gfp_len, "...");
|
|
|
|
}
|
2015-04-06 05:36:10 +00:00
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
printf("%.105s\n", graph_dotted_line);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __print_page_caller_result(struct perf_session *session, int n_lines)
|
|
|
|
{
|
|
|
|
struct rb_node *next = rb_first(&page_caller_sorted);
|
|
|
|
struct machine *machine = &session->machines.host;
|
2015-04-21 04:55:05 +00:00
|
|
|
int gfp_len = max(strlen("GFP flags"), max_gfp_len);
|
2015-04-21 04:55:02 +00:00
|
|
|
|
|
|
|
printf("\n%.105s\n", graph_dotted_line);
|
2015-04-21 04:55:05 +00:00
|
|
|
printf(" %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
|
|
|
|
live_page ? "Live" : "Total", gfp_len, "GFP flags");
|
2015-04-21 04:55:02 +00:00
|
|
|
printf("%.105s\n", graph_dotted_line);
|
|
|
|
|
|
|
|
while (next && n_lines--) {
|
|
|
|
struct page_stat *data;
|
|
|
|
struct symbol *sym;
|
|
|
|
struct map *map;
|
|
|
|
char buf[32];
|
|
|
|
char *caller = buf;
|
|
|
|
|
|
|
|
data = rb_entry(next, struct page_stat, node);
|
2016-09-01 22:25:52 +00:00
|
|
|
sym = machine__find_kernel_function(machine, data->callsite, &map);
|
2017-02-13 19:52:15 +00:00
|
|
|
if (sym)
|
2015-04-21 04:55:02 +00:00
|
|
|
caller = sym->name;
|
|
|
|
else
|
|
|
|
scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
|
|
|
|
|
2015-04-21 04:55:05 +00:00
|
|
|
printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
|
2015-04-21 04:55:02 +00:00
|
|
|
(unsigned long long)data->alloc_bytes / 1024,
|
|
|
|
data->nr_alloc, data->order,
|
|
|
|
migrate_type_str[data->migrate_type],
|
2015-04-21 04:55:05 +00:00
|
|
|
gfp_len, compact_gfp_string(data->gfp_flags), caller);
|
2015-04-21 04:55:02 +00:00
|
|
|
|
|
|
|
next = rb_next(next);
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:05 +00:00
|
|
|
if (n_lines == -1) {
|
|
|
|
printf(" ... | ... | ... | ... | %-*s | ...\n",
|
|
|
|
gfp_len, "...");
|
|
|
|
}
|
2015-04-21 04:55:02 +00:00
|
|
|
|
|
|
|
printf("%.105s\n", graph_dotted_line);
|
2015-04-06 05:36:10 +00:00
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:05 +00:00
|
|
|
static void print_gfp_flags(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
printf("#\n");
|
|
|
|
printf("# GFP flags\n");
|
|
|
|
printf("# ---------\n");
|
|
|
|
for (i = 0; i < nr_gfps; i++) {
|
|
|
|
printf("# %08x: %*s: %s\n", gfps[i].flags,
|
|
|
|
(int) max_gfp_len, gfps[i].compact_str,
|
|
|
|
gfps[i].human_readable);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
static void print_slab_summary(void)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
2015-04-06 05:36:10 +00:00
|
|
|
printf("\nSUMMARY (SLAB allocator)");
|
|
|
|
printf("\n========================\n");
|
2015-03-23 06:30:40 +00:00
|
|
|
printf("Total bytes requested: %'lu\n", total_requested);
|
|
|
|
printf("Total bytes allocated: %'lu\n", total_allocated);
|
2016-11-25 21:42:13 +00:00
|
|
|
printf("Total bytes freed: %'lu\n", total_freed);
|
|
|
|
if (total_allocated > total_freed) {
|
|
|
|
printf("Net total bytes allocated: %'lu\n",
|
|
|
|
total_allocated - total_freed);
|
|
|
|
}
|
2015-03-23 06:30:40 +00:00
|
|
|
printf("Total bytes wasted on internal fragmentation: %'lu\n",
|
2009-11-20 07:53:25 +00:00
|
|
|
total_allocated - total_requested);
|
|
|
|
printf("Internal fragmentation: %f%%\n",
|
|
|
|
fragmentation(total_requested, total_allocated));
|
2015-03-23 06:30:40 +00:00
|
|
|
printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
|
2009-11-20 07:53:25 +00:00
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
/*
 * Print session-wide page allocator totals, then a per-order breakdown
 * by migrate type.  Free events that matched a seen allocation count as
 * "alloc+freed"; frees with no matching allocation are "free-only".
 */
static void print_page_summary(void)
{
	int o, m;	/* page order / migrate type indices */
	/* frees whose allocation was observed in this session */
	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

	printf("\nSUMMARY (page allocator)");
	printf("\n========================\n");
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
	nr_page_allocs, total_page_alloc_bytes / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
	nr_page_frees, total_page_free_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
	nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
	nr_page_allocs - nr_alloc_freed,
	(total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
	nr_page_nomatch, total_page_nomatch_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
	nr_page_fails, total_page_fail_bytes / 1024);
	printf("\n");

	/* header + separator for the order x migrate-type matrix */
	printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
	"Reclaimable", "Movable", "Reserved", "CMA/Isolated");
	printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
	graph_dotted_line, graph_dotted_line, graph_dotted_line,
	graph_dotted_line, graph_dotted_line);

	for (o = 0; o < MAX_PAGE_ORDER; o++) {
		printf("%5d", o);
		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
			/* '.' marks cells with no allocations */
			if (order_stats[o][m])
				printf(" %'12d", order_stats[o][m]);
			else
				printf(" %12c", '.');
		}
		printf("\n");
	}
}
|
|
|
|
|
|
|
|
static void print_slab_result(struct perf_session *session)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
|
|
|
if (caller_flag)
|
2015-04-06 05:36:10 +00:00
|
|
|
__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
|
|
|
|
if (alloc_flag)
|
|
|
|
__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
|
|
|
|
print_slab_summary();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_page_result(struct perf_session *session)
|
|
|
|
{
|
2015-04-21 04:55:05 +00:00
|
|
|
if (caller_flag || alloc_flag)
|
|
|
|
print_gfp_flags();
|
2015-04-21 04:55:02 +00:00
|
|
|
if (caller_flag)
|
|
|
|
__print_page_caller_result(session, caller_lines);
|
2009-11-20 07:53:25 +00:00
|
|
|
if (alloc_flag)
|
2015-04-21 04:55:02 +00:00
|
|
|
__print_page_alloc_result(session, alloc_lines);
|
2015-04-06 05:36:10 +00:00
|
|
|
print_page_summary();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void print_result(struct perf_session *session)
|
|
|
|
{
|
|
|
|
if (kmem_slab)
|
|
|
|
print_slab_result(session);
|
|
|
|
if (kmem_page)
|
|
|
|
print_page_result(session);
|
2009-11-20 07:53:25 +00:00
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
/* Active sort-key chains, populated from --sort (or the defaults). */
static LIST_HEAD(slab_caller_sort);	/* slab stats, --caller view */
static LIST_HEAD(slab_alloc_sort);	/* slab stats, --alloc view */
static LIST_HEAD(page_caller_sort);	/* page stats, --caller view */
static LIST_HEAD(page_alloc_sort);	/* page stats, --alloc view */
|
2009-11-24 05:26:10 +00:00
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
|
|
|
|
struct list_head *sort_list)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
|
|
|
struct rb_node **new = &(root->rb_node);
|
|
|
|
struct rb_node *parent = NULL;
|
2009-11-24 05:26:10 +00:00
|
|
|
struct sort_dimension *sort;
|
2009-11-20 07:53:25 +00:00
|
|
|
|
|
|
|
while (*new) {
|
|
|
|
struct alloc_stat *this;
|
2009-11-24 05:26:10 +00:00
|
|
|
int cmp = 0;
|
2009-11-20 07:53:25 +00:00
|
|
|
|
|
|
|
this = rb_entry(*new, struct alloc_stat, node);
|
|
|
|
parent = *new;
|
|
|
|
|
2009-11-24 05:26:10 +00:00
|
|
|
list_for_each_entry(sort, sort_list, list) {
|
|
|
|
cmp = sort->cmp(data, this);
|
|
|
|
if (cmp)
|
|
|
|
break;
|
|
|
|
}
|
2009-11-20 07:53:25 +00:00
|
|
|
|
|
|
|
if (cmp > 0)
|
|
|
|
new = &((*new)->rb_left);
|
|
|
|
else
|
|
|
|
new = &((*new)->rb_right);
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, new);
|
|
|
|
rb_insert_color(&data->node, root);
|
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
|
|
|
|
struct list_head *sort_list)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
|
|
|
struct rb_node *node;
|
|
|
|
struct alloc_stat *data;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
node = rb_first(root);
|
|
|
|
if (!node)
|
|
|
|
break;
|
|
|
|
|
|
|
|
rb_erase(node, root);
|
|
|
|
data = rb_entry(node, struct alloc_stat, node);
|
2015-04-06 05:36:10 +00:00
|
|
|
sort_slab_insert(root_sorted, data, sort_list);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static void sort_page_insert(struct rb_root *root, struct page_stat *data,
|
|
|
|
struct list_head *sort_list)
|
2015-04-06 05:36:10 +00:00
|
|
|
{
|
|
|
|
struct rb_node **new = &root->rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
2015-04-21 04:55:03 +00:00
|
|
|
struct sort_dimension *sort;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
while (*new) {
|
|
|
|
struct page_stat *this;
|
|
|
|
int cmp = 0;
|
|
|
|
|
|
|
|
this = rb_entry(*new, struct page_stat, node);
|
|
|
|
parent = *new;
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
list_for_each_entry(sort, sort_list, list) {
|
|
|
|
cmp = sort->cmp(data, this);
|
|
|
|
if (cmp)
|
|
|
|
break;
|
|
|
|
}
|
2015-04-06 05:36:10 +00:00
|
|
|
|
|
|
|
if (cmp > 0)
|
|
|
|
new = &parent->rb_left;
|
|
|
|
else
|
|
|
|
new = &parent->rb_right;
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, new);
|
|
|
|
rb_insert_color(&data->node, root);
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
|
|
|
|
struct list_head *sort_list)
|
2015-04-06 05:36:10 +00:00
|
|
|
{
|
|
|
|
struct rb_node *node;
|
|
|
|
struct page_stat *data;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
node = rb_first(root);
|
|
|
|
if (!node)
|
|
|
|
break;
|
|
|
|
|
|
|
|
rb_erase(node, root);
|
|
|
|
data = rb_entry(node, struct page_stat, node);
|
2015-04-21 04:55:03 +00:00
|
|
|
sort_page_insert(root_sorted, data, sort_list);
|
2009-11-20 07:53:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sort_result(void)
|
|
|
|
{
|
2015-04-06 05:36:10 +00:00
|
|
|
if (kmem_slab) {
|
|
|
|
__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
|
2015-04-21 04:55:03 +00:00
|
|
|
&slab_alloc_sort);
|
2015-04-06 05:36:10 +00:00
|
|
|
__sort_slab_result(&root_caller_stat, &root_caller_sorted,
|
2015-04-21 04:55:03 +00:00
|
|
|
&slab_caller_sort);
|
2015-04-06 05:36:10 +00:00
|
|
|
}
|
|
|
|
if (kmem_page) {
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
if (live_page)
|
|
|
|
__sort_page_result(&page_live_tree, &page_alloc_sorted,
|
|
|
|
&page_alloc_sort);
|
|
|
|
else
|
|
|
|
__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
|
|
|
|
&page_alloc_sort);
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
__sort_page_result(&page_caller_tree, &page_caller_sorted,
|
|
|
|
&page_caller_sort);
|
2015-04-06 05:36:10 +00:00
|
|
|
}
|
2009-11-20 07:53:25 +00:00
|
|
|
}
|
|
|
|
|
2014-08-12 06:40:38 +00:00
|
|
|
/*
 * Replay a recorded session: register per-tracepoint handlers, process
 * all events to build the stat trees, then sort and print the report.
 * Returns 0 on success or a negative error code.
 */
static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	struct perf_evsel *evsel;
	const struct perf_evsel_str_handler kmem_tracepoints[] = {
		/* slab allocator */
		{ "kmem:kmalloc", perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree", perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free", perf_evsel__process_free_event, },
		/* page allocator */
		{ "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, },
		{ "kmem:mm_page_free", perf_evsel__process_page_free_event, },
	};

	/* the data file must actually contain tracepoint samples */
	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	/*
	 * Newer kernels expose a "pfn" field on mm_page_alloc; prefer it
	 * over the struct page pointer when present (sets global use_pfn).
	 */
	evlist__for_each_entry(session->evlist, evsel) {
		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
		    perf_evsel__field(evsel, "pfn")) {
			use_pfn = true;
			break;
		}
	}

	setup_pager();
	err = perf_session__process_events(session);
	if (err != 0) {
		pr_err("error during process events: %d\n", err);
		goto out;
	}
	sort_result();
	print_result(session);
out:
	return err;
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
/* slab sort keys */
|
|
|
|
static int ptr_cmp(void *a, void *b)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
2015-04-21 04:55:03 +00:00
|
|
|
struct alloc_stat *l = a;
|
|
|
|
struct alloc_stat *r = b;
|
|
|
|
|
2009-11-20 07:53:25 +00:00
|
|
|
if (l->ptr < r->ptr)
|
|
|
|
return -1;
|
|
|
|
else if (l->ptr > r->ptr)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 05:26:10 +00:00
|
|
|
static struct sort_dimension ptr_sort_dimension = {
|
|
|
|
.name = "ptr",
|
|
|
|
.cmp = ptr_cmp,
|
|
|
|
};
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static int slab_callsite_cmp(void *a, void *b)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
2015-04-21 04:55:03 +00:00
|
|
|
struct alloc_stat *l = a;
|
|
|
|
struct alloc_stat *r = b;
|
|
|
|
|
2009-11-20 07:53:25 +00:00
|
|
|
if (l->call_site < r->call_site)
|
|
|
|
return -1;
|
|
|
|
else if (l->call_site > r->call_site)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 05:26:10 +00:00
|
|
|
static struct sort_dimension callsite_sort_dimension = {
|
|
|
|
.name = "callsite",
|
2015-04-21 04:55:03 +00:00
|
|
|
.cmp = slab_callsite_cmp,
|
2009-11-24 05:26:10 +00:00
|
|
|
};
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static int hit_cmp(void *a, void *b)
|
2009-11-22 09:58:00 +00:00
|
|
|
{
|
2015-04-21 04:55:03 +00:00
|
|
|
struct alloc_stat *l = a;
|
|
|
|
struct alloc_stat *r = b;
|
|
|
|
|
2009-11-22 09:58:00 +00:00
|
|
|
if (l->hit < r->hit)
|
|
|
|
return -1;
|
|
|
|
else if (l->hit > r->hit)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 05:26:10 +00:00
|
|
|
static struct sort_dimension hit_sort_dimension = {
|
|
|
|
.name = "hit",
|
|
|
|
.cmp = hit_cmp,
|
|
|
|
};
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static int bytes_cmp(void *a, void *b)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
2015-04-21 04:55:03 +00:00
|
|
|
struct alloc_stat *l = a;
|
|
|
|
struct alloc_stat *r = b;
|
|
|
|
|
2009-11-20 07:53:25 +00:00
|
|
|
if (l->bytes_alloc < r->bytes_alloc)
|
|
|
|
return -1;
|
|
|
|
else if (l->bytes_alloc > r->bytes_alloc)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 05:26:10 +00:00
|
|
|
static struct sort_dimension bytes_sort_dimension = {
|
|
|
|
.name = "bytes",
|
|
|
|
.cmp = bytes_cmp,
|
|
|
|
};
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static int frag_cmp(void *a, void *b)
|
2009-11-22 09:58:00 +00:00
|
|
|
{
|
|
|
|
double x, y;
|
2015-04-21 04:55:03 +00:00
|
|
|
struct alloc_stat *l = a;
|
|
|
|
struct alloc_stat *r = b;
|
2009-11-22 09:58:00 +00:00
|
|
|
|
|
|
|
x = fragmentation(l->bytes_req, l->bytes_alloc);
|
|
|
|
y = fragmentation(r->bytes_req, r->bytes_alloc);
|
|
|
|
|
|
|
|
if (x < y)
|
|
|
|
return -1;
|
|
|
|
else if (x > y)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-24 05:26:10 +00:00
|
|
|
static struct sort_dimension frag_sort_dimension = {
|
|
|
|
.name = "frag",
|
|
|
|
.cmp = frag_cmp,
|
|
|
|
};
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static int pingpong_cmp(void *a, void *b)
|
2009-11-24 05:26:55 +00:00
|
|
|
{
|
2015-04-21 04:55:03 +00:00
|
|
|
struct alloc_stat *l = a;
|
|
|
|
struct alloc_stat *r = b;
|
|
|
|
|
2009-11-24 05:26:55 +00:00
|
|
|
if (l->pingpong < r->pingpong)
|
|
|
|
return -1;
|
|
|
|
else if (l->pingpong > r->pingpong)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension pingpong_sort_dimension = {
|
|
|
|
.name = "pingpong",
|
|
|
|
.cmp = pingpong_cmp,
|
|
|
|
};
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
/* page sort keys */
|
|
|
|
static int page_cmp(void *a, void *b)
|
|
|
|
{
|
|
|
|
struct page_stat *l = a;
|
|
|
|
struct page_stat *r = b;
|
|
|
|
|
|
|
|
if (l->page < r->page)
|
|
|
|
return -1;
|
|
|
|
else if (l->page > r->page)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension page_sort_dimension = {
|
|
|
|
.name = "page",
|
|
|
|
.cmp = page_cmp,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int page_callsite_cmp(void *a, void *b)
|
|
|
|
{
|
|
|
|
struct page_stat *l = a;
|
|
|
|
struct page_stat *r = b;
|
|
|
|
|
|
|
|
if (l->callsite < r->callsite)
|
|
|
|
return -1;
|
|
|
|
else if (l->callsite > r->callsite)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension page_callsite_sort_dimension = {
|
|
|
|
.name = "callsite",
|
|
|
|
.cmp = page_callsite_cmp,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int page_hit_cmp(void *a, void *b)
|
|
|
|
{
|
|
|
|
struct page_stat *l = a;
|
|
|
|
struct page_stat *r = b;
|
|
|
|
|
|
|
|
if (l->nr_alloc < r->nr_alloc)
|
|
|
|
return -1;
|
|
|
|
else if (l->nr_alloc > r->nr_alloc)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension page_hit_sort_dimension = {
|
|
|
|
.name = "hit",
|
|
|
|
.cmp = page_hit_cmp,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int page_bytes_cmp(void *a, void *b)
|
|
|
|
{
|
|
|
|
struct page_stat *l = a;
|
|
|
|
struct page_stat *r = b;
|
|
|
|
|
|
|
|
if (l->alloc_bytes < r->alloc_bytes)
|
|
|
|
return -1;
|
|
|
|
else if (l->alloc_bytes > r->alloc_bytes)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension page_bytes_sort_dimension = {
|
|
|
|
.name = "bytes",
|
|
|
|
.cmp = page_bytes_cmp,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int page_order_cmp(void *a, void *b)
|
|
|
|
{
|
|
|
|
struct page_stat *l = a;
|
|
|
|
struct page_stat *r = b;
|
|
|
|
|
|
|
|
if (l->order < r->order)
|
|
|
|
return -1;
|
|
|
|
else if (l->order > r->order)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension page_order_sort_dimension = {
|
|
|
|
.name = "order",
|
|
|
|
.cmp = page_order_cmp,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int migrate_type_cmp(void *a, void *b)
|
|
|
|
{
|
|
|
|
struct page_stat *l = a;
|
|
|
|
struct page_stat *r = b;
|
|
|
|
|
|
|
|
/* for internal use to find free'd page */
|
|
|
|
if (l->migrate_type == -1U)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (l->migrate_type < r->migrate_type)
|
|
|
|
return -1;
|
|
|
|
else if (l->migrate_type > r->migrate_type)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension migrate_type_sort_dimension = {
|
|
|
|
.name = "migtype",
|
|
|
|
.cmp = migrate_type_cmp,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int gfp_flags_cmp(void *a, void *b)
|
|
|
|
{
|
|
|
|
struct page_stat *l = a;
|
|
|
|
struct page_stat *r = b;
|
|
|
|
|
|
|
|
/* for internal use to find free'd page */
|
|
|
|
if (l->gfp_flags == -1U)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (l->gfp_flags < r->gfp_flags)
|
|
|
|
return -1;
|
|
|
|
else if (l->gfp_flags > r->gfp_flags)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sort_dimension gfp_flags_sort_dimension = {
|
|
|
|
.name = "gfp",
|
|
|
|
.cmp = gfp_flags_cmp,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* All sort keys accepted by "-s" in slab mode. */
static struct sort_dimension *slab_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

/* All sort keys accepted by "-s" in page mode. */
static struct sort_dimension *page_sorts[] = {
	&page_sort_dimension,
	&page_callsite_sort_dimension,
	&page_hit_sort_dimension,
	&page_bytes_sort_dimension,
	&page_order_sort_dimension,
	&migrate_type_sort_dimension,
	&gfp_flags_sort_dimension,
};
|
2009-11-24 05:26:10 +00:00
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static int slab_sort_dimension__add(const char *tok, struct list_head *list)
|
2009-11-24 05:26:10 +00:00
|
|
|
{
|
|
|
|
struct sort_dimension *sort;
|
|
|
|
int i;
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
|
|
|
|
if (!strcmp(slab_sorts[i]->name, tok)) {
|
|
|
|
sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
|
2012-09-09 01:53:06 +00:00
|
|
|
if (!sort) {
|
2013-01-25 01:24:57 +00:00
|
|
|
pr_err("%s: memdup failed\n", __func__);
|
2012-09-09 01:53:06 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2009-11-24 05:26:10 +00:00
|
|
|
list_add_tail(&sort->list, list);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
static int page_sort_dimension__add(const char *tok, struct list_head *list)
|
|
|
|
{
|
|
|
|
struct sort_dimension *sort;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
|
|
|
|
if (!strcmp(page_sorts[i]->name, tok)) {
|
|
|
|
sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
|
|
|
|
if (!sort) {
|
|
|
|
pr_err("%s: memdup failed\n", __func__);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
list_add_tail(&sort->list, list);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Parse the comma-separated slab sort spec @arg into @sort_list.
 * Returns 0 on success, -1 on OOM or an unknown key.
 */
static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
{
	char *str = strdup(arg);
	char *pos = str;
	char *tok;
	int ret = 0;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while ((tok = strsep(&pos, ",")) != NULL) {
		if (slab_sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown slab --sort key: '%s'", tok);
			ret = -1;
			break;
		}
	}

	free(str);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Parse the comma-separated page sort spec @arg into @sort_list.
 * Returns 0 on success, -1 on OOM or an unknown key.
 */
static int setup_page_sorting(struct list_head *sort_list, const char *arg)
{
	char *str = strdup(arg);
	char *pos = str;
	char *tok;
	int ret = 0;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while ((tok = strsep(&pos, ",")) != NULL) {
		if (page_sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown page --sort key: '%s'", tok);
			ret = -1;
			break;
		}
	}

	free(str);
	return ret;
}
|
|
|
|
|
2012-09-10 22:15:03 +00:00
|
|
|
/*
 * -s/--sort handler.  The keys apply to whichever mode is in effect:
 * the later of --page/--slab wins, falling back to the configured
 * default when neither was given; likewise the later of
 * --caller/--alloc picks which view's sort list is filled.
 */
static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	bool page_mode;

	if (!arg)
		return -1;

	page_mode = kmem_page > kmem_slab ||
		(kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE);

	if (page_mode) {
		if (caller_flag > alloc_flag)
			return setup_page_sorting(&page_caller_sort, arg);
		return setup_page_sorting(&page_alloc_sort, arg);
	}

	if (caller_flag > alloc_flag)
		return setup_slab_sorting(&slab_caller_sort, arg);
	return setup_slab_sorting(&slab_alloc_sort, arg);
}
|
|
|
|
|
2012-09-10 22:15:03 +00:00
|
|
|
/* --caller handler: make the caller view outrank --alloc when both given. */
static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = alloc_flag + 1;
	return 0;
}
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2012-09-10 22:15:03 +00:00
|
|
|
/* --alloc handler: make the alloc view outrank --caller when both given. */
static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = caller_flag + 1;
	return 0;
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
/* --slab handler: the later of --slab/--page decides the mode. */
static int parse_slab_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_slab = kmem_page + 1;
	return 0;
}
|
|
|
|
|
|
|
|
/* --page handler: the later of --slab/--page decides the mode. */
static int parse_page_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_page = kmem_slab + 1;
	return 0;
}
|
|
|
|
|
2012-09-10 22:15:03 +00:00
|
|
|
/*
 * -l/--line handler: limit the table printed by the most recent of
 * --caller/--alloc to @arg lines.
 *
 * Validates the count instead of calling strtoul(arg, NULL, 10), which
 * silently turned "abc" into 0 and accepted negative/trailing-garbage
 * input; now such input fails option parsing with -1.
 */
static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	char *end;
	long lines;

	if (!arg)
		return -1;

	lines = strtol(arg, &end, 10);
	/* reject empty input, trailing garbage and negative counts */
	if (end == arg || *end != '\0' || lines < 0)
		return -1;

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
|
|
|
|
|
2012-10-01 18:20:58 +00:00
|
|
|
/*
 * Build a "perf record" argv with the tracepoints for the selected
 * allocator(s) (plus -g for page mode, which needs callchains) and the
 * user's trailing arguments, then hand off to cmd_record().
 */
static int __cmd_record(int argc, const char **argv)
{
	/* system wide, raw samples, sample every event */
	const char * const record_args[] = {
	"record", "-a", "-R", "-c", "1",
	};
	const char * const slab_events[] = {
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
	};
	const char * const page_events[] = {
	"-e", "kmem:mm_page_alloc",
	"-e", "kmem:mm_page_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	/* argc - 1: the "kmem" subcommand word itself is dropped */
	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	if (kmem_slab)
		rec_argc += ARRAY_SIZE(slab_events);
	if (kmem_page)
		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */

	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	if (kmem_slab) {
		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
			rec_argv[i] = strdup(slab_events[j]);
	}
	if (kmem_page) {
		/* page mode needs callchains to attribute allocations */
		rec_argv[i++] = strdup("-g");

		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
			rec_argv[i] = strdup(page_events[j]);
	}

	/* append the user's own record options/workload, skipping argv[0] */
	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv);
}
|
|
|
|
|
2016-02-26 09:31:51 +00:00
|
|
|
/*
 * perf_config() callback: honor "kmem.default = slab|page" from the
 * config file.  Unknown variables are ignored (return 0).
 *
 * Fix: perf_config() passes value == NULL when the key appears without
 * "= value"; the old code passed that straight to strcmp() and crashed.
 */
static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "kmem.default")) {
		if (!value)
			return -1;

		if (!strcmp(value, "slab"))
			kmem_default = KMEM_SLAB;
		else if (!strcmp(value, "page"))
			kmem_default = KMEM_PAGE;
		else
			pr_err("invalid default value ('slab' or 'page' required): %s\n",
			       value);
		return 0;
	}

	return 0;
}
|
|
|
|
|
2017-03-27 14:47:20 +00:00
|
|
|
int cmd_kmem(int argc, const char **argv)
|
2009-11-20 07:53:25 +00:00
|
|
|
{
|
2015-04-21 04:55:03 +00:00
|
|
|
const char * const default_slab_sort = "frag,hit,bytes";
|
|
|
|
const char * const default_page_sort = "bytes,hit";
|
perf kmem: Support using -f to override perf.data file ownership
Enable perf kmem to use perf.data when it is not owned by current user
or root.
Example:
# perf kmem record ls
# chown Yunlong.Song:Yunlong.Song perf.data
# ls -al perf.data
-rw------- 1 Yunlong.Song Yunlong.Song 5315665 Apr 2 10:54 perf.data
# id
uid=0(root) gid=0(root) groups=0(root),64(pkcs11)
Before this patch:
# perf kmem stat
File perf.data not owned by current user or root (use -f to override)
# perf kmem stat -f
Error: unknown switch `f'
usage: perf kmem [<options>] {record|stat}
-i, --input <file> input file name
-v, --verbose be more verbose (show symbol address, etc)
--caller show per-callsite statistics
--alloc show per-allocation statistics
-s, --sort <key[,key2...]>
sort by keys: ptr, call_site, bytes, hit,
pingpong, frag
-l, --line <num> show n lines
--raw-ip show raw ip instead of symbol
As shown above, the -f option does not work at all.
After this patch:
# perf kmem stat
File perf.data not owned by current user or root (use -f to override)
# perf kmem stat -f
SUMMARY
=======
Total bytes requested: 437599
Total bytes allocated: 615472
Total bytes wasted on internal fragmentation: 177873
Internal fragmentation: 28.900259%
Cross CPU allocations: 6/1192
As shown above, the -f option really works now.
Signed-off-by: Yunlong Song <yunlong.song@huawei.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1427982439-27388-4-git-send-email-yunlong.song@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-02 13:47:12 +00:00
|
|
|
struct perf_data_file file = {
|
|
|
|
.mode = PERF_DATA_MODE_READ,
|
|
|
|
};
|
2012-10-01 18:20:58 +00:00
|
|
|
const struct option kmem_options[] = {
|
|
|
|
OPT_STRING('i', "input", &input_name, "file", "input file name"),
|
2015-03-12 07:32:47 +00:00
|
|
|
OPT_INCR('v', "verbose", &verbose,
|
|
|
|
"be more verbose (show symbol address, etc)"),
|
2012-10-01 18:20:58 +00:00
|
|
|
OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
|
|
|
|
"show per-callsite statistics", parse_caller_opt),
|
|
|
|
OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
|
|
|
|
"show per-allocation statistics", parse_alloc_opt),
|
|
|
|
OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
|
2015-04-21 04:55:03 +00:00
|
|
|
"sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
|
|
|
|
"page, order, migtype, gfp", parse_sort_opt),
|
2012-10-01 18:20:58 +00:00
|
|
|
OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
|
|
|
|
OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
|
perf kmem: Support using -f to override perf.data file ownership
Enable perf kmem to use perf.data when it is not owned by current user
or root.
Example:
# perf kmem record ls
# chown Yunlong.Song:Yunlong.Song perf.data
# ls -al perf.data
-rw------- 1 Yunlong.Song Yunlong.Song 5315665 Apr 2 10:54 perf.data
# id
uid=0(root) gid=0(root) groups=0(root),64(pkcs11)
Before this patch:
# perf kmem stat
File perf.data not owned by current user or root (use -f to override)
# perf kmem stat -f
Error: unknown switch `f'
usage: perf kmem [<options>] {record|stat}
-i, --input <file> input file name
-v, --verbose be more verbose (show symbol address, etc)
--caller show per-callsite statistics
--alloc show per-allocation statistics
-s, --sort <key[,key2...]>
sort by keys: ptr, call_site, bytes, hit,
pingpong, frag
-l, --line <num> show n lines
--raw-ip show raw ip instead of symbol
As shown above, the -f option does not work at all.
After this patch:
# perf kmem stat
File perf.data not owned by current user or root (use -f to override)
# perf kmem stat -f
SUMMARY
=======
Total bytes requested: 437599
Total bytes allocated: 615472
Total bytes wasted on internal fragmentation: 177873
Internal fragmentation: 28.900259%
Cross CPU allocations: 6/1192
As shown above, the -f option really works now.
Signed-off-by: Yunlong Song <yunlong.song@huawei.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1427982439-27388-4-git-send-email-yunlong.song@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-02 13:47:12 +00:00
|
|
|
OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
|
2015-04-06 05:36:10 +00:00
|
|
|
OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
|
|
|
|
parse_slab_opt),
|
|
|
|
OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
|
|
|
|
parse_page_opt),
|
perf kmem: Add --live option for current allocation stat
Currently 'perf kmem stat --page' shows total (page) allocation stat by
default, but sometimes one might want to see live (total alloc-only)
requests/pages only. The new --live option does this by subtracting freed
allocation from the stat.
E.g.:
# perf kmem stat --page
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 172,173 3,083 806,686 . .
1 284 . . . .
2 6,124 58 . . .
3 114 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . 1 . .
10 . . . . .
# perf kmem stat --page --live
SUMMARY (page allocator)
========================
Total allocation requests : 988,858 [ 4,045,368 KB ]
Total free requests : 886,484 [ 3,624,996 KB ]
Total alloc+freed requests : 885,969 [ 3,622,628 KB ]
Total alloc-only requests : 102,889 [ 422,740 KB ]
Total free-only requests : 515 [ 2,368 KB ]
Total allocation failures : 0 [ 0 KB ]
Order Unmovable Reclaimable Movable Reserved CMA/Isolated
----- ------------ ------------ ------------ ------------ ------------
0 2,214 3,025 97,156 . .
1 59 . . . .
2 19 58 . . .
3 23 335 . . .
4 . . . . .
5 . . . . .
6 . . . . .
7 . . . . .
8 . . . . .
9 . . . . .
10 . . . . .
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Pekka Enberg <penberg@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1429592107-1807-4-git-send-email-namhyung@kernel.org
[ Added examples to the changeset log ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-21 04:55:04 +00:00
|
|
|
OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
|
2016-11-29 17:15:45 +00:00
|
|
|
OPT_STRING(0, "time", &time_str, "str",
|
|
|
|
"Time span of interest (start,stop)"),
|
2012-10-01 18:20:58 +00:00
|
|
|
OPT_END()
|
|
|
|
};
|
2014-03-15 03:17:51 +00:00
|
|
|
const char *const kmem_subcommands[] = { "record", "stat", NULL };
|
|
|
|
const char *kmem_usage[] = {
|
|
|
|
NULL,
|
2012-10-01 18:20:58 +00:00
|
|
|
NULL
|
|
|
|
};
|
2014-08-12 06:40:38 +00:00
|
|
|
struct perf_session *session;
|
2015-05-05 14:52:52 +00:00
|
|
|
const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
|
2017-01-24 16:44:10 +00:00
|
|
|
int ret = perf_config(kmem_config, NULL);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2014-08-12 06:40:38 +00:00
|
|
|
|
2014-03-15 03:17:51 +00:00
|
|
|
argc = parse_options_subcommand(argc, argv, kmem_options,
|
|
|
|
kmem_subcommands, kmem_usage, 0);
|
2009-11-20 07:53:25 +00:00
|
|
|
|
2009-12-10 07:21:57 +00:00
|
|
|
if (!argc)
|
2009-11-20 07:53:25 +00:00
|
|
|
usage_with_options(kmem_usage, kmem_options);
|
|
|
|
|
2015-04-21 04:55:06 +00:00
|
|
|
if (kmem_slab == 0 && kmem_page == 0) {
|
|
|
|
if (kmem_default == KMEM_SLAB)
|
|
|
|
kmem_slab = 1;
|
|
|
|
else
|
|
|
|
kmem_page = 1;
|
|
|
|
}
|
2015-04-06 05:36:10 +00:00
|
|
|
|
2009-12-10 07:21:57 +00:00
|
|
|
if (!strncmp(argv[0], "rec", 3)) {
|
2014-08-12 06:40:45 +00:00
|
|
|
symbol__init(NULL);
|
2009-12-10 07:21:57 +00:00
|
|
|
return __cmd_record(argc, argv);
|
2014-08-12 06:40:38 +00:00
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:08 +00:00
|
|
|
file.path = input_name;
|
|
|
|
|
2015-04-21 04:55:02 +00:00
|
|
|
kmem_session = session = perf_session__new(&file, false, &perf_kmem);
|
2014-08-12 06:40:38 +00:00
|
|
|
if (session == NULL)
|
2014-09-24 01:33:37 +00:00
|
|
|
return -1;
|
2014-08-12 06:40:38 +00:00
|
|
|
|
2017-01-24 16:44:10 +00:00
|
|
|
ret = -1;
|
|
|
|
|
2015-05-05 14:52:52 +00:00
|
|
|
if (kmem_slab) {
|
|
|
|
if (!perf_evlist__find_tracepoint_by_name(session->evlist,
|
|
|
|
"kmem:kmalloc")) {
|
|
|
|
pr_err(errmsg, "slab", "slab");
|
2015-06-30 08:15:21 +00:00
|
|
|
goto out_delete;
|
2015-05-05 14:52:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-06 05:36:10 +00:00
|
|
|
if (kmem_page) {
|
2015-05-05 14:52:52 +00:00
|
|
|
struct perf_evsel *evsel;
|
2015-04-06 05:36:10 +00:00
|
|
|
|
2015-05-05 14:52:52 +00:00
|
|
|
evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
|
|
|
|
"kmem:mm_page_alloc");
|
|
|
|
if (evsel == NULL) {
|
|
|
|
pr_err(errmsg, "page", "page");
|
2015-06-30 08:15:21 +00:00
|
|
|
goto out_delete;
|
2015-04-06 05:36:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
|
2015-04-21 04:55:02 +00:00
|
|
|
symbol_conf.use_callchain = true;
|
2015-04-06 05:36:10 +00:00
|
|
|
}
|
|
|
|
|
2014-08-12 06:40:45 +00:00
|
|
|
symbol__init(&session->header.env);
|
2014-08-12 06:40:38 +00:00
|
|
|
|
2016-11-29 17:15:45 +00:00
|
|
|
if (perf_time__parse_str(&ptime, time_str) != 0) {
|
|
|
|
pr_err("Invalid time string\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2014-08-12 06:40:38 +00:00
|
|
|
if (!strcmp(argv[0], "stat")) {
|
2015-03-23 06:30:40 +00:00
|
|
|
setlocale(LC_ALL, "");
|
|
|
|
|
2014-04-07 18:55:23 +00:00
|
|
|
if (cpu__setup_cpunode_map())
|
2014-08-12 06:40:38 +00:00
|
|
|
goto out_delete;
|
2009-12-10 07:21:57 +00:00
|
|
|
|
2015-04-21 04:55:03 +00:00
|
|
|
if (list_empty(&slab_caller_sort))
|
|
|
|
setup_slab_sorting(&slab_caller_sort, default_slab_sort);
|
|
|
|
if (list_empty(&slab_alloc_sort))
|
|
|
|
setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
|
|
|
|
if (list_empty(&page_caller_sort))
|
|
|
|
setup_page_sorting(&page_caller_sort, default_page_sort);
|
|
|
|
if (list_empty(&page_alloc_sort))
|
|
|
|
setup_page_sorting(&page_alloc_sort, default_page_sort);
|
|
|
|
|
|
|
|
if (kmem_page) {
|
|
|
|
setup_page_sorting(&page_alloc_sort_input,
|
|
|
|
"page,order,migtype,gfp");
|
|
|
|
setup_page_sorting(&page_caller_sort_input,
|
|
|
|
"callsite,order,migtype,gfp");
|
|
|
|
}
|
2014-08-12 06:40:38 +00:00
|
|
|
ret = __cmd_kmem(session);
|
2010-01-19 17:26:11 +00:00
|
|
|
} else
|
|
|
|
usage_with_options(kmem_usage, kmem_options);
|
2009-11-24 05:26:31 +00:00
|
|
|
|
2014-08-12 06:40:38 +00:00
|
|
|
out_delete:
|
|
|
|
perf_session__delete(session);
|
|
|
|
|
|
|
|
return ret;
|
2009-11-20 07:53:25 +00:00
|
|
|
}
|
|
|
|
|