2012-08-20 04:52:05 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
|
|
|
|
#include "../../util/util.h"
|
|
|
|
#include "../../util/hist.h"
|
|
|
|
#include "../../util/sort.h"
|
2013-01-22 09:09:37 +00:00
|
|
|
#include "../../util/evsel.h"
|
2012-08-20 04:52:05 +00:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Print the fixed leading pad plus @left_margin extra spaces at the
 * start of a callchain output line.
 *
 * Returns the number of bytes written to @fp.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	size_t printed = fprintf(fp, " ");
	int col;

	for (col = left_margin; col > 0; col--)
		printed += fprintf(fp, " ");

	return printed;
}
|
|
|
|
|
|
|
|
/*
 * Print one "separator" line of the callchain graph: the left margin
 * followed by a '|' pipe for every depth level still open in
 * @depth_mask (bit i set == level i has more siblings coming), and a
 * trailing newline.
 *
 * Returns the number of bytes written to @fp.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t printed = callchain__fprintf_left_margin(fp, left_margin);
	int level;

	for (level = 0; level < depth; level++) {
		const char *seg = (depth_mask & (1 << level)) ? "| " : " ";

		printed += fprintf(fp, "%s", seg);
	}

	printed += fprintf(fp, "\n");
	return printed;
}
|
|
|
|
|
2015-11-09 05:45:39 +00:00
|
|
|
/*
 * Print one callchain-graph entry line: margin, the depth pipes, and the
 * resolved symbol name of @chain.  For the first entry of a child
 * (@period == 0) the last column also carries the "--value--" connector
 * produced by callchain_node__fprintf_value().
 *
 * NOTE: the returned byte count covers the margin/pipes/value only; the
 * symbol name and the newline are emitted with fputs()/fputc() and are
 * deliberately not added to @ret (their return values are ignored).
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024];	/* scratch buffer for the formatted symbol name */

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		/* keep the vertical link only for levels that continue below */
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		/* first line of a new child: attach the value connector */
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}
	fputs(callchain_list__sym_name(chain, bf, sizeof(bf), false), fp);
	fputc('\n', fp);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Fake "[...]" symbol and callchain entry used to represent hits that
 * were filtered out of the graph; set up by init_rem_hits() below.
 * rem_sq_bracket stays NULL if that allocation failed.
 */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;
|
|
|
|
|
|
|
|
static void init_rem_hits(void)
|
|
|
|
{
|
|
|
|
rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
|
|
|
|
if (!rem_sq_bracket) {
|
|
|
|
fprintf(stderr, "Not enough memory to display remaining hits\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
strcpy(rem_sq_bracket->name, "[...]");
|
|
|
|
rem_hits.ms.sym = rem_sq_bracket;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Recursively print one level of the callchain graph rooted at @root.
 *
 * @total_samples: sample base used for percentages at this level
 * @depth:         current recursion depth (1-based)
 * @depth_mask:    bit i set == level i still has siblings below, so its
 *                 '|' pipe must be kept in separator lines
 * @left_margin:   extra indentation columns
 *
 * In CHAIN_GRAPH_REL mode, samples not accounted for by any child
 * (filtered entries) are printed at the end as a fake "[...]" node.
 * Returns the number of bytes written (modulo the sym-name caveat in
 * ipchain__fprintf_graph()).
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;		/* samples not yet claimed by a child */
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;	/* event counts claimed by children (CCVAL_COUNT) */

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		/* i doubles as the 'period' flag: 0 only for the first entry */
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* relative mode rebases percentages on this child's subtree */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	/* print the filtered-out remainder as a fake "[...]" entry */
	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		/* init_rem_hits() may have failed to allocate the symbol */
		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
|
|
|
|
|
2016-01-27 15:40:53 +00:00
|
|
|
/*
|
|
|
|
* If have one single callchain root, don't bother printing
|
|
|
|
* its percentage (100 % in fractal mode and the same percentage
|
|
|
|
* than the hist in graph mode). This also avoid one level of column.
|
|
|
|
*
|
|
|
|
* However when percent-limit applied, it's possible that single callchain
|
|
|
|
* node have different (non-100% in fractal mode) percentage.
|
|
|
|
*/
|
|
|
|
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
|
|
|
|
{
|
|
|
|
struct callchain_node *cnode;
|
|
|
|
|
|
|
|
if (rb_next(node))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
cnode = rb_entry(node, struct callchain_node, rb_node);
|
|
|
|
return callchain_cumul_hits(cnode) != parent_samples;
|
|
|
|
}
|
|
|
|
|
2012-08-20 04:52:05 +00:00
|
|
|
/*
 * Top-level printer for a hist entry's callchain graph.
 *
 * If the chain has a single root whose percentage would be redundant
 * (see need_percent_display()), that root's entries are printed inline
 * with a "---" connector and the recursion starts from its children,
 * saving one column.  In CHAIN_GRAPH_REL mode percentages are computed
 * against @parent_samples rather than @total_samples.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;	/* "---" connector emitted yet? */
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];		/* scratch buffer for symbol names */

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;
			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				/* subsequent lines align under the connector */
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s\n", callchain_list__sym_name(chain, bf, sizeof(bf),
							false));

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		/* recurse from the single root's children */
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
|
|
|
|
|
2013-11-05 18:32:36 +00:00
|
|
|
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
|
2012-08-20 04:52:05 +00:00
|
|
|
u64 total_samples)
|
|
|
|
{
|
|
|
|
struct callchain_list *chain;
|
|
|
|
size_t ret = 0;
|
2014-11-13 02:05:23 +00:00
|
|
|
char bf[1024];
|
2012-08-20 04:52:05 +00:00
|
|
|
|
2013-11-05 18:32:36 +00:00
|
|
|
if (!node)
|
2012-08-20 04:52:05 +00:00
|
|
|
return 0;
|
|
|
|
|
2013-11-05 18:32:36 +00:00
|
|
|
ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
|
2012-08-20 04:52:05 +00:00
|
|
|
|
|
|
|
|
2013-11-05 18:32:36 +00:00
|
|
|
list_for_each_entry(chain, &node->val, list) {
|
2012-08-20 04:52:05 +00:00
|
|
|
if (chain->ip >= PERF_CONTEXT_MAX)
|
|
|
|
continue;
|
2014-11-13 02:05:23 +00:00
|
|
|
ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
|
|
|
|
bf, sizeof(bf), false));
|
2012-08-20 04:52:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-11-05 18:32:36 +00:00
|
|
|
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
|
2012-08-20 04:52:05 +00:00
|
|
|
u64 total_samples)
|
|
|
|
{
|
|
|
|
size_t ret = 0;
|
|
|
|
u32 entries_printed = 0;
|
|
|
|
struct callchain_node *chain;
|
2013-11-05 18:32:36 +00:00
|
|
|
struct rb_node *rb_node = rb_first(tree);
|
2012-08-20 04:52:05 +00:00
|
|
|
|
|
|
|
while (rb_node) {
|
|
|
|
chain = rb_entry(rb_node, struct callchain_node, rb_node);
|
|
|
|
|
2015-11-09 05:45:39 +00:00
|
|
|
ret += fprintf(fp, " ");
|
|
|
|
ret += callchain_node__fprintf_value(chain, fp, total_samples);
|
|
|
|
ret += fprintf(fp, "\n");
|
2012-08-20 04:52:05 +00:00
|
|
|
ret += __callchain__fprintf_flat(fp, chain, total_samples);
|
|
|
|
ret += fprintf(fp, "\n");
|
|
|
|
if (++entries_printed == callchain_param.print_limit)
|
|
|
|
break;
|
|
|
|
|
|
|
|
rb_node = rb_next(rb_node);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-11-09 05:45:37 +00:00
|
|
|
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
|
|
|
|
{
|
|
|
|
const char *sep = symbol_conf.field_sep ?: ";";
|
|
|
|
struct callchain_list *chain;
|
|
|
|
size_t ret = 0;
|
|
|
|
char bf[1024];
|
|
|
|
bool first;
|
|
|
|
|
|
|
|
if (!node)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret += __callchain__fprintf_folded(fp, node->parent);
|
|
|
|
|
|
|
|
first = (ret == 0);
|
|
|
|
list_for_each_entry(chain, &node->val, list) {
|
|
|
|
if (chain->ip >= PERF_CONTEXT_MAX)
|
|
|
|
continue;
|
|
|
|
ret += fprintf(fp, "%s%s", first ? "" : sep,
|
|
|
|
callchain_list__sym_name(chain,
|
|
|
|
bf, sizeof(bf), false));
|
|
|
|
first = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
|
|
|
|
u64 total_samples)
|
|
|
|
{
|
|
|
|
size_t ret = 0;
|
|
|
|
u32 entries_printed = 0;
|
|
|
|
struct callchain_node *chain;
|
|
|
|
struct rb_node *rb_node = rb_first(tree);
|
|
|
|
|
|
|
|
while (rb_node) {
|
|
|
|
|
|
|
|
chain = rb_entry(rb_node, struct callchain_node, rb_node);
|
|
|
|
|
2015-11-09 05:45:39 +00:00
|
|
|
ret += callchain_node__fprintf_value(chain, fp, total_samples);
|
|
|
|
ret += fprintf(fp, " ");
|
2015-11-09 05:45:37 +00:00
|
|
|
ret += __callchain__fprintf_folded(fp, chain);
|
|
|
|
ret += fprintf(fp, "\n");
|
|
|
|
if (++entries_printed == callchain_param.print_limit)
|
|
|
|
break;
|
|
|
|
|
|
|
|
rb_node = rb_next(rb_node);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-08-20 04:52:05 +00:00
|
|
|
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
|
|
|
|
u64 total_samples, int left_margin,
|
|
|
|
FILE *fp)
|
|
|
|
{
|
2016-01-27 15:40:52 +00:00
|
|
|
u64 parent_samples = he->stat.period;
|
|
|
|
|
|
|
|
if (symbol_conf.cumulate_callchain)
|
|
|
|
parent_samples = he->stat_acc->period;
|
|
|
|
|
2012-08-20 04:52:05 +00:00
|
|
|
switch (callchain_param.mode) {
|
|
|
|
case CHAIN_GRAPH_REL:
|
2016-01-27 15:40:52 +00:00
|
|
|
return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
|
|
|
|
parent_samples, left_margin);
|
2012-08-20 04:52:05 +00:00
|
|
|
break;
|
|
|
|
case CHAIN_GRAPH_ABS:
|
|
|
|
return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
|
2016-01-27 15:40:52 +00:00
|
|
|
parent_samples, left_margin);
|
2012-08-20 04:52:05 +00:00
|
|
|
break;
|
|
|
|
case CHAIN_FLAT:
|
|
|
|
return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
|
|
|
|
break;
|
2015-11-09 05:45:37 +00:00
|
|
|
case CHAIN_FOLDED:
|
|
|
|
return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
|
|
|
|
break;
|
2012-08-20 04:52:05 +00:00
|
|
|
case CHAIN_NONE:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pr_err("Bad callchain mode\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-03-03 07:16:20 +00:00
|
|
|
/*
 * Format one hist entry's columns into @hpp's buffer (no trailing
 * newline), walking every configured output format and joining the
 * cells with symbol_conf.field_sep (or padding when unset).
 *
 * Returns the number of bytes written into the buffer, 0 if the entry
 * is suppressed by exclude_other.
 */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;	/* remember start to compute total length */
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	hists__for_each_format(he->hists, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		/* prefer the colored formatter when colors are in use */
		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		/* pad/align the cell; alignment happens here, not per-cell */
		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}
|
|
|
|
|
2016-02-24 15:13:41 +00:00
|
|
|
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
|
|
|
|
struct perf_hpp *hpp,
|
|
|
|
int nr_sort_key, struct hists *hists,
|
|
|
|
FILE *fp)
|
|
|
|
{
|
|
|
|
const char *sep = symbol_conf.field_sep;
|
|
|
|
struct perf_hpp_fmt *fmt;
|
|
|
|
char *buf = hpp->buf;
|
2016-02-26 18:52:45 +00:00
|
|
|
size_t size = hpp->size;
|
2016-02-24 15:13:41 +00:00
|
|
|
int ret, printed = 0;
|
|
|
|
bool first = true;
|
|
|
|
|
|
|
|
if (symbol_conf.exclude_other && !he->parent)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
|
|
|
|
advance_hpp(hpp, ret);
|
|
|
|
|
|
|
|
hists__for_each_format(he->hists, fmt) {
|
|
|
|
if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If there's no field_sep, we still need
|
|
|
|
* to display initial ' '.
|
|
|
|
*/
|
|
|
|
if (!sep || !first) {
|
|
|
|
ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
|
|
|
|
advance_hpp(hpp, ret);
|
|
|
|
} else
|
|
|
|
first = false;
|
|
|
|
|
|
|
|
if (perf_hpp__use_color() && fmt->color)
|
|
|
|
ret = fmt->color(fmt, hpp, he);
|
|
|
|
else
|
|
|
|
ret = fmt->entry(fmt, hpp, he);
|
|
|
|
|
|
|
|
ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
|
|
|
|
advance_hpp(hpp, ret);
|
|
|
|
}
|
|
|
|
|
2016-03-07 19:44:46 +00:00
|
|
|
if (!sep)
|
2016-02-24 15:13:41 +00:00
|
|
|
ret = scnprintf(hpp->buf, hpp->size, "%*s",
|
2016-03-07 19:44:46 +00:00
|
|
|
(nr_sort_key - 1) * HIERARCHY_INDENT, "");
|
2016-02-24 15:13:41 +00:00
|
|
|
advance_hpp(hpp, ret);
|
|
|
|
|
2016-02-26 18:52:45 +00:00
|
|
|
printed += fprintf(fp, "%s", buf);
|
|
|
|
|
2016-03-07 19:44:46 +00:00
|
|
|
perf_hpp_list__for_each_format(he->hpp_list, fmt) {
|
|
|
|
hpp->buf = buf;
|
|
|
|
hpp->size = size;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* No need to call hist_entry__snprintf_alignment() since this
|
|
|
|
* fmt is always the last column in the hierarchy mode.
|
|
|
|
*/
|
|
|
|
if (perf_hpp__use_color() && fmt->color)
|
|
|
|
fmt->color(fmt, hpp, he);
|
|
|
|
else
|
|
|
|
fmt->entry(fmt, hpp, he);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* dynamic entries are right-aligned but we want left-aligned
|
|
|
|
* in the hierarchy mode
|
|
|
|
*/
|
|
|
|
printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
|
|
|
|
}
|
|
|
|
printed += putc('\n', fp);
|
2016-02-24 15:13:41 +00:00
|
|
|
|
|
|
|
if (symbol_conf.use_callchain && he->leaf) {
|
|
|
|
u64 total = hists__total_period(hists);
|
|
|
|
|
|
|
|
printed += hist_entry_callchain__fprintf(he, total, 0, fp);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return printed;
|
|
|
|
}
|
|
|
|
|
2012-08-20 04:52:05 +00:00
|
|
|
/*
 * Print one hist entry line to @fp using the caller-provided scratch
 * buffer @bf (capacity @bfsz); @size caps the formatted width (0 or
 * too-large means use the full buffer).  Delegates to the hierarchy
 * printer in --hierarchy mode, otherwise formats the flat columns and
 * appends the callchain when enabled.
 *
 * Returns the number of bytes written to @fp.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists,
			       char *bf, size_t bfsz, FILE *fp)
{
	int ret;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	u64 total_period = hists->stats.total_period;

	/* size == 0 means "no limit"; clamp to the real buffer capacity */
	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy) {
		int nr_sort = hists->nr_sort_keys;

		return hist_entry__hierarchy_fprintf(he, &hpp, nr_sort,
						     hists, fp);
	}

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (symbol_conf.use_callchain)
		ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);

	return ret;
}
|
|
|
|
|
2016-02-24 15:13:42 +00:00
|
|
|
static int print_hierarchy_indent(const char *sep, int nr_sort,
|
|
|
|
const char *line, FILE *fp)
|
|
|
|
{
|
|
|
|
if (sep != NULL || nr_sort < 1)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return fprintf(fp, "%-.*s", (nr_sort - 1) * HIERARCHY_INDENT, line);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Print the two header lines used in hierarchy (--hierarchy) mode:
 * first the column titles (overhead columns, then the sort-key titles
 * joined with " / "), then a matching line of dots.  The dots under the
 * combined sort header are stretched to the widest single sort column
 * (plus its indent) so every hierarchy level fits underneath.
 *
 * Returns the number of header rows printed (always 2).
 */
static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
				  const char *sep, FILE *fp)
{
	bool first = true;
	int nr_sort;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;	/* width of the combined sort header */
	struct perf_hpp_fmt *fmt;

	nr_sort = hists->nr_sort_keys;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, nr_sort, spaces, fp);

	/* pass 1: titles of the overhead columns (before any sort entry) */
	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists_to_evsel(hists));
		fprintf(fp, "%s", hpp->buf);
	}

	/* combine sort headers with ' / ' */
	first = true;
	hists__for_each_format(hists, fmt) {
		if (!perf_hpp__is_sort_entry(fmt) && !perf_hpp__is_dynamic_entry(fmt))
			continue;
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			header_width += fprintf(fp, " / ");
		else {
			fprintf(fp, "%s", sep ?: " ");
			first = false;
		}

		fmt->header(fmt, hpp, hists_to_evsel(hists));
		rtrim(hpp->buf);

		/* trim both sides: headers are joined, not column-aligned */
		header_width += fprintf(fp, "%s", ltrim(hpp->buf));
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, nr_sort, dots, fp);

	/* pass 2: dots under each overhead column, width-matched */
	first = true;
	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists_to_evsel(hists));
		fprintf(fp, "%.*s", width, dots);
	}

	/* widen header_width to the largest indented sort column */
	depth = 0;
	hists__for_each_format(hists, fmt) {
		if (!perf_hpp__is_sort_entry(fmt) && !perf_hpp__is_dynamic_entry(fmt))
			continue;
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		width = fmt->width(fmt, hpp, hists_to_evsel(hists));
		width += depth * HIERARCHY_INDENT;

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}
|
|
|
|
|
2012-10-04 12:49:38 +00:00
|
|
|
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
|
2013-05-14 02:09:04 +00:00
|
|
|
int max_cols, float min_pcnt, FILE *fp)
|
2012-08-20 04:52:05 +00:00
|
|
|
{
|
2012-10-12 22:06:16 +00:00
|
|
|
struct perf_hpp_fmt *fmt;
|
2012-08-20 04:52:05 +00:00
|
|
|
struct rb_node *nd;
|
|
|
|
size_t ret = 0;
|
|
|
|
unsigned int width;
|
|
|
|
const char *sep = symbol_conf.field_sep;
|
2012-10-12 22:06:16 +00:00
|
|
|
int nr_rows = 0;
|
2012-10-05 14:44:45 +00:00
|
|
|
char bf[96];
|
2012-09-03 02:53:06 +00:00
|
|
|
struct perf_hpp dummy_hpp = {
|
|
|
|
.buf = bf,
|
|
|
|
.size = sizeof(bf),
|
|
|
|
};
|
2012-10-04 12:49:37 +00:00
|
|
|
bool first = true;
|
2013-09-05 18:39:12 +00:00
|
|
|
size_t linesz;
|
|
|
|
char *line = NULL;
|
2016-02-26 12:13:17 +00:00
|
|
|
unsigned indent;
|
2012-08-20 04:52:05 +00:00
|
|
|
|
|
|
|
init_rem_hits();
|
|
|
|
|
2016-01-18 09:24:23 +00:00
|
|
|
hists__for_each_format(hists, fmt)
|
2014-03-20 02:18:54 +00:00
|
|
|
perf_hpp__reset_width(fmt, hists);
|
2014-03-03 07:16:20 +00:00
|
|
|
|
2014-07-31 05:47:38 +00:00
|
|
|
if (symbol_conf.col_width_list_str)
|
|
|
|
perf_hpp__set_user_width(symbol_conf.col_width_list_str);
|
|
|
|
|
2014-03-03 07:16:20 +00:00
|
|
|
if (!show_header)
|
|
|
|
goto print_entries;
|
|
|
|
|
|
|
|
fprintf(fp, "# ");
|
|
|
|
|
2016-02-24 15:13:42 +00:00
|
|
|
if (symbol_conf.report_hierarchy) {
|
|
|
|
nr_rows += print_hierarchy_header(hists, &dummy_hpp, sep, fp);
|
|
|
|
goto print_entries;
|
|
|
|
}
|
|
|
|
|
2016-01-18 09:24:23 +00:00
|
|
|
hists__for_each_format(hists, fmt) {
|
2015-12-22 17:07:08 +00:00
|
|
|
if (perf_hpp__should_skip(fmt, hists))
|
2014-03-18 04:00:59 +00:00
|
|
|
continue;
|
|
|
|
|
2014-03-03 07:16:20 +00:00
|
|
|
if (!first)
|
|
|
|
fprintf(fp, "%s", sep ?: " ");
|
|
|
|
else
|
|
|
|
first = false;
|
|
|
|
|
|
|
|
fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
|
|
|
|
fprintf(fp, "%s", bf);
|
2012-08-20 04:52:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fprintf(fp, "\n");
|
|
|
|
if (max_rows && ++nr_rows >= max_rows)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (sep)
|
|
|
|
goto print_entries;
|
|
|
|
|
2012-10-04 12:49:37 +00:00
|
|
|
first = true;
|
|
|
|
|
2012-09-03 02:53:06 +00:00
|
|
|
fprintf(fp, "# ");
|
|
|
|
|
2016-01-18 09:24:23 +00:00
|
|
|
hists__for_each_format(hists, fmt) {
|
2012-10-12 22:06:16 +00:00
|
|
|
unsigned int i;
|
2012-09-03 02:53:06 +00:00
|
|
|
|
2015-12-22 17:07:08 +00:00
|
|
|
if (perf_hpp__should_skip(fmt, hists))
|
2014-03-18 04:00:59 +00:00
|
|
|
continue;
|
|
|
|
|
2012-10-04 12:49:37 +00:00
|
|
|
if (!first)
|
2012-09-03 02:53:06 +00:00
|
|
|
fprintf(fp, "%s", sep ?: " ");
|
2012-10-04 12:49:37 +00:00
|
|
|
else
|
|
|
|
first = false;
|
2012-09-03 02:53:06 +00:00
|
|
|
|
2014-03-10 07:43:52 +00:00
|
|
|
width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
|
2012-09-03 02:53:06 +00:00
|
|
|
for (i = 0; i < width; i++)
|
|
|
|
fprintf(fp, ".");
|
2012-08-20 04:52:05 +00:00
|
|
|
}
|
2012-09-03 02:53:06 +00:00
|
|
|
|
2012-08-20 04:52:05 +00:00
|
|
|
fprintf(fp, "\n");
|
|
|
|
if (max_rows && ++nr_rows >= max_rows)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
fprintf(fp, "#\n");
|
|
|
|
if (max_rows && ++nr_rows >= max_rows)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
print_entries:
|
2013-09-05 18:39:12 +00:00
|
|
|
linesz = hists__sort_list_width(hists) + 3 + 1;
|
2013-10-25 11:24:53 +00:00
|
|
|
linesz += perf_hpp__color_overhead();
|
2013-09-05 18:39:12 +00:00
|
|
|
line = malloc(linesz);
|
|
|
|
if (line == NULL) {
|
|
|
|
ret = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2016-02-26 12:13:17 +00:00
|
|
|
indent = hists__overhead_width(hists) + 4;
|
|
|
|
|
2016-02-24 15:13:41 +00:00
|
|
|
for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
|
2012-08-20 04:52:05 +00:00
|
|
|
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
|
2013-10-31 01:17:39 +00:00
|
|
|
float percent;
|
2012-08-20 04:52:05 +00:00
|
|
|
|
|
|
|
if (h->filtered)
|
|
|
|
continue;
|
|
|
|
|
2013-10-31 01:17:39 +00:00
|
|
|
percent = hist_entry__get_percent_limit(h);
|
2013-05-14 02:09:04 +00:00
|
|
|
if (percent < min_pcnt)
|
|
|
|
continue;
|
|
|
|
|
2013-09-05 18:39:12 +00:00
|
|
|
ret += hist_entry__fprintf(h, max_cols, hists, line, linesz, fp);
|
2012-08-20 04:52:05 +00:00
|
|
|
|
|
|
|
if (max_rows && ++nr_rows >= max_rows)
|
2013-09-05 18:39:12 +00:00
|
|
|
break;
|
2012-08-20 04:52:05 +00:00
|
|
|
|
2016-02-26 12:13:17 +00:00
|
|
|
/*
|
|
|
|
* If all children are filtered out or percent-limited,
|
|
|
|
* display "no entry >= x.xx%" message.
|
|
|
|
*/
|
|
|
|
if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
|
perf report: Fix indentation of dynamic entries in hierarchy
When dynamic entries are used in the hierarchy mode with multiple
events, the output might not be aligned properly. In the hierarchy
mode, each sort column is indented using the total number of sort keys.
So it keeps track of number of sort keys when adding them. However
a dynamic sort key can be added more than once when multiple events have
same field names. This results in unnecessarily long indentation in the
output.
For example perf kmem records following events:
$ perf evlist --trace-fields -i perf.data.kmem
kmem:kmalloc: trace_fields: call_site,ptr,bytes_req,bytes_alloc,gfp_flags
kmem:kmalloc_node: trace_fields: call_site,ptr,bytes_req,bytes_alloc,gfp_flags,node
kmem:kfree: trace_fields: call_site,ptr
kmem:kmem_cache_alloc: trace_fields: call_site,ptr,bytes_req,bytes_alloc,gfp_flags
kmem:kmem_cache_alloc_node: trace_fields: call_site,ptr,bytes_req,bytes_alloc,gfp_flags,node
kmem:kmem_cache_free: trace_fields: call_site,ptr
kmem:mm_page_alloc: trace_fields: page,order,gfp_flags,migratetype
kmem:mm_page_free: trace_fields: page,order
As you can see, many field names shared between kmem events. So adding
'ptr' dynamic sort key alone will set nr_sort_keys to 6. And this adds
many unnecessary spaces between columns.
Before:
$ perf report -i perf.data.kmem --hierarchy -s ptr -g none --stdio
...
# Overhead ptr
# ....................... ...................................
#
99.89% 0xffff8803ffb79720
0.06% 0xffff8803d228a000
0.03% 0xffff8803f7678f00
0.00% 0xffff880401dc5280
0.00% 0xffff880406172380
0.00% 0xffff8803ffac3a00
0.00% 0xffff8803ffac1600
After:
# Overhead ptr
# ........ ....................
#
99.89% 0xffff8803ffb79720
0.06% 0xffff8803d228a000
0.03% 0xffff8803f7678f00
0.00% 0xffff880401dc5280
0.00% 0xffff880406172380
0.00% 0xffff8803ffac3a00
0.00% 0xffff8803ffac1600
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1456512767-1164-2-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-26 18:52:44 +00:00
|
|
|
int nr_sort = hists->nr_sort_keys;
|
2016-02-26 12:13:17 +00:00
|
|
|
|
|
|
|
print_hierarchy_indent(sep, nr_sort + h->depth + 1, spaces, fp);
|
|
|
|
fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
|
|
|
|
|
|
|
|
if (max_rows && ++nr_rows >= max_rows)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-08-20 04:52:05 +00:00
|
|
|
if (h->ms.map == NULL && verbose > 1) {
|
2014-03-21 20:57:01 +00:00
|
|
|
__map_groups__fprintf_maps(h->thread->mg,
|
2014-07-14 21:46:47 +00:00
|
|
|
MAP__FUNCTION, fp);
|
2012-08-20 04:52:05 +00:00
|
|
|
fprintf(fp, "%.10s end\n", graph_dotted_line);
|
|
|
|
}
|
|
|
|
}
|
2013-09-05 18:39:12 +00:00
|
|
|
|
|
|
|
free(line);
|
2012-08-20 04:52:05 +00:00
|
|
|
out:
|
2013-12-27 19:55:14 +00:00
|
|
|
zfree(&rem_sq_bracket);
|
2012-08-20 04:52:05 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-12-18 19:02:17 +00:00
|
|
|
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
|
2012-08-20 04:52:05 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
size_t ret = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
|
|
|
|
const char *name;
|
|
|
|
|
2012-12-18 19:02:17 +00:00
|
|
|
if (stats->nr_events[i] == 0)
|
2012-08-20 04:52:05 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
name = perf_event__name(i);
|
|
|
|
if (!strcmp(name, "UNKNOWN"))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ret += fprintf(fp, "%16s events: %10d\n", name,
|
2012-12-18 19:02:17 +00:00
|
|
|
stats->nr_events[i]);
|
2012-08-20 04:52:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|