2009-04-20 13:00:56 +00:00
|
|
|
|
/*
|
2009-06-02 21:37:05 +00:00
|
|
|
|
* builtin-top.c
|
|
|
|
|
*
|
|
|
|
|
* Builtin top command: Display a continuously updated profile of
|
|
|
|
|
* any workload, CPU or specific PID.
|
|
|
|
|
*
|
|
|
|
|
* Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
|
|
|
|
|
*
|
|
|
|
|
* Improvements and fixes by:
|
|
|
|
|
*
|
|
|
|
|
* Arjan van de Ven <arjan@linux.intel.com>
|
|
|
|
|
* Yanmin Zhang <yanmin.zhang@intel.com>
|
|
|
|
|
* Wu Fengguang <fengguang.wu@intel.com>
|
|
|
|
|
* Mike Galbraith <efault@gmx.de>
|
|
|
|
|
* Paul Mackerras <paulus@samba.org>
|
|
|
|
|
*
|
|
|
|
|
* Released under the GPL v2. (and only v2, not any later version)
|
2009-04-20 13:00:56 +00:00
|
|
|
|
*/
|
2009-06-02 21:37:05 +00:00
|
|
|
|
#include "builtin.h"
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-05-23 16:28:58 +00:00
|
|
|
|
#include "perf.h"
|
2009-06-02 21:37:05 +00:00
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
#include "util/symbol.h"
|
2009-06-04 13:19:47 +00:00
|
|
|
|
#include "util/color.h"
|
2009-04-27 06:02:14 +00:00
|
|
|
|
#include "util/util.h"
|
2009-07-01 15:28:37 +00:00
|
|
|
|
#include <linux/rbtree.h>
|
2009-05-26 07:17:18 +00:00
|
|
|
|
#include "util/parse-options.h"
|
|
|
|
|
#include "util/parse-events.h"
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
|
|
|
|
#include <assert.h>
|
|
|
|
|
#include <fcntl.h>
|
2009-05-26 07:17:18 +00:00
|
|
|
|
|
2009-04-20 13:00:56 +00:00
|
|
|
|
#include <stdio.h>
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
#include <termios.h>
|
|
|
|
|
#include <unistd.h>
|
2009-05-26 07:17:18 +00:00
|
|
|
|
|
2009-04-20 13:00:56 +00:00
|
|
|
|
#include <errno.h>
|
|
|
|
|
#include <time.h>
|
|
|
|
|
#include <sched.h>
|
|
|
|
|
#include <pthread.h>
|
|
|
|
|
|
|
|
|
|
#include <sys/syscall.h>
|
|
|
|
|
#include <sys/ioctl.h>
|
|
|
|
|
#include <sys/poll.h>
|
|
|
|
|
#include <sys/prctl.h>
|
|
|
|
|
#include <sys/wait.h>
|
|
|
|
|
#include <sys/uio.h>
|
|
|
|
|
#include <sys/mman.h>
|
|
|
|
|
|
|
|
|
|
#include <linux/unistd.h>
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
|
|
2009-06-06 07:58:57 +00:00
|
|
|
|
static int fd[MAX_NR_CPUS][MAX_COUNTERS];
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-06-06 07:58:57 +00:00
|
|
|
|
static int system_wide = 0;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-06-06 07:58:57 +00:00
|
|
|
|
static int default_interval = 100000;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
static int count_filter = 5;
|
2009-06-04 06:53:05 +00:00
|
|
|
|
static int print_entries = 15;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-06-04 06:53:05 +00:00
|
|
|
|
static int target_pid = -1;
|
2009-07-21 08:30:36 +00:00
|
|
|
|
static int inherit = 0;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
static int profile_cpu = -1;
|
|
|
|
|
static int nr_cpus = 0;
|
|
|
|
|
static unsigned int realtime_prio = 0;
|
|
|
|
|
static int group = 0;
|
|
|
|
|
static unsigned int page_size;
|
2009-06-05 11:27:02 +00:00
|
|
|
|
static unsigned int mmap_pages = 16;
|
|
|
|
|
static int freq = 0;
|
2009-06-07 15:39:02 +00:00
|
|
|
|
static int verbose = 0;
|
2009-07-02 06:09:46 +00:00
|
|
|
|
static char *vmlinux = NULL;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
|
|
|
|
static int delay_secs = 2;
|
|
|
|
|
static int zero;
|
|
|
|
|
static int dump_symtab;
|
|
|
|
|
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
/*
|
|
|
|
|
* Source
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/*
 * One line of "objdump -dS" output for an annotated symbol, together
 * with per event-counter sample hit counts.
 */
struct source_line {
	u64			eip;			/* address parsed from an asm line; 0 for pure source lines */
	unsigned long		count[MAX_COUNTERS];	/* samples attributed to this line, per counter */
	char			*line;			/* the objdump output text (newline stripped) */
	struct source_line	*next;			/* singly linked, in objdump (ascending address) order */
};
|
|
|
|
|
|
|
|
|
|
static char *sym_filter = NULL;
|
|
|
|
|
struct sym_entry *sym_filter_entry = NULL;
|
|
|
|
|
static int sym_pcnt_filter = 5;
|
|
|
|
|
static int sym_counter = 0;
|
2009-07-24 08:09:50 +00:00
|
|
|
|
static int display_weighted = -1;
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
|
2009-04-20 13:00:56 +00:00
|
|
|
|
/*
|
|
|
|
|
* Symbols
|
|
|
|
|
*/
|
|
|
|
|
|
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 12:21:42 +00:00
|
|
|
|
static u64 min_ip;
|
|
|
|
|
static u64 max_ip = -1ll;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
|
|
|
|
/*
 * Per-symbol accounting entry.  NOTE(review): the code elsewhere casts
 * (struct symbol *)(syme + 1) — the struct symbol data is allocated
 * immediately after this struct in memory.
 */
struct sym_entry {
	struct rb_node		rb_node;		/* position in the sorted display tree */
	struct list_head	node;			/* link in the active_symbols list */
	unsigned long		count[MAX_COUNTERS];	/* live per-counter sample counts */
	unsigned long		snap_count;		/* display-time snapshot; base weight in sym_weight() */
	double			weight;			/* ordering weight, computed by sym_weight() */
	int			skip;			/* skip this symbol in the display — presumably set for noise symbols; confirm at the setter */
	/* annotation state, built by parse_source(): */
	struct source_line	*source;		/* first listing line of the symbol ("<name>:") */
	struct source_line	*lines;			/* full objdump listing for the symbol */
	struct source_line	**lines_tail;		/* append cursor while building 'lines' */
	pthread_mutex_t		source_lock;		/* guards source/lines against the sampling thread */
};
|
|
|
|
|
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
/*
|
|
|
|
|
* Source functions
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void parse_source(struct sym_entry *syme)
|
|
|
|
|
{
|
|
|
|
|
struct symbol *sym;
|
|
|
|
|
struct module *module;
|
|
|
|
|
struct section *section = NULL;
|
|
|
|
|
FILE *file;
|
|
|
|
|
char command[PATH_MAX*2], *path = vmlinux;
|
|
|
|
|
u64 start, end, len;
|
|
|
|
|
|
|
|
|
|
if (!syme)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
if (syme->lines) {
|
|
|
|
|
pthread_mutex_lock(&syme->source_lock);
|
|
|
|
|
goto out_assign;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
sym = (struct symbol *)(syme + 1);
|
|
|
|
|
module = sym->module;
|
|
|
|
|
|
|
|
|
|
if (module)
|
|
|
|
|
path = module->path;
|
|
|
|
|
if (!path)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
start = sym->obj_start;
|
|
|
|
|
if (!start)
|
|
|
|
|
start = sym->start;
|
|
|
|
|
|
|
|
|
|
if (module) {
|
|
|
|
|
section = module->sections->find_section(module->sections, ".text");
|
|
|
|
|
if (section)
|
|
|
|
|
start -= section->vma;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
end = start + sym->end - sym->start + 1;
|
|
|
|
|
len = sym->end - sym->start;
|
|
|
|
|
|
|
|
|
|
sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path);
|
|
|
|
|
|
|
|
|
|
file = popen(command, "r");
|
|
|
|
|
if (!file)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
pthread_mutex_lock(&syme->source_lock);
|
|
|
|
|
syme->lines_tail = &syme->lines;
|
|
|
|
|
while (!feof(file)) {
|
|
|
|
|
struct source_line *src;
|
|
|
|
|
size_t dummy = 0;
|
|
|
|
|
char *c;
|
|
|
|
|
|
|
|
|
|
src = malloc(sizeof(struct source_line));
|
|
|
|
|
assert(src != NULL);
|
|
|
|
|
memset(src, 0, sizeof(struct source_line));
|
|
|
|
|
|
|
|
|
|
if (getline(&src->line, &dummy, file) < 0)
|
|
|
|
|
break;
|
|
|
|
|
if (!src->line)
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
c = strchr(src->line, '\n');
|
|
|
|
|
if (c)
|
|
|
|
|
*c = 0;
|
|
|
|
|
|
|
|
|
|
src->next = NULL;
|
|
|
|
|
*syme->lines_tail = src;
|
|
|
|
|
syme->lines_tail = &src->next;
|
|
|
|
|
|
|
|
|
|
if (strlen(src->line)>8 && src->line[8] == ':') {
|
|
|
|
|
src->eip = strtoull(src->line, NULL, 16);
|
|
|
|
|
if (section)
|
|
|
|
|
src->eip += section->vma;
|
|
|
|
|
}
|
|
|
|
|
if (strlen(src->line)>8 && src->line[16] == ':') {
|
|
|
|
|
src->eip = strtoull(src->line, NULL, 16);
|
|
|
|
|
if (section)
|
|
|
|
|
src->eip += section->vma;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
pclose(file);
|
|
|
|
|
out_assign:
|
|
|
|
|
sym_filter_entry = syme;
|
|
|
|
|
pthread_mutex_unlock(&syme->source_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void __zero_source_counters(struct sym_entry *syme)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
struct source_line *line;
|
|
|
|
|
|
|
|
|
|
line = syme->lines;
|
|
|
|
|
while (line) {
|
|
|
|
|
for (i = 0; i < nr_counters; i++)
|
|
|
|
|
line->count[i] = 0;
|
|
|
|
|
line = line->next;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Attribute one sample at @ip, for event @counter, to the matching
 * annotated line of the symbol currently selected for annotation.
 * No-op for any other symbol.
 */
static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct source_line *sl;

	if (syme != sym_filter_entry)
		return;

	/* Never stall the sampling path: bail out if the display holds the lock. */
	if (pthread_mutex_trylock(&syme->source_lock))
		return;

	if (!syme->source)
		goto out_unlock;

	for (sl = syme->lines; sl; sl = sl->next) {
		if (sl->eip == ip) {
			sl->count[counter]++;
			break;
		}
		/* lines are in ascending address order: @ip is not present */
		if (sl->eip > ip)
			break;
	}
out_unlock:
	pthread_mutex_unlock(&syme->source_lock);
}
|
|
|
|
|
|
|
|
|
|
static void lookup_sym_source(struct sym_entry *syme)
|
|
|
|
|
{
|
|
|
|
|
struct symbol *symbol = (struct symbol *)(syme + 1);
|
|
|
|
|
struct source_line *line;
|
|
|
|
|
char pattern[PATH_MAX];
|
|
|
|
|
char *idx;
|
|
|
|
|
|
|
|
|
|
sprintf(pattern, "<%s>:", symbol->name);
|
|
|
|
|
|
|
|
|
|
if (symbol->module) {
|
|
|
|
|
idx = strstr(pattern, "\t");
|
|
|
|
|
if (idx)
|
|
|
|
|
*idx = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pthread_mutex_lock(&syme->source_lock);
|
|
|
|
|
for (line = syme->lines; line; line = line->next) {
|
|
|
|
|
if (strstr(line->line, pattern)) {
|
|
|
|
|
syme->source = line;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
pthread_mutex_unlock(&syme->source_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void show_lines(struct source_line *queue, int count, int total)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
struct source_line *line;
|
|
|
|
|
|
|
|
|
|
line = queue;
|
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
|
float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;
|
|
|
|
|
|
|
|
|
|
printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
|
|
|
|
|
line = line->next;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#define TRACE_COUNT 3
|
|
|
|
|
|
|
|
|
|
static void show_details(struct sym_entry *syme)
|
|
|
|
|
{
|
|
|
|
|
struct symbol *symbol;
|
|
|
|
|
struct source_line *line;
|
|
|
|
|
struct source_line *line_queue = NULL;
|
|
|
|
|
int displayed = 0;
|
|
|
|
|
int line_queue_count = 0, total = 0, more = 0;
|
|
|
|
|
|
|
|
|
|
if (!syme)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
if (!syme->source)
|
|
|
|
|
lookup_sym_source(syme);
|
|
|
|
|
|
|
|
|
|
if (!syme->source)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
symbol = (struct symbol *)(syme + 1);
|
|
|
|
|
printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
|
|
|
|
|
printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
|
|
|
|
|
|
|
|
|
|
pthread_mutex_lock(&syme->source_lock);
|
|
|
|
|
line = syme->source;
|
|
|
|
|
while (line) {
|
|
|
|
|
total += line->count[sym_counter];
|
|
|
|
|
line = line->next;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
line = syme->source;
|
|
|
|
|
while (line) {
|
|
|
|
|
float pcnt = 0.0;
|
|
|
|
|
|
|
|
|
|
if (!line_queue_count)
|
|
|
|
|
line_queue = line;
|
|
|
|
|
line_queue_count++;
|
|
|
|
|
|
|
|
|
|
if (line->count[sym_counter])
|
|
|
|
|
pcnt = 100.0 * line->count[sym_counter] / (float)total;
|
|
|
|
|
if (pcnt >= (float)sym_pcnt_filter) {
|
|
|
|
|
if (displayed <= print_entries)
|
|
|
|
|
show_lines(line_queue, line_queue_count, total);
|
|
|
|
|
else more++;
|
|
|
|
|
displayed += line_queue_count;
|
|
|
|
|
line_queue_count = 0;
|
|
|
|
|
line_queue = NULL;
|
|
|
|
|
} else if (line_queue_count > TRACE_COUNT) {
|
|
|
|
|
line_queue = line_queue->next;
|
|
|
|
|
line_queue_count--;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
|
|
|
|
|
line = line->next;
|
|
|
|
|
}
|
|
|
|
|
pthread_mutex_unlock(&syme->source_lock);
|
|
|
|
|
if (more)
|
|
|
|
|
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
|
|
|
|
|
}
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-06-06 07:58:57 +00:00
|
|
|
|
/* DSO for the kernel image; sampled kernel IPs are resolved against it. */
struct dso			*kernel_dso;

/*
 * Symbols will be added here in record_ip and will get out
 * after decayed.
 */
static LIST_HEAD(active_symbols);
/* Serializes insert/remove/walk of the active_symbols list. */
static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Ordering weight: count-1 * count-2 * ... / count-n
|
|
|
|
|
*/
|
|
|
|
|
static double sym_weight(const struct sym_entry *sym)
|
|
|
|
|
{
|
2009-05-29 20:03:07 +00:00
|
|
|
|
double weight = sym->snap_count;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
int counter;
|
|
|
|
|
|
2009-07-24 08:09:50 +00:00
|
|
|
|
if (!display_weighted)
|
|
|
|
|
return weight;
|
|
|
|
|
|
2009-04-20 13:00:56 +00:00
|
|
|
|
for (counter = 1; counter < nr_counters-1; counter++)
|
|
|
|
|
weight *= sym->count[counter];
|
|
|
|
|
|
|
|
|
|
weight /= (sym->count[counter] + 1);
|
|
|
|
|
|
|
|
|
|
return weight;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-05 12:29:10 +00:00
|
|
|
|
/* Sample tallies for the current interval; zeroed by print_sym_table(). */
static long			samples;
static long			userspace_samples;
/* VT100 escape: cursor home + erase display */
static const char		CONSOLE_CLEAR[] = "\033[H\033[2J";
|
|
|
|
|
|
2009-05-29 20:03:07 +00:00
|
|
|
|
static void __list_insert_active_sym(struct sym_entry *syme)
|
2009-05-28 17:55:41 +00:00
|
|
|
|
{
|
|
|
|
|
list_add(&syme->node, &active_symbols);
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-29 20:03:07 +00:00
|
|
|
|
static void list_remove_active_sym(struct sym_entry *syme)
|
|
|
|
|
{
|
|
|
|
|
pthread_mutex_lock(&active_symbols_lock);
|
|
|
|
|
list_del_init(&syme->node);
|
|
|
|
|
pthread_mutex_unlock(&active_symbols_lock);
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
|
|
|
|
|
{
|
|
|
|
|
struct rb_node **p = &tree->rb_node;
|
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
|
struct sym_entry *iter;
|
|
|
|
|
|
|
|
|
|
while (*p != NULL) {
|
|
|
|
|
parent = *p;
|
|
|
|
|
iter = rb_entry(parent, struct sym_entry, rb_node);
|
|
|
|
|
|
2009-05-29 20:03:07 +00:00
|
|
|
|
if (se->weight > iter->weight)
|
2009-05-28 17:55:41 +00:00
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
|
else
|
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rb_link_node(&se->rb_node, parent, p);
|
|
|
|
|
rb_insert_color(&se->rb_node, tree);
|
|
|
|
|
}
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
|
|
|
|
static void print_sym_table(void)
|
|
|
|
|
{
|
2009-06-03 19:48:40 +00:00
|
|
|
|
int printed = 0, j;
|
2009-07-24 08:09:50 +00:00
|
|
|
|
int counter, snap = !display_weighted ? sym_counter : 0;
|
2009-06-05 12:29:10 +00:00
|
|
|
|
float samples_per_sec = samples/delay_secs;
|
|
|
|
|
float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
|
|
|
|
|
float sum_ksamples = 0.0;
|
2009-05-28 17:55:41 +00:00
|
|
|
|
struct sym_entry *syme, *n;
|
|
|
|
|
struct rb_root tmp = RB_ROOT;
|
|
|
|
|
struct rb_node *nd;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-06-05 12:29:10 +00:00
|
|
|
|
samples = userspace_samples = 0;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
/* Sort the active symbols */
|
2009-05-29 20:03:07 +00:00
|
|
|
|
pthread_mutex_lock(&active_symbols_lock);
|
|
|
|
|
syme = list_entry(active_symbols.next, struct sym_entry, node);
|
|
|
|
|
pthread_mutex_unlock(&active_symbols_lock);
|
|
|
|
|
|
|
|
|
|
list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
|
2009-07-24 08:09:50 +00:00
|
|
|
|
syme->snap_count = syme->count[snap];
|
2009-05-29 20:03:07 +00:00
|
|
|
|
if (syme->snap_count != 0) {
|
|
|
|
|
syme->weight = sym_weight(syme);
|
2009-05-28 17:55:41 +00:00
|
|
|
|
rb_insert_active_sym(&tmp, syme);
|
2009-06-05 12:29:10 +00:00
|
|
|
|
sum_ksamples += syme->snap_count;
|
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 07:57:56 +00:00
|
|
|
|
|
|
|
|
|
for (j = 0; j < nr_counters; j++)
|
2009-05-28 17:55:41 +00:00
|
|
|
|
syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
|
|
|
|
|
} else
|
2009-05-29 20:03:07 +00:00
|
|
|
|
list_remove_active_sym(syme);
|
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 07:57:56 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-04 18:48:04 +00:00
|
|
|
|
puts(CONSOLE_CLEAR);
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
|
|
|
|
printf(
|
|
|
|
|
"------------------------------------------------------------------------------\n");
|
2009-06-03 17:17:25 +00:00
|
|
|
|
printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [",
|
2009-06-05 12:29:10 +00:00
|
|
|
|
samples_per_sec,
|
|
|
|
|
100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-07-24 08:09:50 +00:00
|
|
|
|
if (nr_counters == 1 || !display_weighted) {
|
perf_counter tools: Define and use our own u64, s64 etc. definitions
On 64-bit powerpc, __u64 is defined to be unsigned long rather than
unsigned long long. This causes compiler warnings every time we
print a __u64 value with %Lx.
Rather than changing __u64, we define our own u64 to be unsigned long
long on all architectures, and similarly s64 as signed long long.
For consistency we also define u32, s32, u16, s16, u8 and s8. These
definitions are put in a new header, types.h, because these definitions
are needed in util/string.h and util/symbol.h.
The main change here is the mechanical change of __[us]{64,32,16,8}
to remove the "__". The other changes are:
* Create types.h
* Include types.h in perf.h, util/string.h and util/symbol.h
* Add types.h to the LIB_H definition in Makefile
* Added (u64) casts in process_overflow_event() and print_sym_table()
to kill two remaining warnings.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
LKML-Reference: <19003.33494.495844.956580@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-19 12:21:42 +00:00
|
|
|
|
printf("%Ld", (u64)attrs[0].sample_period);
|
2009-06-05 11:27:02 +00:00
|
|
|
|
if (freq)
|
|
|
|
|
printf("Hz ");
|
|
|
|
|
else
|
|
|
|
|
printf(" ");
|
|
|
|
|
}
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-07-24 08:09:50 +00:00
|
|
|
|
if (!display_weighted)
|
|
|
|
|
printf("%s", event_name(sym_counter));
|
|
|
|
|
else for (counter = 0; counter < nr_counters; counter++) {
|
2009-04-20 13:00:56 +00:00
|
|
|
|
if (counter)
|
|
|
|
|
printf("/");
|
|
|
|
|
|
|
|
|
|
printf("%s", event_name(counter));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
printf( "], ");
|
|
|
|
|
|
2009-05-26 07:17:18 +00:00
|
|
|
|
if (target_pid != -1)
|
|
|
|
|
printf(" (target_pid: %d", target_pid);
|
2009-04-20 13:00:56 +00:00
|
|
|
|
else
|
|
|
|
|
printf(" (all");
|
|
|
|
|
|
|
|
|
|
if (profile_cpu != -1)
|
|
|
|
|
printf(", cpu: %d)\n", profile_cpu);
|
|
|
|
|
else {
|
2009-05-26 07:17:18 +00:00
|
|
|
|
if (target_pid != -1)
|
2009-04-20 13:00:56 +00:00
|
|
|
|
printf(")\n");
|
|
|
|
|
else
|
|
|
|
|
printf(", %d CPUs)\n", nr_cpus);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
printf("------------------------------------------------------------------------------\n\n");
|
|
|
|
|
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
if (sym_filter_entry) {
|
|
|
|
|
show_details(sym_filter_entry);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2009-04-20 13:00:56 +00:00
|
|
|
|
if (nr_counters == 1)
|
2009-06-05 12:29:10 +00:00
|
|
|
|
printf(" samples pcnt");
|
2009-04-20 13:00:56 +00:00
|
|
|
|
else
|
2009-06-05 12:29:10 +00:00
|
|
|
|
printf(" weight samples pcnt");
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
|
|
|
|
printf(" RIP kernel function\n"
|
2009-06-05 12:29:10 +00:00
|
|
|
|
" ______ _______ _____ ________________ _______________\n\n"
|
2009-04-20 13:00:56 +00:00
|
|
|
|
);
|
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
|
|
|
|
|
struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
|
|
|
|
|
struct symbol *sym = (struct symbol *)(syme + 1);
|
2009-06-04 13:19:47 +00:00
|
|
|
|
double pcnt;
|
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 07:57:56 +00:00
|
|
|
|
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
if (++printed > print_entries || (int)syme->snap_count < count_filter)
|
2009-05-29 20:03:07 +00:00
|
|
|
|
continue;
|
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 07:57:56 +00:00
|
|
|
|
|
2009-06-05 12:29:10 +00:00
|
|
|
|
pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
|
|
|
|
|
sum_ksamples));
|
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 07:57:56 +00:00
|
|
|
|
|
2009-07-24 08:09:50 +00:00
|
|
|
|
if (nr_counters == 1 || !display_weighted)
|
2009-06-05 12:29:10 +00:00
|
|
|
|
printf("%20.2f - ", syme->weight);
|
perf top: Reduce display overhead
Iterate over the symbol table once per display interval, and
copy/sort/tally/decay only those symbols which are active.
Before:
top - 10:14:53 up 4:08, 17 users, load average: 1.17, 1.53, 1.49
Tasks: 273 total, 5 running, 268 sleeping, 0 stopped, 0 zombie
Cpu(s): 6.9%us, 38.2%sy, 0.0%ni, 19.9%id, 0.0%wa, 0.0%hi, 35.0%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28504 root 20 0 1044 260 164 S 58 0.0 0:04.19 2 netserver
28499 root 20 0 1040 412 316 R 51 0.0 0:04.15 0 netperf
28500 root 20 0 1040 408 316 R 50 0.0 0:04.14 1 netperf
28503 root 20 0 1044 260 164 S 50 0.0 0:04.01 1 netserver
28501 root 20 0 1044 260 164 S 49 0.0 0:03.99 0 netserver
28502 root 20 0 1040 412 316 S 43 0.0 0:03.96 2 netperf
28468 root 20 0 1892m 325m 972 S 16 10.8 0:10.50 3 perf
28467 root 20 0 1892m 325m 972 R 2 10.8 0:00.72 3 perf
After:
top - 10:16:30 up 4:10, 17 users, load average: 2.27, 1.88, 1.62
Tasks: 273 total, 6 running, 267 sleeping, 0 stopped, 0 zombie
Cpu(s): 2.5%us, 39.7%sy, 0.0%ni, 24.6%id, 0.0%wa, 0.0%hi, 33.3%si, 0.0%st
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ P COMMAND
28590 root 20 0 1040 412 316 S 54 0.0 0:07.85 2 netperf
28589 root 20 0 1044 260 164 R 54 0.0 0:07.84 0 netserver
28588 root 20 0 1040 412 316 R 50 0.0 0:07.89 1 netperf
28591 root 20 0 1044 256 164 S 50 0.0 0:07.82 1 netserver
28587 root 20 0 1040 408 316 R 47 0.0 0:07.61 0 netperf
28592 root 20 0 1044 260 164 R 47 0.0 0:07.85 2 netserver
28378 root 20 0 8732 1300 860 R 2 0.0 0:01.81 3 top
28577 root 20 0 1892m 165m 972 R 2 5.5 0:00.48 3 perf
28578 root 20 0 1892m 165m 972 S 2 5.5 0:00.04 3 perf
[ Impact: optimization ]
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-25 07:57:56 +00:00
|
|
|
|
else
|
2009-06-05 12:29:10 +00:00
|
|
|
|
printf("%9.1f %10ld - ", syme->weight, syme->snap_count);
|
2009-06-04 13:19:47 +00:00
|
|
|
|
|
2009-07-02 18:14:34 +00:00
|
|
|
|
percent_color_fprintf(stdout, "%4.1f%%", pcnt);
|
2009-07-02 06:09:46 +00:00
|
|
|
|
printf(" - %016llx : %s", sym->start, sym->name);
|
|
|
|
|
if (sym->module)
|
|
|
|
|
printf("\t[%s]", sym->module->name);
|
|
|
|
|
printf("\n");
|
2009-04-20 13:00:56 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
/*
 * Prompt on stdout with @msg and read one line from stdin.  When the
 * line consists solely of decimal digits, store its value in *target;
 * otherwise (or on read failure) *target is left untouched.
 *
 * Fixes vs. original: the early return on getline() failure leaked the
 * buffer getline may already have allocated; isdigit() was called on a
 * plain char, which is undefined for negative values.
 */
static void prompt_integer(int *target, const char *msg)
{
	char *buf = NULL, *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;	/* was: return -- leaked buf on EOF/error */

	/* Strip the trailing newline getline() keeps. */
	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit((unsigned char)*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Read an integer from stdin and store it in *target only when it is a
 * valid percentage (0..100 inclusive); otherwise *target is untouched.
 */
static void prompt_percent(int *target, const char *msg)
{
	int answer = 0;

	prompt_integer(&answer, msg);
	if (answer >= 0 && answer <= 100)
		*target = answer;
}
|
|
|
|
|
|
|
|
|
|
static void prompt_symbol(struct sym_entry **target, const char *msg)
|
|
|
|
|
{
|
|
|
|
|
char *buf = malloc(0), *p;
|
|
|
|
|
struct sym_entry *syme = *target, *n, *found = NULL;
|
|
|
|
|
size_t dummy = 0;
|
|
|
|
|
|
|
|
|
|
/* zero counters of active symbol */
|
|
|
|
|
if (syme) {
|
|
|
|
|
pthread_mutex_lock(&syme->source_lock);
|
|
|
|
|
__zero_source_counters(syme);
|
|
|
|
|
*target = NULL;
|
|
|
|
|
pthread_mutex_unlock(&syme->source_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fprintf(stdout, "\n%s: ", msg);
|
|
|
|
|
if (getline(&buf, &dummy, stdin) < 0)
|
|
|
|
|
goto out_free;
|
|
|
|
|
|
|
|
|
|
p = strchr(buf, '\n');
|
|
|
|
|
if (p)
|
|
|
|
|
*p = 0;
|
|
|
|
|
|
|
|
|
|
pthread_mutex_lock(&active_symbols_lock);
|
|
|
|
|
syme = list_entry(active_symbols.next, struct sym_entry, node);
|
|
|
|
|
pthread_mutex_unlock(&active_symbols_lock);
|
|
|
|
|
|
|
|
|
|
list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
|
|
|
|
|
struct symbol *sym = (struct symbol *)(syme + 1);
|
|
|
|
|
|
|
|
|
|
if (!strcmp(buf, sym->name)) {
|
|
|
|
|
found = syme;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!found) {
|
|
|
|
|
fprintf(stderr, "Sorry, %s is not active.\n", sym_filter);
|
|
|
|
|
sleep(1);
|
|
|
|
|
return;
|
|
|
|
|
} else
|
|
|
|
|
parse_source(found);
|
|
|
|
|
|
|
|
|
|
out_free:
|
|
|
|
|
free(buf);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void print_known_keys(void)
|
|
|
|
|
{
|
|
|
|
|
fprintf(stdout, "\nknown keys:\n");
|
|
|
|
|
fprintf(stdout, "\t[d] select display delay.\n");
|
|
|
|
|
fprintf(stdout, "\t[e] select display entries (lines).\n");
|
2009-07-24 08:09:50 +00:00
|
|
|
|
fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_counter));
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
fprintf(stdout, "\t[f] select normal display count filter.\n");
|
|
|
|
|
fprintf(stdout, "\t[F] select annotation display count filter (percentage).\n");
|
|
|
|
|
fprintf(stdout, "\t[qQ] quit.\n");
|
|
|
|
|
fprintf(stdout, "\t[s] select annotation symbol and start annotation.\n");
|
|
|
|
|
fprintf(stdout, "\t[S] stop annotation, revert to normal display.\n");
|
2009-07-24 08:09:50 +00:00
|
|
|
|
fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
|
|
|
|
|
fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 1 : 0);
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void handle_keypress(int c)
|
|
|
|
|
{
|
|
|
|
|
int once = 0;
|
|
|
|
|
repeat:
|
|
|
|
|
switch (c) {
|
|
|
|
|
case 'd':
|
|
|
|
|
prompt_integer(&delay_secs, "Enter display delay");
|
|
|
|
|
break;
|
|
|
|
|
case 'e':
|
|
|
|
|
prompt_integer(&print_entries, "Enter display entries (lines)");
|
|
|
|
|
break;
|
|
|
|
|
case 'E':
|
|
|
|
|
if (nr_counters > 1) {
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
fprintf(stderr, "\nAvailable events:");
|
|
|
|
|
for (i = 0; i < nr_counters; i++)
|
|
|
|
|
fprintf(stderr, "\n\t%d %s", i, event_name(i));
|
|
|
|
|
|
|
|
|
|
prompt_integer(&sym_counter, "Enter details event counter");
|
|
|
|
|
|
|
|
|
|
if (sym_counter >= nr_counters) {
|
|
|
|
|
fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
|
|
|
|
|
sym_counter = 0;
|
|
|
|
|
sleep(1);
|
|
|
|
|
}
|
|
|
|
|
} else sym_counter = 0;
|
|
|
|
|
break;
|
|
|
|
|
case 'f':
|
|
|
|
|
prompt_integer(&count_filter, "Enter display event count filter");
|
|
|
|
|
break;
|
|
|
|
|
case 'F':
|
|
|
|
|
prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
|
|
|
|
|
break;
|
|
|
|
|
case 'q':
|
|
|
|
|
case 'Q':
|
|
|
|
|
printf("exiting.\n");
|
|
|
|
|
exit(0);
|
|
|
|
|
case 's':
|
|
|
|
|
prompt_symbol(&sym_filter_entry, "Enter details symbol");
|
|
|
|
|
break;
|
|
|
|
|
case 'S':
|
|
|
|
|
if (!sym_filter_entry)
|
|
|
|
|
break;
|
|
|
|
|
else {
|
|
|
|
|
struct sym_entry *syme = sym_filter_entry;
|
|
|
|
|
|
|
|
|
|
pthread_mutex_lock(&syme->source_lock);
|
|
|
|
|
sym_filter_entry = NULL;
|
|
|
|
|
__zero_source_counters(syme);
|
|
|
|
|
pthread_mutex_unlock(&syme->source_lock);
|
|
|
|
|
}
|
|
|
|
|
break;
|
2009-07-24 08:09:50 +00:00
|
|
|
|
case 'w':
|
|
|
|
|
display_weighted = ~display_weighted;
|
|
|
|
|
break;
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
case 'z':
|
|
|
|
|
zero = ~zero;
|
|
|
|
|
break;
|
|
|
|
|
default: {
|
|
|
|
|
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
|
|
|
|
|
struct termios tc, save;
|
|
|
|
|
|
|
|
|
|
if (!once) {
|
|
|
|
|
print_known_keys();
|
|
|
|
|
once++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
tcgetattr(0, &save);
|
|
|
|
|
tc = save;
|
|
|
|
|
tc.c_lflag &= ~(ICANON | ECHO);
|
|
|
|
|
tc.c_cc[VMIN] = 0;
|
|
|
|
|
tc.c_cc[VTIME] = 0;
|
|
|
|
|
tcsetattr(0, TCSANOW, &tc);
|
|
|
|
|
|
|
|
|
|
poll(&stdin_poll, 1, -1);
|
|
|
|
|
c = getc(stdin);
|
|
|
|
|
|
|
|
|
|
tcsetattr(0, TCSAFLUSH, &save);
|
|
|
|
|
goto repeat;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-07-01 10:37:06 +00:00
|
|
|
|
/*
 * Display thread main loop: redraw the symbol table every delay_secs
 * seconds until a key is pressed, then hand the key to
 * handle_keypress() and start over.  Never returns in practice
 * (terminated via exit(0) in handle_keypress or thread cancellation).
 *
 * Fix vs. previous version: the redraw loop condition was written as
 * "!poll(...) == 1", which only works by accident of operator
 * precedence (! binds before ==).  It is equivalent to continuing
 * while poll() returns 0 (timeout), which is now spelled out
 * explicitly; behavior is unchanged for all poll() return values
 * (0 = timeout, 1 = input ready, -1 = error both break the loop).
 */
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;

	/* put the terminal in raw, non-blocking mode for key polling */
	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	/* re-read delay_secs each pass: 'd' may have changed it */
	delay_msecs = delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return*/
	getc(stdin);

	/* redraw until stdin has data (or poll errors out) */
	do {
		print_sym_table();
	} while (poll(&stdin_poll, 1, delay_msecs) == 0);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(c);
	goto repeat;

	return NULL;
}
|
|
|
|
|
|
2009-06-30 23:00:46 +00:00
|
|
|
|
/* Tag samples to be skipped. */
/*
 * NULL-terminated list of symbol names whose samples are ignored:
 * symbol_filter() sets sym_entry->skip for each of these.  They are
 * various architectures' idle-loop entry points, where the profiler
 * would otherwise attribute most of an idle system's samples.
 */
static const char *skip_symbols[] = {
	"default_idle",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"ppc64_runlatch_off",		/* ppc64 idle path */
	"pseries_dedicated_idle_sleep",	/* ppc64 pseries idle path */
	NULL				/* terminator - keep last */
};
|
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
static int symbol_filter(struct dso *self, struct symbol *sym)
|
2009-04-20 13:00:56 +00:00
|
|
|
|
{
|
2009-05-28 17:55:41 +00:00
|
|
|
|
struct sym_entry *syme;
|
|
|
|
|
const char *name = sym->name;
|
2009-06-30 23:00:46 +00:00
|
|
|
|
int i;
|
2009-05-28 17:55:41 +00:00
|
|
|
|
|
2009-06-30 23:00:47 +00:00
|
|
|
|
/*
|
|
|
|
|
* ppc64 uses function descriptors and appends a '.' to the
|
|
|
|
|
* start of every instruction address. Remove it.
|
|
|
|
|
*/
|
|
|
|
|
if (name[0] == '.')
|
|
|
|
|
name++;
|
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
if (!strcmp(name, "_text") ||
|
|
|
|
|
!strcmp(name, "_etext") ||
|
|
|
|
|
!strcmp(name, "_sinittext") ||
|
|
|
|
|
!strncmp("init_module", name, 11) ||
|
|
|
|
|
!strncmp("cleanup_module", name, 14) ||
|
|
|
|
|
strstr(name, "_text_start") ||
|
|
|
|
|
strstr(name, "_text_end"))
|
2009-04-20 13:00:56 +00:00
|
|
|
|
return 1;
|
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
syme = dso__sym_priv(self, sym);
|
perf_counter tools: Fix/resurrect perf top annotation in a simple interactive form
perf top used to have annotation support, but it has bitrotted and
removed.
This patch restores that: it allows the user to select any symbol
in kernel space for source level annotation on the fly, switch
between event counters and alter display variables. When symbol
details are being displayed, stopping annotation reverts to normal.
known keys:
[d] select display delay.
[e] select display entries (lines).
[E] select annotation event counter.
[f] select normal display count filter.
[F] select annotation display count filter (percentage).
[qQ] quit.
[s] select annotation symbol and start annotation.
[S] stop annotation, revert to normal display.
[z] toggle event count zeroing.
Sample:
------------------------------------------------------------------------------
PerfTop: 16719 irqs/sec kernel:78.7% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing cache-misses for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% /* adjust length to remove Ethernet CRC */
0 0.0% if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
0 0.0% length -= 4;
436 5.0% f039: 41 f6 84 24 5c 29 00 testb $0x1,0x295c(%r12)
0 0.0% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
0 0.0% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
811 9.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
0 0.0% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
7226 82.6% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Available events:
0 cache-misses
1 cache-references
2 instructions
3 cycles
Enter details event counter: 2
------------------------------------------------------------------------------
PerfTop: 15035 irqs/sec kernel:79.0% [cache-misses/cache-references/instructions/cycles], (all, 4 CPUs)
------------------------------------------------------------------------------
Showing instructions for e1000_clean_rx_irq
Events Pcnt (>=3%)
0 0.0% int *work_done, int work_to_do)
0 0.0% {
175 0.9% eebf: 55 push %rbp
1898 9.8% eec0: 48 89 e5 mov %rsp,%rbp
0 0.0%
0 0.0% i = rx_ring->next_to_clean;
140 0.7% ef0a: 0f b7 41 1a movzwl 0x1a(%rcx),%eax
670 3.4% ef0e: 89 45 ac mov %eax,-0x54(%rbp)
0 0.0% {
0 0.0% memcpy(skb->data + offset, from, len);
91 0.5% f07b: 49 8b b6 e8 00 00 00 mov 0xe8(%r14),%rsi
1153 5.9% f082: 48 8b b8 e8 00 00 00 mov 0xe8(%rax),%rdi
42 0.2% f089: 8b 4d 84 mov -0x7c(%rbp),%ecx
14 0.1% f08c: 48 83 ef 02 sub $0x2,%rdi
0 0.0% f090: 48 83 ee 02 sub $0x2,%rsi
1618 8.3% f094: f3 a4 rep movsb %ds:(%rsi),%es:(%rdi)
0 0.0%
0 0.0% /* return some buffers to hardware, one at a time is too slow */
0 0.0% if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
867 4.5% f0e7: 83 7d b0 0f cmpl $0xf,-0x50(%rbp)
0 0.0%
0 0.0% while (rx_desc->status & E1000_RXD_STAT_DD) {
37 0.2% f114: 41 f6 47 0c 01 testb $0x1,0xc(%r15)
4047 20.8% f119: 0f 85 24 fe ff ff jne ef43 <e1000_clean_rx_irq+0x84>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-07-22 18:36:03 +00:00
|
|
|
|
pthread_mutex_init(&syme->source_lock, NULL);
|
|
|
|
|
if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
|
|
|
|
|
sym_filter_entry = syme;
|
|
|
|
|
|
2009-06-30 23:00:46 +00:00
|
|
|
|
for (i = 0; skip_symbols[i]; i++) {
|
|
|
|
|
if (!strcmp(skip_symbols[i], name)) {
|
|
|
|
|
syme->skip = 1;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
static int parse_symbols(void)
|
2009-04-20 13:00:56 +00:00
|
|
|
|
{
|
2009-05-28 17:55:41 +00:00
|
|
|
|
struct rb_node *node;
|
|
|
|
|
struct symbol *sym;
|
2009-07-02 06:09:46 +00:00
|
|
|
|
int modules = vmlinux ? 1 : 0;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
|
|
|
|
|
if (kernel_dso == NULL)
|
|
|
|
|
return -1;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-07-02 06:09:46 +00:00
|
|
|
|
if (dso__load_kernel(kernel_dso, vmlinux, symbol_filter, verbose, modules) <= 0)
|
2009-05-28 17:55:41 +00:00
|
|
|
|
goto out_delete_dso;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
node = rb_first(&kernel_dso->syms);
|
|
|
|
|
sym = rb_entry(node, struct symbol, rb_node);
|
|
|
|
|
min_ip = sym->start;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
node = rb_last(&kernel_dso->syms);
|
|
|
|
|
sym = rb_entry(node, struct symbol, rb_node);
|
2009-05-29 06:23:16 +00:00
|
|
|
|
max_ip = sym->end;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
if (dump_symtab)
|
2009-05-29 04:46:46 +00:00
|
|
|
|
dso__fprintf(kernel_dso, stderr);
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
return 0;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-05-28 17:55:41 +00:00
|
|
|
|
out_delete_dso:
|
|
|
|
|
dso__delete(kernel_dso);
|
|
|
|
|
kernel_dso = NULL;
|
|
|
|
|
return -1;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Resolve a sampled kernel IP to a symbol and record the hit:
 * (the lookup is delegated to dso__find_symbol(), which searches
 * the kernel DSO's symbol tree)
 */
static void record_ip(u64 ip, int counter)
{
	struct symbol *sym = dso__find_symbol(kernel_dso, ip);

	if (sym != NULL) {
		struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);

		if (!syme->skip) {
			/* per-counter hit count for the display thread */
			syme->count[counter]++;
			record_precise_ip(syme, counter, ip);

			/*
			 * Put the symbol on the active list (once) so the
			 * display thread iterates over it; the lock guards
			 * against concurrent list walks/decay.
			 */
			pthread_mutex_lock(&active_symbols_lock);
			if (list_empty(&syme->node) || !syme->node.next)
				__list_insert_active_sym(syme);
			pthread_mutex_unlock(&active_symbols_lock);
			return;
		}
	}

	/*
	 * Unresolvable or skipped IP: undo the samples++ done by
	 * process_event() so the stats only count recorded hits.
	 */
	samples--;
}
|
|
|
|
|
|
2009-06-25 09:27:12 +00:00
|
|
|
|
/*
 * Account one sampled IP: user-space hits are only tallied in
 * userspace_samples, kernel hits are resolved via record_ip().
 */
static void process_event(u64 ip, int counter, int user)
{
	samples++;

	if (user)
		userspace_samples++;
	else
		record_ip(ip, counter);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Per-(cpu, counter) view of one mmap'ed sample ring buffer.
 */
struct mmap_data {
	int counter;		/* index of the counter sampled into this buffer */
	void *base;		/* mmap base: control page followed by data pages */
	int mask;		/* data-area size - 1; ring offsets are ANDed with it */
	unsigned int prev;	/* ring offset we have consumed up to so far */
};
|
|
|
|
|
|
|
|
|
|
static unsigned int mmap_read_head(struct mmap_data *md)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter_mmap_page *pc = md->base;
|
|
|
|
|
int head;
|
|
|
|
|
|
|
|
|
|
head = pc->data_head;
|
|
|
|
|
rmb();
|
|
|
|
|
|
|
|
|
|
return head;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Timestamps of the previous and current mmap_read_counter() passes;
 * used to report how long ago we last read when we fail to keep up
 * with the kernel's writer.
 */
struct timeval last_read, this_read;
|
|
|
|
|
|
2009-06-06 21:10:43 +00:00
|
|
|
|
/*
 * Drain newly written records from one counter's ring buffer, feeding
 * PERF_EVENT_SAMPLE records into process_event().
 */
static void mmap_read_counter(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	/* data area starts one page past base (base[0] is the control page) */
	unsigned char *data = md->base + page_size;
	int diff;

	gettimeofday(&this_read, NULL);

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		struct timeval iv;
		unsigned long msecs;

		timersub(&this_read, &last_read, &iv);
		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

		fprintf(stderr, "WARNING: failed to keep up with mmap data."
				" Last read %lu msecs ago.\n", msecs);

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	last_read = this_read;

	for (; old != head;) {
		/* On-the-wire layouts of the record types we care about: */
		struct ip_event {
			struct perf_event_header header;
			u64 ip;
			u32 pid, target_pid;
		};
		struct mmap_event {
			struct perf_event_header header;
			u32 pid, target_pid;
			u64 start;
			u64 len;
			u64 pgoff;
			char filename[PATH_MAX];
		};

		typedef union event_union {
			struct perf_event_header header;
			struct ip_event ip;
			struct mmap_event mmap;
		} event_t;

		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			/* copy the record piecewise across the wrap point */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		old += size;

		if (event->header.type == PERF_EVENT_SAMPLE) {
			/* cpumode bits tell us whether the IP is user or kernel */
			int user =
	(event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER;
			process_event(event->ip.ip, md->counter, user);
		}
	}

	md->prev = old;
}
|
|
|
|
|
|
2009-05-24 06:35:49 +00:00
|
|
|
|
/* One pollable fd and one mmap'ed ring buffer per (cpu, counter) pair. */
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
|
|
|
|
|
|
2009-06-06 21:10:43 +00:00
|
|
|
|
static void mmap_read(void)
|
|
|
|
|
{
|
|
|
|
|
int i, counter;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < nr_cpus; i++) {
|
|
|
|
|
for (counter = 0; counter < nr_counters; counter++)
|
|
|
|
|
mmap_read_counter(&mmap_array[i][counter]);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-07 15:31:52 +00:00
|
|
|
|
/* number of entries filled into event_array[] by start_counter() */
int nr_poll;
/* fd of the group leader; reset to -1 before each CPU's first counter */
int group_fd;
|
|
|
|
|
|
|
|
|
|
/*
 * Open, configure and mmap one counter for one CPU slot:
 *  - picks the target CPU (i, the profiled CPU, or -1 for per-pid),
 *  - opens the counter, falling back from HW cycles to the sw
 *    cpu-clock counter when no PMU support is available,
 *  - registers the fd in event_array[] for poll() and mmaps the
 *    sample ring buffer into mmap_array[i][counter].
 * Fatal errors terminate via die().
 */
static void start_counter(int i, int counter)
{
	struct perf_counter_attr *attr;
	int cpu;

	cpu = profile_cpu;
	if (target_pid == -1 && profile_cpu == -1)
		cpu = i;	/* system-wide: one counter per online CPU */

	attr = attrs + counter;

	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
	attr->freq = freq;
	/* inheritance only makes sense for per-task counting (cpu == -1) */
	attr->inherit = (cpu < 0) && inherit;

try_again:
	fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);

	if (fd[i][counter] < 0) {
		int err = errno;	/* save before any library call clobbers it */

		if (err == EPERM)
			die("No permission - are you root?\n");
		/*
		 * If it's cycles then fall back to hrtimer
		 * based cpu-clock-tick sw counter, which
		 * is always available even if no PMU support:
		 */
		if (attr->type == PERF_TYPE_HARDWARE
			&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

			if (verbose)
				warning(" ... trying to fall back to cpu-clock-ticks\n");

			attr->type = PERF_TYPE_SOFTWARE;
			attr->config = PERF_COUNT_SW_CPU_CLOCK;
			goto try_again;
		}
		printf("\n");
		error("perfcounter syscall returned with %d (%s)\n",
			fd[i][counter], strerror(err));
		die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
		/* NOTE(review): die() does not return (the EPERM branch above
		 * relies on that), so this exit(-1) is unreachable. */
		exit(-1);
	}
	assert(fd[i][counter] >= 0);
	/* non-blocking so the poll()-driven read loop never stalls */
	fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

	/*
	 * First counter acts as the group leader:
	 */
	if (group && group_fd == -1)
		group_fd = fd[i][counter];

	event_array[nr_poll].fd = fd[i][counter];
	event_array[nr_poll].events = POLLIN;
	nr_poll++;

	/* control page + mmap_pages data pages; mask covers the data area */
	mmap_array[i][counter].counter = counter;
	mmap_array[i][counter].prev = 0;
	mmap_array[i][counter].mask = mmap_pages*page_size - 1;
	mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
			PROT_READ, MAP_SHARED, fd[i][counter], 0);
	if (mmap_array[i][counter].base == MAP_FAILED)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
|
|
|
|
|
|
|
|
|
|
static int __cmd_top(void)
|
|
|
|
|
{
|
|
|
|
|
pthread_t thread;
|
|
|
|
|
int i, counter;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < nr_cpus; i++) {
|
|
|
|
|
group_fd = -1;
|
2009-06-07 15:31:52 +00:00
|
|
|
|
for (counter = 0; counter < nr_counters; counter++)
|
|
|
|
|
start_counter(i, counter);
|
2009-04-20 13:00:56 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-06 21:10:43 +00:00
|
|
|
|
/* Wait for a minimal set of events before starting the snapshot */
|
|
|
|
|
poll(event_array, nr_poll, 100);
|
|
|
|
|
|
|
|
|
|
mmap_read();
|
|
|
|
|
|
2009-04-20 13:00:56 +00:00
|
|
|
|
if (pthread_create(&thread, NULL, display_thread, NULL)) {
|
|
|
|
|
printf("Could not create display thread.\n");
|
|
|
|
|
exit(-1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (realtime_prio) {
|
|
|
|
|
struct sched_param param;
|
|
|
|
|
|
|
|
|
|
param.sched_priority = realtime_prio;
|
|
|
|
|
if (sched_setscheduler(0, SCHED_FIFO, ¶m)) {
|
|
|
|
|
printf("Could not set realtime priority.\n");
|
|
|
|
|
exit(-1);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
while (1) {
|
2009-06-05 12:29:10 +00:00
|
|
|
|
int hits = samples;
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-06-06 21:10:43 +00:00
|
|
|
|
mmap_read();
|
2009-04-20 13:00:56 +00:00
|
|
|
|
|
2009-06-05 12:29:10 +00:00
|
|
|
|
if (hits == samples)
|
2009-04-20 13:00:56 +00:00
|
|
|
|
ret = poll(event_array, nr_poll, 100);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2009-05-26 07:17:18 +00:00
|
|
|
|
|
|
|
|
|
/* Usage lines printed by parse_options()/usage_with_options(). */
static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};
|
|
|
|
|
|
|
|
|
|
/* Command-line option table for 'perf top' (see top_usage above it). */
static const struct option options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &target_pid,
		    "profile events on existing pid"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_INTEGER('m', "mmap-pages", &mmap_pages,
		    "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate - requires -k option"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};
|
|
|
|
|
|
2009-07-01 10:37:06 +00:00
|
|
|
|
/*
 * Entry point for 'perf top': parse options, sanity-check the
 * CPU/PID combination, load kernel symbols (and the optional
 * annotation target), fill in default sample periods, then hand
 * off to the sampling loop in __cmd_top().
 */
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	int c;

	symbol__init();

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/* -F overrides the default period with frequency-based sampling */
	if (freq) {
		default_interval = freq;
		freq = 1;
	}

	/* CPU and PID are mutually exclusive */
	if (target_pid != -1 && profile_cpu != -1) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		profile_cpu = -1;
	}

	if (!nr_counters)
		nr_counters = 1;

	/* refreshing faster than once a second is not supported */
	if (delay_secs < 1)
		delay_secs = 1;

	parse_symbols();
	parse_source(sym_filter_entry);

	/*
	 * Fill in the ones not specifically initialized via -c:
	 */
	for (c = 0; c < nr_counters; c++) {
		if (!attrs[c].sample_period)
			attrs[c].sample_period = default_interval;
	}

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	/* per-pid or single-CPU profiling needs only one counter slot */
	if (target_pid != -1 || profile_cpu != -1)
		nr_cpus = 1;

	return __cmd_top();
}
|