commit 9a92b479b2
When we get sched traces that involve a task that was created before the event was opened, we won't have a comm event for it. So if we can't find the comm event for a given thread, we look at the trace data, which may contain this information.

Before:

  ata/1:371           |    0.000 ms |  1 | avg: 3988.693 ms | max: 3988.693 ms |
  kondemand/1:421     |    0.096 ms |  3 | avg:  345.346 ms | max: 1035.989 ms |
  kondemand/0:420     |    0.025 ms |  3 | avg:  421.332 ms | max:  964.014 ms |
  :5124:5124          |    0.103 ms |  5 | avg:   74.082 ms | max:  277.194 ms |
  :6244:6244          |    0.691 ms |  9 | avg:  125.655 ms | max:  271.306 ms |
  firefox:5080        |    0.924 ms |  5 | avg:   53.833 ms | max:  257.828 ms |
  npviewer.bin:6225   |   21.871 ms | 53 | avg:   22.462 ms | max:  220.835 ms |
  :6245:6245          |    9.631 ms | 21 | avg:   41.864 ms | max:  213.349 ms |

After:

  ata/1:371           |    0.000 ms |  1 | avg: 3988.693 ms | max: 3988.693 ms |
  kondemand/1:421     |    0.096 ms |  3 | avg:  345.346 ms | max: 1035.989 ms |
  kondemand/0:420     |    0.025 ms |  3 | avg:  421.332 ms | max:  964.014 ms |
  firefox:5124        |    0.103 ms |  5 | avg:   74.082 ms | max:  277.194 ms |
  npviewer.bin:6244   |    0.691 ms |  9 | avg:  125.655 ms | max:  271.306 ms |
  firefox:5080        |    0.924 ms |  5 | avg:   53.833 ms | max:  257.828 ms |
  npviewer.bin:6225   |   21.871 ms | 53 | avg:   22.462 ms | max:  220.835 ms |
  npviewer.bin:6245   |    9.631 ms | 21 | avg:   41.864 ms | max:  213.349 ms |

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1255012632-7882-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
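For context, a caller on the trace-reading side might combine the two APIs added below roughly like this. This is a hypothetical sketch, not the actual builtin-sched.c change; resolve_thread() and the threads/last_match bookkeeping are made up for illustration, only threads__findnew_nocomm() and thread__set_comm() come from this file.

        /*
         * Hypothetical sketch: resolve the comm of a task that predates the
         * session from the name carried in a sched trace record.
         */
        static struct rb_root threads;
        static struct thread *last_match;

        static struct thread *resolve_thread(pid_t pid, const char *trace_comm)
        {
                struct thread *th = threads__findnew_nocomm(pid, &threads,
                                                            &last_match);

                /* No comm event was seen for this task: take the name from the trace */
                if (th && !th->comm && trace_comm &&
                    thread__set_comm(th, trace_comm))
                        return NULL;    /* thread__set_comm() hit -ENOMEM */

                return th;
        }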
231 lines · 4.7 KiB · C
#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "thread.h"
#include "util.h"
#include "debug.h"

static struct thread *thread__new(pid_t pid, int set_comm)
{
        struct thread *self = calloc(1, sizeof(*self));

        if (self != NULL) {
                self->pid = pid;
                if (set_comm) {
                        /* Placeholder name (":<pid>") until the real comm is known */
                        self->comm = malloc(32);
                        if (self->comm)
                                snprintf(self->comm, 32, ":%d", self->pid);
                }
                self->maps = RB_ROOT;
                INIT_LIST_HEAD(&self->removed_maps);
        }

        return self;
}

int thread__set_comm(struct thread *self, const char *comm)
{
        if (self->comm)
                free(self->comm);
        self->comm = strdup(comm);
        return self->comm ? 0 : -ENOMEM;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
        struct rb_node *nd;
        struct map *pos;
        size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
                             self->pid, self->comm);

        for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct map, rb_node);
                ret += map__fprintf(pos, fp);
        }

        ret += fprintf(fp, "Removed maps:\n");

        list_for_each_entry(pos, &self->removed_maps, node)
                ret += map__fprintf(pos, fp);

        return ret;
}

static struct thread *
__threads__findnew(pid_t pid, struct rb_root *threads,
                   struct thread **last_match,
                   int set_comm)
{
        struct rb_node **p = &threads->rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (*last_match && (*last_match)->pid == pid)
                return *last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        *last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        th = thread__new(pid, set_comm);

        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, threads);
                *last_match = th;
        }

        return th;
}

struct thread *
threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
{
        return __threads__findnew(pid, threads, last_match, 1);
}

/*
 * Lookup/create without setting the ":<pid>" placeholder comm: the real
 * name is expected to be filled in later, e.g. from the trace data.
 */
struct thread *
threads__findnew_nocomm(pid_t pid, struct rb_root *threads,
                        struct thread **last_match)
{
        return __threads__findnew(pid, threads, last_match, 0);
}

struct thread *
register_idle_thread(struct rb_root *threads, struct thread **last_match)
{
        struct thread *thread = threads__findnew(0, threads, last_match);

        if (!thread || thread__set_comm(thread, "swapper")) {
                fprintf(stderr, "problem inserting idle task.\n");
                exit(-1);
        }

        return thread;
}

static void thread__remove_overlappings(struct thread *self, struct map *map)
{
        struct rb_node *next = rb_first(&self->maps);

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);

                if (!map__overlap(pos, map))
                        continue;

                if (verbose >= 2) {
                        printf("overlapping maps:\n");
                        map__fprintf(map, stdout);
                        map__fprintf(pos, stdout);
                }

                rb_erase(&pos->rb_node, &self->maps);
                /*
                 * We may have references to this map, for instance in some
                 * hist_entry instances, so just move them to a separate
                 * list.
                 */
                list_add_tail(&pos->node, &self->removed_maps);
        }
}

void maps__insert(struct rb_root *maps, struct map *map)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        const u64 ip = map->start;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&map->rb_node, parent, p);
        rb_insert_color(&map->rb_node, maps);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else if (ip > m->end)
                        p = &(*p)->rb_right;
                else
                        return m;
        }

        return NULL;
}

void thread__insert_map(struct thread *self, struct map *map)
{
        thread__remove_overlappings(self, map);
        maps__insert(&self->maps, map);
}

int thread__fork(struct thread *self, struct thread *parent)
{
        struct rb_node *nd;

        if (self->comm)
                free(self->comm);
        self->comm = strdup(parent->comm);
        if (!self->comm)
                return -ENOMEM;

        for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
                struct map *map = rb_entry(nd, struct map, rb_node);
                struct map *new = map__clone(map);
                if (!new)
                        return -ENOMEM;
                thread__insert_map(self, new);
        }

        return 0;
}

size_t threads__fprintf(FILE *fp, struct rb_root *threads)
{
        size_t ret = 0;
        struct rb_node *nd;

        for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);

                ret += thread__fprintf(pos, fp);
        }

        return ret;
}