perf/core improvements and fixes:

User visible:
 
 - Fix "Command" sort_entry's cmp and collapse function (Jiri Olsa)
 
 - Load map's symtab before 'perf probe' glob matching (Wang Nan)
 
 - Set vmlinux_path__nr_entries to 0 in vmlinux_path__exit, to fix
   the use case where this code is called multiple times, which wasn't
   that common when it was introduced but seems to be now (Wang Nan)
 
 Infrastructure:
 
 - Protect dso symtab and cache operations with a mutex (Namhyung Kim)
 
 - Make all refcnt operations use atomic.h (Arnaldo Carvalho de Melo)
 
 - Install libtraceevent.a into libdir (Wang Nan)
 
 Build fixes:
 
 - Fix one build failure on RHEL5 by making 'perf bench numa' use the
   __weak sched_getcpu() provided by cloexec.h (Arnaldo Carvalho de Melo)
 
 - Fix dwarf-aux.c compilation on i386 (Jiri Olsa)
 
 Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJVWgd/AAoJENZQFvNTUqpAxVcQALoXFAklZESEki/OiE32SOGZ
 QyDwCMoJUA07LcHPgiFDNl51RZf2sZWnHJhJrDmUBMz626zhp6X2c/yJi8lCBXPB
 cBaTRq7iWyycW94F1RQTwSoywKq5ni7KFzAl+GlbIolKG60e277gsbUrbH1rD1os
 d5435LqHS4rUD+4PEU9MZeh0n6LxX2nu/TQsM79/i0Sb+Gq2i7Yn2SRi0CB3Pkpa
 nr/5OBgz6QNPEJHU9tf7QGO0J5dtkYrSTXu9TviOTFQcbgzbcWO9MaoKi39jRhTp
 sNTokYd8reMEAsePTd9spgjjLIZV44CHW5ZiaPCl5PhjTrJh/LATlyjfDH0ZiCAS
 7iFQI4fTLOt5GhfG3acvkfCvIFlibBT1oxJrWqja5uJwCObQY16KdJ8ax4rJWPwg
 JBaX/Btm3ZBzBNKdCtsIxN9Qb/HO53zpwUTJV77lHUC2nIcv5EC5Rx9ireDtrH1n
 9j16yeW2+kymaXVTeocjvS+jIuxUiwt4LSmHRU5Bx7QfIXxUPhbaOzcJChzweXUA
 dN2VZqbPuavmBLYYNmzs597njlGRpxQ/hrb6MEmSBghNbj6zgvuevz0X1dwW3Tut
 vNrWgfeQ+iYcMay1roNzrHkjnr1YddGs/uCEtpQRZbT/C/1Wiv9WU16cpenD4Z17
 h+5WGFLbc7kx8EGCHn5n
 =rKev
 -----END PGP SIGNATURE-----

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - Fix "Command" sort_entry's cmp and collapse function (Jiri Olsa)

  - Load map's symtab before 'perf probe' glob matching (Wang Nan)

  - Set vmlinux_path__nr_entries to 0 in vmlinux_path__exit, to fix
    the use case where this code is called multiple times, which wasn't
    that common when it was introduced but seems to be now (Wang Nan)

Infrastructure changes:

  - Protect dso symtab and cache operations with a mutex (Namhyung Kim)

  - Make all refcnt operations use atomic.h (Arnaldo Carvalho de Melo)

  - Install libtraceevent.a into libdir (Wang Nan)

Build fixes:

  - Fix one build failure on RHEL5 by making 'perf bench numa' use the
    __weak sched_getcpu() provided by cloexec.h (Arnaldo Carvalho de Melo)

  - Fix dwarf-aux.c compilation on i386 (Jiri Olsa)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Ingo Molnar 2015-05-20 13:23:55 +02:00
commit d499c10684
23 changed files with 263 additions and 99 deletions

View File

@ -27,7 +27,7 @@ endef
# the rule that uses them - an example for that is the 'bionic'
# feature check. ]
#
FEATURE_TESTS = \
FEATURE_TESTS ?= \
backtrace \
dwarf \
fortify-source \
@ -53,7 +53,7 @@ FEATURE_TESTS = \
zlib \
lzma
FEATURE_DISPLAY = \
FEATURE_DISPLAY ?= \
dwarf \
glibc \
gtk2 \

View File

@ -64,6 +64,10 @@ typedef struct {
int counter;
} atomic_t;
#ifndef __aligned_u64
# define __aligned_u64 __u64 __attribute__((aligned(8)))
#endif
struct list_head {
struct list_head *next, *prev;
};

View File

@ -34,9 +34,15 @@ INSTALL = install
DESTDIR ?=
DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
ifeq ($(LP64), 1)
libdir_relative = lib64
else
libdir_relative = lib
endif
prefix ?= /usr/local
bindir_relative = bin
bindir = $(prefix)/$(bindir_relative)
libdir = $(prefix)/$(libdir_relative)
man_dir = $(prefix)/share/man
man_dir_SQ = '$(subst ','\'',$(man_dir))'
@ -58,7 +64,7 @@ ifeq ($(prefix),$(HOME))
override plugin_dir = $(HOME)/.traceevent/plugins
set_plugin_dir := 0
else
override plugin_dir = $(prefix)/lib/traceevent/plugins
override plugin_dir = $(libdir)/traceevent/plugins
endif
endif
@ -85,11 +91,11 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
#$(info Determined 'srctree' to be $(srctree))
endif
export prefix bindir src obj
export prefix libdir src obj
# Shell quotes
bindir_SQ = $(subst ','\'',$(bindir))
bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
LIB_FILE = libtraceevent.a libtraceevent.so
@ -240,7 +246,7 @@ endef
install_lib: all_cmd install_plugins
$(call QUIET_INSTALL, $(LIB_FILE)) \
$(call do_install,$(LIB_FILE),$(bindir_SQ))
$(call do_install,$(LIB_FILE),$(libdir_SQ))
install_plugins: $(PLUGINS)
$(call QUIET_INSTALL, trace_plugins) \

View File

@ -8,6 +8,7 @@
#include "../builtin.h"
#include "../util/util.h"
#include "../util/parse-options.h"
#include "../util/cloexec.h"
#include "bench.h"

View File

@ -61,13 +61,13 @@ struct timechart {
tasks_only,
with_backtrace,
topology;
bool force;
/* IO related settings */
u64 io_events;
bool io_only,
skip_eagain;
u64 io_events;
u64 min_time,
merge_dist;
bool force;
};
struct per_pidcomm;

View File

@ -43,7 +43,7 @@ int test__thread_mg_share(void)
leader && t1 && t2 && t3 && other);
mg = leader->mg;
TEST_ASSERT_EQUAL("wrong refcnt", mg->refcnt, 4);
TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 4);
/* test the map groups pointer is shared */
TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
@ -71,25 +71,25 @@ int test__thread_mg_share(void)
machine__remove_thread(machine, other_leader);
other_mg = other->mg;
TEST_ASSERT_EQUAL("wrong refcnt", other_mg->refcnt, 2);
TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&other_mg->refcnt), 2);
TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
/* release thread group */
thread__put(leader);
TEST_ASSERT_EQUAL("wrong refcnt", mg->refcnt, 3);
TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 3);
thread__put(t1);
TEST_ASSERT_EQUAL("wrong refcnt", mg->refcnt, 2);
TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 2);
thread__put(t2);
TEST_ASSERT_EQUAL("wrong refcnt", mg->refcnt, 1);
TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 1);
thread__put(t3);
/* release other group */
thread__put(other_leader);
TEST_ASSERT_EQUAL("wrong refcnt", other_mg->refcnt, 1);
TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&other_mg->refcnt), 1);
thread__put(other);

View File

@ -115,23 +115,19 @@ static int add_cgroup(struct perf_evlist *evlist, char *str)
goto found;
n++;
}
if (cgrp->refcnt == 0)
if (atomic_read(&cgrp->refcnt) == 0)
free(cgrp);
return -1;
found:
cgrp->refcnt++;
atomic_inc(&cgrp->refcnt);
counter->cgrp = cgrp;
return 0;
}
void close_cgroup(struct cgroup_sel *cgrp)
{
if (!cgrp)
return;
/* XXX: not reentrant */
if (--cgrp->refcnt == 0) {
if (cgrp && atomic_dec_and_test(&cgrp->refcnt)) {
close(cgrp->fd);
zfree(&cgrp->name);
free(cgrp);

View File

@ -1,12 +1,14 @@
#ifndef __CGROUP_H__
#define __CGROUP_H__
#include <linux/atomic.h>
struct option;
struct cgroup_sel {
char *name;
int fd;
int refcnt;
atomic_t refcnt;
};

View File

@ -265,6 +265,7 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
*/
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
static void dso__list_add(struct dso *dso)
{
@ -434,7 +435,9 @@ static void check_data_close(void)
*/
void dso__data_close(struct dso *dso)
{
pthread_mutex_lock(&dso__data_open_lock);
close_dso(dso);
pthread_mutex_unlock(&dso__data_open_lock);
}
/**
@ -457,6 +460,8 @@ int dso__data_fd(struct dso *dso, struct machine *machine)
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
pthread_mutex_lock(&dso__data_open_lock);
if (dso->data.fd >= 0)
goto out;
@ -479,6 +484,7 @@ out:
else
dso->data.status = DSO_DATA_STATUS_ERROR;
pthread_mutex_unlock(&dso__data_open_lock);
return dso->data.fd;
}
@ -495,10 +501,12 @@ bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
}
static void
dso_cache__free(struct rb_root *root)
dso_cache__free(struct dso *dso)
{
struct rb_root *root = &dso->data.cache;
struct rb_node *next = rb_first(root);
pthread_mutex_lock(&dso->lock);
while (next) {
struct dso_cache *cache;
@ -507,10 +515,12 @@ dso_cache__free(struct rb_root *root)
rb_erase(&cache->rb_node, root);
free(cache);
}
pthread_mutex_unlock(&dso->lock);
}
static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
const struct rb_root *root = &dso->data.cache;
struct rb_node * const *p = &root->rb_node;
const struct rb_node *parent = NULL;
struct dso_cache *cache;
@ -529,17 +539,20 @@ static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
else
return cache;
}
return NULL;
}
static void
dso_cache__insert(struct rb_root *root, struct dso_cache *new)
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
struct rb_root *root = &dso->data.cache;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct dso_cache *cache;
u64 offset = new->offset;
pthread_mutex_lock(&dso->lock);
while (*p != NULL) {
u64 end;
@ -551,10 +564,17 @@ dso_cache__insert(struct rb_root *root, struct dso_cache *new)
p = &(*p)->rb_left;
else if (offset >= end)
p = &(*p)->rb_right;
else
goto out;
}
rb_link_node(&new->rb_node, parent, p);
rb_insert_color(&new->rb_node, root);
cache = NULL;
out:
pthread_mutex_unlock(&dso->lock);
return cache;
}
static ssize_t
@ -569,19 +589,34 @@ dso_cache__memcpy(struct dso_cache *cache, u64 offset,
}
static ssize_t
dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
dso_cache__read(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size)
{
struct dso_cache *cache;
struct dso_cache *old;
ssize_t ret;
do {
u64 cache_offset;
ret = -ENOMEM;
cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
if (!cache)
break;
return -ENOMEM;
pthread_mutex_lock(&dso__data_open_lock);
/*
* dso->data.fd might be closed if other thread opened another
* file (dso) due to open file limit (RLIMIT_NOFILE).
*/
if (dso->data.fd < 0) {
dso->data.fd = open_dso(dso, machine);
if (dso->data.fd < 0) {
ret = -errno;
dso->data.status = DSO_DATA_STATUS_ERROR;
break;
}
}
cache_offset = offset & DSO__DATA_CACHE_MASK;
@ -591,11 +626,20 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
cache->offset = cache_offset;
cache->size = ret;
dso_cache__insert(&dso->data.cache, cache);
} while (0);
pthread_mutex_unlock(&dso__data_open_lock);
if (ret > 0) {
old = dso_cache__insert(dso, cache);
if (old) {
/* we lose the race */
free(cache);
cache = old;
}
ret = dso_cache__memcpy(cache, offset, data, size);
} while (0);
}
if (ret <= 0)
free(cache);
@ -603,16 +647,16 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
return ret;
}
static ssize_t dso_cache_read(struct dso *dso, u64 offset,
u8 *data, ssize_t size)
static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size)
{
struct dso_cache *cache;
cache = dso_cache__find(&dso->data.cache, offset);
cache = dso_cache__find(dso, offset);
if (cache)
return dso_cache__memcpy(cache, offset, data, size);
else
return dso_cache__read(dso, offset, data, size);
return dso_cache__read(dso, machine, offset, data, size);
}
/*
@ -620,7 +664,8 @@ static ssize_t dso_cache_read(struct dso *dso, u64 offset,
* in the rb_tree. Any read to already cached data is served
* by cached data.
*/
static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
static ssize_t cached_read(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size)
{
ssize_t r = 0;
u8 *p = data;
@ -628,7 +673,7 @@ static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
do {
ssize_t ret;
ret = dso_cache_read(dso, offset, p, size);
ret = dso_cache_read(dso, machine, offset, p, size);
if (ret < 0)
return ret;
@ -648,21 +693,42 @@ static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
return r;
}
static int data_file_size(struct dso *dso)
static int data_file_size(struct dso *dso, struct machine *machine)
{
int ret = 0;
struct stat st;
char sbuf[STRERR_BUFSIZE];
if (!dso->data.file_size) {
if (fstat(dso->data.fd, &st)) {
pr_err("dso mmap failed, fstat: %s\n",
strerror_r(errno, sbuf, sizeof(sbuf)));
return -1;
if (dso->data.file_size)
return 0;
pthread_mutex_lock(&dso__data_open_lock);
/*
* dso->data.fd might be closed if other thread opened another
* file (dso) due to open file limit (RLIMIT_NOFILE).
*/
if (dso->data.fd < 0) {
dso->data.fd = open_dso(dso, machine);
if (dso->data.fd < 0) {
ret = -errno;
dso->data.status = DSO_DATA_STATUS_ERROR;
goto out;
}
dso->data.file_size = st.st_size;
}
return 0;
if (fstat(dso->data.fd, &st) < 0) {
ret = -errno;
pr_err("dso cache fstat failed: %s\n",
strerror_r(errno, sbuf, sizeof(sbuf)));
dso->data.status = DSO_DATA_STATUS_ERROR;
goto out;
}
dso->data.file_size = st.st_size;
out:
pthread_mutex_unlock(&dso__data_open_lock);
return ret;
}
/**
@ -680,17 +746,17 @@ off_t dso__data_size(struct dso *dso, struct machine *machine)
if (fd < 0)
return fd;
if (data_file_size(dso))
if (data_file_size(dso, machine))
return -1;
/* For now just estimate dso data size is close to file size */
return dso->data.file_size;
}
static ssize_t data_read_offset(struct dso *dso, u64 offset,
u8 *data, ssize_t size)
static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size)
{
if (data_file_size(dso))
if (data_file_size(dso, machine))
return -1;
/* Check the offset sanity. */
@ -700,7 +766,7 @@ static ssize_t data_read_offset(struct dso *dso, u64 offset,
if (offset + size < offset)
return -1;
return cached_read(dso, offset, data, size);
return cached_read(dso, machine, offset, data, size);
}
/**
@ -717,10 +783,10 @@ static ssize_t data_read_offset(struct dso *dso, u64 offset,
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size)
{
if (dso__data_fd(dso, machine) < 0)
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
return data_read_offset(dso, offset, data, size);
return data_read_offset(dso, machine, offset, data, size);
}
/**
@ -936,6 +1002,7 @@ struct dso *dso__new(const char *name)
RB_CLEAR_NODE(&dso->rb_node);
INIT_LIST_HEAD(&dso->node);
INIT_LIST_HEAD(&dso->data.open_entry);
pthread_mutex_init(&dso->lock, NULL);
}
return dso;
@ -963,9 +1030,10 @@ void dso__delete(struct dso *dso)
dso__data_close(dso);
auxtrace_cache__free(dso->auxtrace_cache);
dso_cache__free(&dso->data.cache);
dso_cache__free(dso);
dso__free_a2l(dso);
zfree(&dso->symsrc_filename);
pthread_mutex_destroy(&dso->lock);
free(dso);
}

View File

@ -129,6 +129,7 @@ struct dsos {
struct auxtrace_cache;
struct dso {
pthread_mutex_t lock;
struct list_head node;
struct rb_node rb_node; /* rbtree node sorted by long name */
struct rb_root symbols[MAP__NR_TYPES];

View File

@ -994,11 +994,11 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
end -= entry;
if (first) {
strbuf_addf(buf, "@<%s+[%lu-%lu",
strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
name, start, end);
first = false;
} else {
strbuf_addf(buf, ",%lu-%lu",
strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
start, end);
}
}
@ -1057,11 +1057,11 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
start -= entry;
end -= entry;
if (first) {
strbuf_addf(buf, "@<%s+[%lu-%lu",
strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
name, start, end);
first = false;
} else {
strbuf_addf(buf, ",%lu-%lu",
strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
start, end);
}
}

View File

@ -700,14 +700,14 @@ static bool perf_mmap__empty(struct perf_mmap *md)
static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
++evlist->mmap[idx].refcnt;
atomic_inc(&evlist->mmap[idx].refcnt);
}
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
BUG_ON(evlist->mmap[idx].refcnt == 0);
BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);
if (--evlist->mmap[idx].refcnt == 0)
if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
__perf_evlist__munmap(evlist, idx);
}
@ -721,7 +721,7 @@ void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
perf_mmap__write_tail(md, old);
}
if (md->refcnt == 1 && perf_mmap__empty(md))
if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
perf_evlist__mmap_put(evlist, idx);
}
@ -758,7 +758,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
if (evlist->mmap[idx].base != NULL) {
munmap(evlist->mmap[idx].base, evlist->mmap_len);
evlist->mmap[idx].base = NULL;
evlist->mmap[idx].refcnt = 0;
atomic_set(&evlist->mmap[idx].refcnt, 0);
}
auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
}
@ -807,7 +807,7 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
* evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd().
*/
evlist->mmap[idx].refcnt = 2;
atomic_set(&evlist->mmap[idx].refcnt, 2);
evlist->mmap[idx].prev = 0;
evlist->mmap[idx].mask = mp->mask;
evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,

View File

@ -1,6 +1,7 @@
#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1
#include <linux/atomic.h>
#include <linux/list.h>
#include <api/fd/array.h>
#include <stdio.h>
@ -27,7 +28,7 @@ struct record_opts;
struct perf_mmap {
void *base;
int mask;
int refcnt;
atomic_t refcnt;
u64 prev;
struct auxtrace_mmap auxtrace_mmap;
char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
@ -39,6 +40,7 @@ struct perf_evlist {
int nr_entries;
int nr_groups;
int nr_mmaps;
bool overwrite;
size_t mmap_len;
int id_pos;
int is_pos;
@ -47,7 +49,6 @@ struct perf_evlist {
int cork_fd;
pid_t pid;
} workload;
bool overwrite;
struct fdarray pollfd;
struct perf_mmap *mmap;
struct thread_map *threads;

View File

@ -73,7 +73,6 @@ struct perf_evsel {
char *name;
double scale;
const char *unit;
bool snapshot;
struct event_format *tp_format;
union {
void *priv;
@ -86,6 +85,7 @@ struct perf_evsel {
unsigned int sample_size;
int id_pos;
int is_pos;
bool snapshot;
bool supported;
bool needs_swap;
bool no_aux_samples;
@ -93,11 +93,11 @@ struct perf_evsel {
bool system_wide;
bool tracking;
bool per_pkg;
unsigned long *per_pkg_mask;
/* parse modifier helper */
int exclude_GH;
int nr_members;
int sample_read;
unsigned long *per_pkg_mask;
struct perf_evsel *leader;
char *group_name;
};

View File

@ -1311,7 +1311,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
if (machine->last_match == th)
machine->last_match = NULL;
BUG_ON(th->refcnt.counter == 0);
BUG_ON(atomic_read(&th->refcnt) == 0);
if (lock)
pthread_rwlock_wrlock(&machine->threads_lock);
rb_erase(&th->rb_node, &machine->threads);

View File

@ -426,7 +426,7 @@ void map_groups__init(struct map_groups *mg, struct machine *machine)
INIT_LIST_HEAD(&mg->removed_maps[i]);
}
mg->machine = machine;
mg->refcnt = 1;
atomic_set(&mg->refcnt, 1);
}
static void maps__delete(struct rb_root *maps)
@ -494,7 +494,7 @@ void map_groups__delete(struct map_groups *mg)
void map_groups__put(struct map_groups *mg)
{
if (--mg->refcnt == 0)
if (mg && atomic_dec_and_test(&mg->refcnt))
map_groups__delete(mg);
}

View File

@ -1,6 +1,7 @@
#ifndef __PERF_MAP_H
#define __PERF_MAP_H
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@ -61,7 +62,7 @@ struct map_groups {
struct rb_root maps[MAP__NR_TYPES];
struct list_head removed_maps[MAP__NR_TYPES];
struct machine *machine;
int refcnt;
atomic_t refcnt;
};
struct map_groups *map_groups__new(struct machine *machine);
@ -70,7 +71,8 @@ bool map_groups__empty(struct map_groups *mg);
static inline struct map_groups *map_groups__get(struct map_groups *mg)
{
++mg->refcnt;
if (mg)
atomic_inc(&mg->refcnt);
return mg;
}

View File

@ -2499,6 +2499,9 @@ static int find_probe_functions(struct map *map, char *name)
struct symbol *sym;
struct rb_node *tmp;
if (map__load(map, NULL) < 0)
return 0;
map__for_each_symbol(map, sym, tmp) {
if (strglobmatch(sym->name, name))
found++;

View File

@ -89,14 +89,14 @@ static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
/* Compare the addr that should be unique among comm */
return comm__str(right->comm) - comm__str(left->comm);
return strcmp(comm__str(right->comm), comm__str(left->comm));
}
static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
/* Compare the addr that should be unique among comm */
return comm__str(right->comm) - comm__str(left->comm);
return strcmp(comm__str(right->comm), comm__str(left->comm));
}
static int64_t

View File

@ -1383,12 +1383,22 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
bool kmod;
dso__set_loaded(dso, map->type);
pthread_mutex_lock(&dso->lock);
if (dso->kernel == DSO_TYPE_KERNEL)
return dso__load_kernel_sym(dso, map, filter);
else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
return dso__load_guest_kernel_sym(dso, map, filter);
/* check again under the dso->lock */
if (dso__loaded(dso, map->type)) {
ret = 1;
goto out;
}
if (dso->kernel) {
if (dso->kernel == DSO_TYPE_KERNEL)
ret = dso__load_kernel_sym(dso, map, filter);
else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
ret = dso__load_guest_kernel_sym(dso, map, filter);
goto out;
}
if (map->groups && map->groups->machine)
machine = map->groups->machine;
@ -1401,18 +1411,18 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
struct stat st;
if (lstat(dso->name, &st) < 0)
return -1;
goto out;
if (st.st_uid && (st.st_uid != geteuid())) {
pr_warning("File %s not owned by current user or root, "
"ignoring it.\n", dso->name);
return -1;
goto out;
}
ret = dso__load_perf_map(dso, map, filter);
dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
DSO_BINARY_TYPE__NOT_FOUND;
return ret;
goto out;
}
if (machine)
@ -1420,7 +1430,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
name = malloc(PATH_MAX);
if (!name)
return -1;
goto out;
kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
@ -1501,7 +1511,11 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
out_free:
free(name);
if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
return 0;
ret = 0;
out:
dso__set_loaded(dso, map->type);
pthread_mutex_unlock(&dso->lock);
return ret;
}
@ -1805,6 +1819,7 @@ static void vmlinux_path__exit(void)
{
while (--vmlinux_path__nr_entries >= 0)
zfree(&vmlinux_path[vmlinux_path__nr_entries]);
vmlinux_path__nr_entries = 0;
zfree(&vmlinux_path);
}

View File

@ -25,9 +25,9 @@ struct thread {
atomic_t refcnt;
char shortname[3];
bool comm_set;
int comm_len;
bool dead; /* if set thread has exited */
struct list_head comm_list;
int comm_len;
u64 db_id;
void *priv;

View File

@ -72,6 +72,49 @@ int mkdir_p(char *path, mode_t mode)
return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}
int rm_rf(char *path)
{
DIR *dir;
int ret = 0;
struct dirent *d;
char namebuf[PATH_MAX];
dir = opendir(path);
if (dir == NULL)
return 0;
while ((d = readdir(dir)) != NULL && !ret) {
struct stat statbuf;
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
scnprintf(namebuf, sizeof(namebuf), "%s/%s",
path, d->d_name);
ret = stat(namebuf, &statbuf);
if (ret < 0) {
pr_debug("stat failed: %s\n", namebuf);
break;
}
if (S_ISREG(statbuf.st_mode))
ret = unlink(namebuf);
else if (S_ISDIR(statbuf.st_mode))
ret = rm_rf(namebuf);
else {
pr_debug("unknown file: %s\n", namebuf);
ret = -1;
}
}
closedir(dir);
if (ret < 0)
return ret;
return rmdir(path);
}
static int slow_copyfile(const char *from, const char *to, mode_t mode)
{
int err = -1;
@ -102,11 +145,38 @@ out:
return err;
}
int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
{
void *ptr;
loff_t pgoff;
pgoff = off_in & ~(page_size - 1);
off_in -= pgoff;
ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
if (ptr == MAP_FAILED)
return -1;
while (size) {
ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out);
if (ret < 0 && errno == EINTR)
continue;
if (ret <= 0)
break;
size -= ret;
off_in += ret;
off_out -= ret;
}
munmap(ptr, off_in + size);
return size ? -1 : 0;
}
int copyfile_mode(const char *from, const char *to, mode_t mode)
{
int fromfd, tofd;
struct stat st;
void *addr;
int err = -1;
if (stat(from, &st))
@ -123,15 +193,8 @@ int copyfile_mode(const char *from, const char *to, mode_t mode)
if (tofd < 0)
goto out_close_from;
addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
if (addr == MAP_FAILED)
goto out_close_to;
err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size);
if (write(tofd, addr, st.st_size) == st.st_size)
err = 0;
munmap(addr, st.st_size);
out_close_to:
close(tofd);
if (err)
unlink(to);

View File

@ -249,8 +249,10 @@ static inline int sane_case(int x, int high)
}
int mkdir_p(char *path, mode_t mode);
int rm_rf(char *path);
int copyfile(const char *from, const char *to);
int copyfile_mode(const char *from, const char *to, mode_t mode);
int copyfile_offset(int fromfd, loff_t from_ofs, int tofd, loff_t to_ofs, u64 size);
s64 perf_atoll(const char *str);
char **argv_split(const char *str, int *argcp);