libperf: Add perf_thread_map__get()/perf_thread_map__put()
Move the following functions:

  thread_map__get()
  thread_map__put()
  thread_map__comm()

to libperf with the following names:

  perf_thread_map__get()
  perf_thread_map__put()
  perf_thread_map__comm()

The perf_thread_map__comm() getter is added to libperf so that the moved code compiles and works.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-34-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 7836e52e51
parent 4b49cce25e
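For illustration, here is a minimal sketch of how a libperf user could drive the moved API. It is not part of the patch; it assumes the header is included as <perf/threadmap.h> and that the program is built against libperf, and the printed output is only an example:

#include <stdio.h>
#include <perf/threadmap.h>

int main(void)
{
	/* Dummy map: a single entry with pid -1 and an initial refcount of 1. */
	struct perf_thread_map *threads = perf_thread_map__new_dummy();
	char *comm;

	if (!threads)
		return 1;

	/* comm may still be unset here; perf fills it in separately. */
	comm = perf_thread_map__comm(threads, 0);
	printf("comm: %s\n", comm ? comm : "(unset)");

	/* Every perf_thread_map__get() must be balanced by a put ... */
	perf_thread_map__get(threads);
	perf_thread_map__put(threads);

	/* ... and the final put drops the refcount to zero and frees the map. */
	perf_thread_map__put(threads);
	return 0;
}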
@@ -1060,7 +1060,7 @@ static int record__synthesize_workload(struct record *rec, bool tail)
 						process_synthesized_event,
 						&rec->session->machines.host,
 						rec->opts.sample_address);
-	thread_map__put(thread_map);
+	perf_thread_map__put(thread_map);
 	return err;
 }
 
@@ -10,5 +10,9 @@ struct perf_thread_map;
 LIBPERF_API struct perf_thread_map *perf_thread_map__new_dummy(void);
 
 LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid);
+LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int thread);
+
+LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
+LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);
 
 #endif /* __LIBPERF_THREADMAP_H */
@@ -6,6 +6,9 @@ LIBPERF_0.0.1 {
 		perf_cpu_map__put;
 		perf_thread_map__new_dummy;
 		perf_thread_map__set_pid;
+		perf_thread_map__comm;
+		perf_thread_map__get;
+		perf_thread_map__put;
 	local:
 		*;
 };
@@ -4,6 +4,8 @@
 #include <linux/refcount.h>
 #include <internal/threadmap.h>
 #include <string.h>
+#include <asm/bug.h>
+#include <stdio.h>
 
 static void perf_thread_map__reset(struct perf_thread_map *map, int start, int nr)
 {
@@ -35,6 +37,11 @@ void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid
 	map->map[thread].pid = pid;
 }
 
+char *perf_thread_map__comm(struct perf_thread_map *map, int thread)
+{
+	return map->map[thread].comm;
+}
+
 struct perf_thread_map *perf_thread_map__new_dummy(void)
 {
 	struct perf_thread_map *threads = thread_map__alloc(1);
@@ -46,3 +53,29 @@ struct perf_thread_map *perf_thread_map__new_dummy(void)
 	}
 	return threads;
 }
+
+static void perf_thread_map__delete(struct perf_thread_map *threads)
+{
+	if (threads) {
+		int i;
+
+		WARN_ONCE(refcount_read(&threads->refcnt) != 0,
+			  "thread map refcnt unbalanced\n");
+		for (i = 0; i < threads->nr; i++)
+			free(perf_thread_map__comm(threads, i));
+		free(threads);
+	}
+}
+
+struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map)
+{
+	if (map)
+		refcount_inc(&map->refcnt);
+	return map;
+}
+
+void perf_thread_map__put(struct perf_thread_map *map)
+{
+	if (map && refcount_dec_and_test(&map->refcnt))
+		perf_thread_map__delete(map);
+}
@@ -656,7 +656,7 @@ static int do_test_code_reading(bool try_kcore)
 	 * call. Getting refference to keep them alive.
 	 */
 	perf_cpu_map__get(cpus);
-	thread_map__get(threads);
+	perf_thread_map__get(threads);
 	perf_evlist__set_maps(evlist, NULL, NULL);
 	evlist__delete(evlist);
 	evlist = NULL;
@@ -706,7 +706,7 @@ out_err:
 		evlist__delete(evlist);
 	} else {
 		perf_cpu_map__put(cpus);
-		thread_map__put(threads);
+		perf_thread_map__put(threads);
 	}
 	machine__delete_threads(machine);
 	machine__delete(machine);
@@ -76,7 +76,7 @@ static int attach__current_disabled(struct evlist *evlist)
 		return err;
 	}
 
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return evsel__enable(evsel) == 0 ? TEST_OK : TEST_FAIL;
 }
 
@@ -96,7 +96,7 @@ static int attach__current_enabled(struct evlist *evlist)
 
 	err = perf_evsel__open_per_thread(evsel, threads);
 
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return err == 0 ? TEST_OK : TEST_FAIL;
 }
 
@@ -150,7 +150,7 @@ out_err:
 		evlist__delete(evlist);
 	} else {
 		perf_cpu_map__put(cpus);
-		thread_map__put(threads);
+		perf_thread_map__put(threads);
 	}
 
 	return err;
@@ -157,6 +157,6 @@ out_delete_evlist:
 out_free_cpus:
 	perf_cpu_map__put(cpus);
 out_free_threads:
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return err;
 }
@@ -147,7 +147,7 @@ static int synth_process(struct machine *machine)
 						perf_event__process,
 						machine, 0);
 
-	thread_map__put(map);
+	perf_thread_map__put(map);
 	return err;
 }
 
@@ -122,6 +122,6 @@ out_evsel_delete:
 out_cpu_map_delete:
 	perf_cpu_map__put(cpus);
 out_thread_map_delete:
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return err;
 }
@@ -61,6 +61,6 @@ out_close_fd:
 out_evsel_delete:
 	evsel__delete(evsel);
 out_thread_map_delete:
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return err;
 }
@@ -126,7 +126,7 @@ out_init:
 
 out_free_maps:
 	perf_cpu_map__put(cpus);
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 out_delete_evlist:
 	evlist__delete(evlist);
 	return err;
@@ -570,7 +570,7 @@ out:
 		evlist__delete(evlist);
 	} else {
 		perf_cpu_map__put(cpus);
-		thread_map__put(threads);
+		perf_thread_map__put(threads);
 	}
 
 	return err;
@@ -136,7 +136,7 @@ out_init:
 
 out_free_maps:
 	perf_cpu_map__put(cpus);
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
out_delete_evlist:
 	evlist__delete(evlist);
 	return err;
@@ -28,11 +28,11 @@ int test__thread_map(struct test *test __maybe_unused, int subtest __maybe_unuse
 	TEST_ASSERT_VAL("wrong pid",
 			thread_map__pid(map, 0) == getpid());
 	TEST_ASSERT_VAL("wrong comm",
-			thread_map__comm(map, 0) &&
-			!strcmp(thread_map__comm(map, 0), NAME));
+			perf_thread_map__comm(map, 0) &&
+			!strcmp(perf_thread_map__comm(map, 0), NAME));
 	TEST_ASSERT_VAL("wrong refcnt",
 			refcount_read(&map->refcnt) == 1);
-	thread_map__put(map);
+	perf_thread_map__put(map);
 
 	/* test dummy pid */
 	map = perf_thread_map__new_dummy();
@@ -43,11 +43,11 @@ int test__thread_map(struct test *test __maybe_unused, int subtest __maybe_unuse
 	TEST_ASSERT_VAL("wrong nr", map->nr == 1);
 	TEST_ASSERT_VAL("wrong pid", thread_map__pid(map, 0) == -1);
 	TEST_ASSERT_VAL("wrong comm",
-			thread_map__comm(map, 0) &&
-			!strcmp(thread_map__comm(map, 0), "dummy"));
+			perf_thread_map__comm(map, 0) &&
+			!strcmp(perf_thread_map__comm(map, 0), "dummy"));
 	TEST_ASSERT_VAL("wrong refcnt",
 			refcount_read(&map->refcnt) == 1);
-	thread_map__put(map);
+	perf_thread_map__put(map);
 	return 0;
 }
 
@@ -70,11 +70,11 @@ static int process_event(struct perf_tool *tool __maybe_unused,
 	TEST_ASSERT_VAL("wrong pid",
 			thread_map__pid(threads, 0) == getpid());
 	TEST_ASSERT_VAL("wrong comm",
-			thread_map__comm(threads, 0) &&
-			!strcmp(thread_map__comm(threads, 0), NAME));
+			perf_thread_map__comm(threads, 0) &&
+			!strcmp(perf_thread_map__comm(threads, 0), NAME));
 	TEST_ASSERT_VAL("wrong refcnt",
 			refcount_read(&threads->refcnt) == 1);
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return 0;
 }
 
@@ -992,7 +992,7 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
 
 	for (i = 0; i < threads->nr; i++) {
 		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
-		char *comm = thread_map__comm(threads, i);
+		char *comm = perf_thread_map__comm(threads, i);
 
 		if (!comm)
 			comm = (char *) "";
@@ -1387,7 +1387,7 @@ size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
 	else
 		ret += fprintf(fp, "failed to get threads from event\n");
 
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return ret;
 }
 
@@ -142,7 +142,7 @@ void evlist__delete(struct evlist *evlist)
 	perf_evlist__munmap(evlist);
 	evlist__close(evlist);
 	perf_cpu_map__put(evlist->cpus);
-	thread_map__put(evlist->threads);
+	perf_thread_map__put(evlist->threads);
 	evlist->cpus = NULL;
 	evlist->threads = NULL;
 	perf_evlist__purge(evlist);
@@ -165,8 +165,8 @@ static void __perf_evlist__propagate_maps(struct evlist *evlist,
 		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
 	}
 
-	thread_map__put(evsel->threads);
-	evsel->threads = thread_map__get(evlist->threads);
+	perf_thread_map__put(evsel->threads);
+	evsel->threads = perf_thread_map__get(evlist->threads);
 }
 
 static void perf_evlist__propagate_maps(struct evlist *evlist)
@@ -1100,7 +1100,7 @@ int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
 	return 0;
 
 out_delete_threads:
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return -1;
 }
 
@@ -1120,8 +1120,8 @@ void perf_evlist__set_maps(struct evlist *evlist, struct perf_cpu_map *cpus,
 	}
 
 	if (threads != evlist->threads) {
-		thread_map__put(evlist->threads);
-		evlist->threads = thread_map__get(threads);
+		perf_thread_map__put(evlist->threads);
+		evlist->threads = perf_thread_map__get(threads);
 	}
 
 	perf_evlist__propagate_maps(evlist);
@@ -1327,7 +1327,7 @@ void perf_evsel__exit(struct evsel *evsel)
 	cgroup__put(evsel->cgrp);
 	perf_cpu_map__put(evsel->cpus);
 	perf_cpu_map__put(evsel->own_cpus);
-	thread_map__put(evsel->threads);
+	perf_thread_map__put(evsel->threads);
 	zfree(&evsel->group_name);
 	zfree(&evsel->name);
 	perf_evsel__object.fini(evsel);
@@ -2337,7 +2337,7 @@ static bool is_event_supported(u8 type, unsigned config)
 		evsel__delete(evsel);
 	}
 
-	thread_map__put(tmap);
+	perf_thread_map__put(tmap);
 	return ret;
 }
 
@@ -626,7 +626,7 @@ static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
 
 static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
 {
-	thread_map__put(pthreads->threads);
+	perf_thread_map__put(pthreads->threads);
 	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
 }
 
@@ -116,7 +116,7 @@ static void aggr_printout(struct perf_stat_config *config,
 	case AGGR_THREAD:
 		fprintf(config->output, "%*s-%*d%s",
 			config->csv_output ? 0 : 16,
-			thread_map__comm(evsel->threads, id),
+			perf_thread_map__comm(evsel->threads, id),
 			config->csv_output ? 0 : -8,
 			thread_map__pid(evsel->threads, id),
 			config->csv_sep);
@@ -304,32 +304,6 @@ struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid,
 	return thread_map__new_by_tid_str(tid);
 }
 
-static void thread_map__delete(struct perf_thread_map *threads)
-{
-	if (threads) {
-		int i;
-
-		WARN_ONCE(refcount_read(&threads->refcnt) != 0,
-			  "thread map refcnt unbalanced\n");
-		for (i = 0; i < threads->nr; i++)
-			free(thread_map__comm(threads, i));
-		free(threads);
-	}
-}
-
-struct perf_thread_map *thread_map__get(struct perf_thread_map *map)
-{
-	if (map)
-		refcount_inc(&map->refcnt);
-	return map;
-}
-
-void thread_map__put(struct perf_thread_map *map)
-{
-	if (map && refcount_dec_and_test(&map->refcnt))
-		thread_map__delete(map);
-}
-
 size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp)
 {
 	int i;
@@ -18,9 +18,6 @@ struct perf_thread_map *thread_map__new_all_cpus(void);
 struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
 struct perf_thread_map *thread_map__new_event(struct thread_map_event *event);
 
-struct perf_thread_map *thread_map__get(struct perf_thread_map *map);
-void thread_map__put(struct perf_thread_map *map);
-
 struct perf_thread_map *thread_map__new_str(const char *pid,
 		const char *tid, uid_t uid, bool all_threads);
 
@@ -38,11 +35,6 @@ static inline pid_t thread_map__pid(struct perf_thread_map *map, int thread)
 	return map->map[thread].pid;
 }
 
-static inline char *thread_map__comm(struct perf_thread_map *map, int thread)
-{
-	return map->map[thread].comm;
-}
-
 void thread_map__read_comms(struct perf_thread_map *threads);
 bool thread_map__has(struct perf_thread_map *threads, pid_t pid);
 int thread_map__remove(struct perf_thread_map *threads, int idx);