perf evsel: Separate open preparation from open itself
This is a preparatory patch for the following patches. The goal is to separate, in evsel__open_cpu, the actual perf_event_open call, which could be performed in parallel, from the existing fallback mechanisms, which should be handled sequentially. This patch splits the first lines of evsel__open_cpu out into a new __evsel__prepare_open function.

Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/e14118b934c338dbbf68b8677f20d0d7dbf9359a.1629490974.git.rickyman7@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent bc0496043e
commit d45ce03434
tools/perf/util/evsel.c

@@ -1746,22 +1746,20 @@ static int perf_event_open(struct evsel *evsel,
 	return fd;
 }
 
-static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
-		struct perf_thread_map *threads,
-		int start_cpu, int end_cpu)
+static struct perf_cpu_map *empty_cpu_map;
+static struct perf_thread_map *empty_thread_map;
+
+static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
+		struct perf_thread_map *threads)
 {
-	int cpu, thread, nthreads;
-	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
-	int pid = -1, err, old_errno;
-	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+	int nthreads;
 
 	if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
 	    (perf_missing_features.aux_output && evsel->core.attr.aux_output))
 		return -EINVAL;
 
 	if (cpus == NULL) {
-		static struct perf_cpu_map *empty_cpu_map;
-
 		if (empty_cpu_map == NULL) {
 			empty_cpu_map = perf_cpu_map__dummy_new();
 			if (empty_cpu_map == NULL)
@@ -1772,8 +1770,6 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 	}
 
 	if (threads == NULL) {
-		static struct perf_thread_map *empty_thread_map;
-
 		if (empty_thread_map == NULL) {
 			empty_thread_map = thread_map__new_by_tid(-1);
 			if (empty_thread_map == NULL)
@@ -1792,6 +1788,33 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
 	    perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
 		return -ENOMEM;
 
+	return 0;
+}
+
+static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
+		struct perf_thread_map *threads,
+		int start_cpu, int end_cpu)
+{
+	int cpu, thread, nthreads;
+	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
+	int pid = -1, err, old_errno;
+	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+
+	err = __evsel__prepare_open(evsel, cpus, threads);
+	if (err)
+		return err;
+
+	if (cpus == NULL)
+		cpus = empty_cpu_map;
+
+	if (threads == NULL)
+		threads = empty_thread_map;
+
+	if (evsel->core.system_wide)
+		nthreads = 1;
+	else
+		nthreads = threads->nr;
+
 	if (evsel->cgrp) {
 		flags |= PERF_FLAG_PID_CGROUP;
 		pid = evsel->cgrp->fd;
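The point of the split is that once preparation (validation, dummy map setup, fd allocation) is factored out into a sequential step, the remaining open calls no longer depend on lazily initialized shared state and could, in a follow-up patch, be issued from several threads. As a rough illustration of that pattern only, here is a minimal, self-contained C sketch; struct event, event_prepare and event_open_one are hypothetical names, not part of the kernel tree, and the "open" is a placeholder rather than a real perf_event_open() syscall.

/*
 * Minimal sketch of the prepare/open split this patch moves toward.
 * Hypothetical names; compile with: cc -pthread sketch.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct event {
	int nfds;
	int *fds;	/* allocated during prepare, filled during open */
};

/* Sequential phase: validation and allocation (cf. __evsel__prepare_open). */
static int event_prepare(struct event *ev, int nfds)
{
	if (nfds <= 0)
		return -1;
	ev->fds = calloc(nfds, sizeof(*ev->fds));
	if (!ev->fds)
		return -1;
	ev->nfds = nfds;
	return 0;
}

/* Parallelizable phase: one "open" per slot, no shared state touched. */
static void *event_open_one(void *arg)
{
	int *fd = arg;

	*fd = 42;	/* stand-in for a real perf_event_open() call */
	return NULL;
}

int main(void)
{
	struct event ev;
	pthread_t tid[4];
	int i;

	if (event_prepare(&ev, 4))		/* done once, sequentially */
		return 1;

	for (i = 0; i < ev.nfds; i++)		/* opens can run concurrently */
		pthread_create(&tid[i], NULL, event_open_one, &ev.fds[i]);
	for (i = 0; i < ev.nfds; i++)
		pthread_join(tid[i], NULL);

	for (i = 0; i < ev.nfds; i++)
		printf("fd[%d] = %d\n", i, ev.fds[i]);

	free(ev.fds);
	return 0;
}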