perf env: Add perf_env__numa_node()
To speed up cpu-to-node lookups, add perf_env__numa_node(), which on the first lookup builds a per-cpu array holding the numa node of each stored cpu. Signed-off-by: Jiri Olsa <jolsa@kernel.org> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Alexey Budankov <alexey.budankov@linux.intel.com> Cc: Andi Kleen <ak@linux.intel.com> Cc: Joe Mario <jmario@redhat.com> Cc: Kan Liang <kan.liang@linux.intel.com> Cc: Michael Petlan <mpetlan@redhat.com> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/20190904073415.723-3-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
parent
61ec07f591
commit
389799a7a1
@ -180,6 +180,7 @@ void perf_env__exit(struct perf_env *env)
|
||||
zfree(&env->sibling_threads);
|
||||
zfree(&env->pmu_mappings);
|
||||
zfree(&env->cpu);
|
||||
zfree(&env->numa_map);
|
||||
|
||||
for (i = 0; i < env->nr_numa_nodes; i++)
|
||||
perf_cpu_map__put(env->numa_nodes[i].map);
|
||||
@ -354,3 +355,42 @@ const char *perf_env__arch(struct perf_env *env)
|
||||
|
||||
return normalize_arch(arch_name);
|
||||
}
|
||||
|
||||
|
||||
/*
 * perf_env__numa_node - look up the numa node of @cpu.
 *
 * Returns the numa node index for @cpu, or -1 when @cpu is out of
 * range, was not present in any recorded node, or the lookup table
 * could not be allocated.
 *
 * On the first call the cpu -> node table (env->numa_map, sized
 * env->nr_numa_map) is built lazily from env->numa_nodes, so later
 * lookups are a single array access.
 *
 * NOTE(review): the lazy build is not thread-safe — presumably all
 * callers run single-threaded at this point; confirm before using
 * from concurrent contexts.
 */
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		/* Table size is the highest cpu number seen in any node, plus one. */
		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		/* Fill in the node index for every cpu present in each node's map. */
		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	/* Out-of-range cpus (negative or beyond the table) map to node -1. */
	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}
|
||||
|
@ -87,6 +87,10 @@ struct perf_env {
|
||||
struct rb_root btfs;
|
||||
u32 btfs_cnt;
|
||||
} bpf_progs;
|
||||
|
||||
/* For fast cpu to numa node lookup via perf_env__numa_node */
|
||||
int *numa_map;
|
||||
int nr_numa_map;
|
||||
};
|
||||
|
||||
enum perf_compress_type {
|
||||
@ -120,4 +124,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
|
||||
__u32 prog_id);
|
||||
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
|
||||
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
|
||||
|
||||
int perf_env__numa_node(struct perf_env *env, int cpu);
|
||||
#endif /* __PERF_ENV_H */
|
||||
|
Loading…
Reference in New Issue
Block a user