mirror of
https://github.com/torvalds/linux.git
synced 2024-11-27 14:41:39 +00:00
267ed5d859
The kernel perf subsystem has to IPI to the target CPU for many operations. On systems with many CPUs and when managing many events the overhead can be dominated by lots of IPIs. An alternative is to set up CPU affinity in the perf tool, then set up all the events for that CPU, and then move on to the next CPU. Add some affinity management infrastructure to enable such a model. Used in followon patches. Committer notes: Use zfree() in some places, add missing stdbool.h header, some minor coding style changes. Signed-off-by: Andi Kleen <ak@linux.intel.com> Acked-by: Jiri Olsa <jolsa@kernel.org> Link: http://lore.kernel.org/lkml/20191121001522.180827-3-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
74 lines
1.8 KiB
C
74 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Manage affinity to optimize IPIs inside the kernel perf API. */
|
|
#define _GNU_SOURCE 1
|
|
#include <sched.h>
|
|
#include <stdlib.h>
|
|
#include <linux/bitmap.h>
|
|
#include <linux/zalloc.h>
|
|
#include "perf.h"
|
|
#include "cpumap.h"
|
|
#include "affinity.h"
|
|
|
|
/*
 * Size, in bytes, of the CPU mask handed to sched_{get,set}affinity().
 */
static int get_cpu_set_size(void)
{
	/* Round the highest possible CPU number up to a whole byte. */
	int bits = cpu__max_cpu() + 8 - 1;

	/*
	 * sched_getaffinity doesn't like masks smaller than the kernel.
	 * Hopefully that's big enough.
	 */
	if (bits < 4096)
		bits = 4096;
	/* The syscalls take a byte count, not a bit count. */
	return bits / 8;
}
|
|
|
|
int affinity__setup(struct affinity *a)
|
|
{
|
|
int cpu_set_size = get_cpu_set_size();
|
|
|
|
a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
|
|
if (!a->orig_cpus)
|
|
return -1;
|
|
sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
|
|
a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
|
|
if (!a->sched_cpus) {
|
|
zfree(&a->orig_cpus);
|
|
return -1;
|
|
}
|
|
bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size);
|
|
a->changed = false;
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* perf_event_open does an IPI internally to the target CPU.
|
|
* It is more efficient to change perf's affinity to the target
|
|
* CPU and then set up all events on that CPU, so we amortize
|
|
* CPU communication.
|
|
*/
|
|
void affinity__set(struct affinity *a, int cpu)
|
|
{
|
|
int cpu_set_size = get_cpu_set_size();
|
|
|
|
if (cpu == -1)
|
|
return;
|
|
a->changed = true;
|
|
set_bit(cpu, a->sched_cpus);
|
|
/*
|
|
* We ignore errors because affinity is just an optimization.
|
|
* This could happen for example with isolated CPUs or cpusets.
|
|
* In this case the IPIs inside the kernel's perf API still work.
|
|
*/
|
|
sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
|
|
clear_bit(cpu, a->sched_cpus);
|
|
}
|
|
|
|
void affinity__cleanup(struct affinity *a)
|
|
{
|
|
int cpu_set_size = get_cpu_set_size();
|
|
|
|
if (a->changed)
|
|
sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
|
|
zfree(&a->sched_cpus);
|
|
zfree(&a->orig_cpus);
|
|
}
|