#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
/* For the CPU_* macros */
#include <pthread.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"

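/*
 * Check that a syscalls:sys_enter_openat tracepoint event opened on all
 * CPUs counts correctly: pin the thread to each CPU in turn, issue a
 * distinct, known number of openat() calls there, then read the per-cpu
 * counts back and compare them against what was issued.
 *
 * The subtest index is unused: this test has no subtests, but every test
 * routine takes the index so they can all share one function prototype.
 */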
int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

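	/* threads is a map with a single entry: the current thread (getpid()). */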
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

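	/*
	 * Create a tracepoint evsel for syscalls:sys_enter_openat;
	 * perf_evsel__newtp() returns an ERR_PTR value on failure, hence
	 * the IS_ERR() check below.
	 */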
	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}
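
	/*
	 * Pin ourselves to each CPU in turn and issue a distinct number of
	 * openat() calls there (nr_openat_calls + cpu), so every CPU ends
	 * up with a different, known expected count.
	 */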
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
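		/*
		 * The dirfd argument (0) is ignored by openat() for an
		 * absolute path; each call enters the syscall once, firing
		 * the sys_enter_openat tracepoint on this CPU.
		 */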
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts: the auto
	 * allocation done on first read would allocate for a single cpu
	 * only, since we start reading from cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

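	/*
	 * Read the counts back: each CPU exercised above must have seen
	 * exactly nr_openat_calls + cpu events.
	 */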
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}