perf evlist: Handle default value for 'pages' on mmap method
Every tool that calls this and allows the user to override the value
needs this logic.

Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-lwscxpg57xfzahz5dmdfp9uz@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 50a682ce87
parent 35b9d88ecd
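For orientation, here is a minimal standalone C sketch (not part of the patch, not perf code) of the arithmetic the commit centralizes in perf_evlist__mmap(): UINT_MAX acts as the "use the default" sentinel, the fallback is 512 kiB worth of pages, and mask is the byte mask derived from the final page count, exactly as in the hunks below.

/* Standalone illustration only; compiles on its own, outside the perf tree. */
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        unsigned int pages = UINT_MAX;  /* sentinel: "use the default" */

        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;

        /* mask = pages * page_size - 1, as the patched function computes it */
        unsigned int mask = pages * page_size - 1;

        printf("page_size=%u pages=%u mask=%#x\n", page_size, pages, mask);
        return 0;
}

With a 4 kiB page size this yields 128 pages and a mask of 0x7ffff, i.e. a 512 kiB buffer.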
tools/perf/builtin-record.c
@@ -456,10 +456,6 @@ static int __cmd_record(int argc, const char **argv)
        perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
        perf_header__set_feat(&session->header, HEADER_CPUID);

-       /* 512 kiB: default amount of unprivileged mlocked memory */
-       if (mmap_pages == UINT_MAX)
-               mmap_pages = (512 * 1024) / page_size;
-
        if (forks) {
                err = perf_evlist__prepare_workload(evsel_list, &record_opts, argv);
                if (err < 0) {
tools/perf/util/evlist.c
@@ -507,14 +507,20 @@ out_unmap:
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+                     bool overwrite)
 {
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-       int mask = pages * page_size - 1;
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
-       int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+       int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+
+       /* 512 kiB: default amount of unprivileged mlocked memory */
+       if (pages == UINT_MAX)
+               pages = (512 * 1024) / page_size;
+
+       mask = pages * page_size - 1;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;
tools/perf/util/evlist.h
@@ -79,7 +79,8 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
 int perf_evlist__start_workload(struct perf_evlist *evlist);

 int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+                     bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);

 void perf_evlist__disable(struct perf_evlist *evlist);
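The caller-side effect, as the commit message notes, is that a tool no longer needs to special-case the default itself: it can initialize its mmap-pages option to UINT_MAX and pass it straight through. The self-contained sketch below shows that calling pattern; mmap_with_default() is a hypothetical stand-in for perf_evlist__mmap() that keeps only the default handling, and is not perf code.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical stand-in for perf_evlist__mmap(); only the default logic. */
static int mmap_with_default(unsigned int pages)
{
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);

        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;

        printf("would mmap %u pages per buffer\n", pages);
        return 0;
}

int main(int argc, char **argv)
{
        /* Tool-side default: let the mmap method pick the value. */
        unsigned int mmap_pages = UINT_MAX;

        if (argc > 1)   /* user override, e.g. "./demo 256" */
                mmap_pages = (unsigned int)strtoul(argv[1], NULL, 0);

        return mmap_with_default(mmap_pages);
}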