mirror of https://github.com/torvalds/linux.git (synced 2024-12-03 17:41:22 +00:00)
commit 8ec984d537
Previously this was used to modify CPU map propagation, but it is now unnecessary as map propagation ensures core PMUs only have valid PMUs in the CPU map from user requested CPUs.

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-11-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
107 lines
2.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PERF_TARGET_H
#define _PERF_TARGET_H

#include <stdbool.h>
#include <sys/types.h>

struct target {
	const char	*pid;
	const char	*tid;
	const char	*cpu_list;
	const char	*uid_str;
	const char	*bpf_str;
	uid_t		uid;
	bool		system_wide;
	bool		uses_mmap;
	bool		default_per_cpu;
	bool		per_thread;
	bool		use_bpf;
	int		initial_delay;
	const char	*attr_map;
};

enum target_errno {
	TARGET_ERRNO__SUCCESS		= 0,

	/*
	 * Choose an arbitrary large negative number so as not to clash with
	 * standard errno, since SUS requires errno to have distinct positive
	 * values.  See 'Issue 6' in the link below.
	 *
	 * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
	 */
	__TARGET_ERRNO__START		= -10000,

	/* for target__validate() */
	TARGET_ERRNO__PID_OVERRIDE_CPU	= __TARGET_ERRNO__START,
	TARGET_ERRNO__PID_OVERRIDE_UID,
	TARGET_ERRNO__UID_OVERRIDE_CPU,
	TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
	TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
	TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD,
	TARGET_ERRNO__BPF_OVERRIDE_CPU,
	TARGET_ERRNO__BPF_OVERRIDE_PID,
	TARGET_ERRNO__BPF_OVERRIDE_UID,
	TARGET_ERRNO__BPF_OVERRIDE_THREAD,

	/* for target__parse_uid() */
	TARGET_ERRNO__INVALID_UID,
	TARGET_ERRNO__USER_NOT_FOUND,

	__TARGET_ERRNO__END,
};

enum target_errno target__validate(struct target *target);
enum target_errno target__parse_uid(struct target *target);

int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);

static inline bool target__has_task(struct target *target)
{
	return target->tid || target->pid || target->uid_str;
}

static inline bool target__has_cpu(struct target *target)
{
	return target->system_wide || target->cpu_list;
}

static inline bool target__none(struct target *target)
{
	return !target__has_task(target) && !target__has_cpu(target);
}

static inline bool target__enable_on_exec(struct target *target)
{
	/*
	 * Normally enable_on_exec should be set if:
	 *  1) the tracee process is forked (not attaching to an existing
	 *     task or CPU), and
	 *  2) initial_delay is not configured.
	 * Otherwise, we enable tracee events manually.
	 */
	return target__none(target) && !target->initial_delay;
}

static inline bool target__has_per_thread(struct target *target)
{
	return target->system_wide && target->per_thread;
}

static inline bool target__uses_dummy_map(struct target *target)
{
	bool use_dummy = false;

	if (target->default_per_cpu)
		use_dummy = target->per_thread ? true : false;
	else if (target__has_task(target) ||
	         (!target__has_cpu(target) && !target->uses_mmap))
		use_dummy = true;
	else if (target__has_per_thread(target))
		use_dummy = true;

	return use_dummy;
}

#endif /* _PERF_TARGET_H */
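For orientation, below is a minimal usage sketch (not from the perf sources) of how a tool might drive this interface: populate a struct target, let target__validate() reconcile conflicting settings, resolve the UID string with target__parse_uid(), and format any resulting enum target_errno with target__strerror(). It assumes it is compiled inside tools/perf and linked against util/target.c, which defines the three functions declared above; the function name example_setup_target and the include path are illustrative only.

/* Hypothetical usage sketch; not part of target.h. */
#include <stdio.h>
#include "util/target.h"

int example_setup_target(void)
{
	char errbuf[BUFSIZ];
	struct target target = {
		.pid	  = "1234",	/* attach to an existing task... */
		.cpu_list = "0-3",	/* ...while also naming CPUs */
	};
	enum target_errno err;

	/*
	 * target__validate() reconciles conflicting requests; the
	 * TARGET_ERRNO__*_OVERRIDE_* codes report which setting won.
	 * Combining a PID with a CPU list is expected to yield
	 * TARGET_ERRNO__PID_OVERRIDE_CPU.
	 */
	err = target__validate(&target);
	if (err != TARGET_ERRNO__SUCCESS) {
		target__strerror(&target, err, errbuf, sizeof(errbuf));
		fprintf(stderr, "warning: %s\n", errbuf);
	}

	/* Resolve target.uid_str (if any) into the numeric target.uid. */
	err = target__parse_uid(&target);
	if (err != TARGET_ERRNO__SUCCESS) {
		target__strerror(&target, err, errbuf, sizeof(errbuf));
		fprintf(stderr, "error: %s\n", errbuf);
		return -1;
	}

	/* The inline helpers classify the kind of session being set up. */
	printf("has task: %d, has cpu: %d, enable_on_exec: %d\n",
	       target__has_task(&target), target__has_cpu(&target),
	       target__enable_on_exec(&target));

	return 0;
}

Because the target codes start at -10000, far below any standard errno value, the same int-based reporting path can carry either kind of code without ambiguity, which appears to be the point of the comment inside enum target_errno.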