perf/x86: Add two more x86_pmu methods
In order to clean up x86_perf_event_{set_period,update}(), start by adding them as x86_pmu methods.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220829101321.440196408@infradead.org
commit 73759c3463
parent f3c0eba287
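Background sketch (not part of the patch): x86_perf_event_set_period() programs the remaining sampling period into a counter, and x86_perf_event_update() folds the raw counter value back into the event count. The patch below turns both into overridable x86_pmu methods that default to the generic helpers at init time and are dispatched through static calls. A minimal, runnable user-space C sketch of that pattern follows; every name in it (struct pmu_ops, generic_*, pmu_init, ...) is invented for illustration and is not kernel API.

/*
 * Model of the pattern: optional methods that fall back to generic
 * implementations at init time. The kernel dispatches via static_call(),
 * which patches the call site; a plain function pointer stands in here.
 */
#include <stdio.h>

struct event { long left; };

struct pmu_ops {
	int  (*set_period)(struct event *);	/* like x86_pmu.set_period */
	long (*update)(struct event *);		/* like x86_pmu.update */
};

/* stand-ins for x86_perf_event_{set_period,update}() */
static int  generic_set_period(struct event *e) { e->left = 1000; return 1; }
static long generic_update(struct event *e)     { return e->left; }

static struct pmu_ops pmu;	/* a model-specific driver may pre-fill this */

static void pmu_init(void)
{
	/* mirrors the init_hw_perf_events() hunk: default any unset method */
	if (!pmu.set_period)
		pmu.set_period = generic_set_period;
	if (!pmu.update)
		pmu.update = generic_update;
}

int main(void)
{
	struct event e = { 0 };

	pmu_init();
	pmu.set_period(&e);	/* kernel: static_call(x86_pmu_set_period)(event) */
	printf("count left: %ld\n", pmu.update(&e));
	return 0;
}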
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
@@ -72,6 +72,9 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
 DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
 DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
 
+DEFINE_STATIC_CALL_NULL(x86_pmu_set_period, *x86_pmu.set_period);
+DEFINE_STATIC_CALL_NULL(x86_pmu_update, *x86_pmu.update);
+
 DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events);
 DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
 DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);
@@ -1518,7 +1521,7 @@ static void x86_pmu_start(struct perf_event *event, int flags)
 
 	if (flags & PERF_EF_RELOAD) {
 		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
-		x86_perf_event_set_period(event);
+		static_call(x86_pmu_set_period)(event);
 	}
 
 	event->hw.state = 0;
@@ -1610,7 +1613,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
 		 * Drain the remaining delta count out of a event
 		 * that we are disabling:
 		 */
-		x86_perf_event_update(event);
+		static_call(x86_pmu_update)(event);
 		hwc->state |= PERF_HES_UPTODATE;
 	}
 }
@@ -1700,7 +1703,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 
 		event = cpuc->events[idx];
 
-		val = x86_perf_event_update(event);
+		val = static_call(x86_pmu_update)(event);
 		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
 			continue;
 
@@ -1709,7 +1712,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 		 */
 		handled++;
 
-		if (!x86_perf_event_set_period(event))
+		if (!static_call(x86_pmu_set_period)(event))
 			continue;
 
 		perf_sample_data_init(&data, 0, event->hw.last_period);
@@ -2025,6 +2028,9 @@ static void x86_pmu_static_call_update(void)
 	static_call_update(x86_pmu_del, x86_pmu.del);
 	static_call_update(x86_pmu_read, x86_pmu.read);
 
+	static_call_update(x86_pmu_set_period, x86_pmu.set_period);
+	static_call_update(x86_pmu_update, x86_pmu.update);
+
 	static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events);
 	static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints);
 	static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints);
@@ -2044,7 +2050,7 @@ static void x86_pmu_static_call_update(void)
 
 static void _x86_pmu_read(struct perf_event *event)
 {
-	x86_perf_event_update(event);
+	static_call(x86_pmu_update)(event);
 }
 
 void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
@@ -2151,6 +2157,12 @@ static int __init init_hw_perf_events(void)
 	if (!x86_pmu.guest_get_msrs)
 		x86_pmu.guest_get_msrs = (void *)&__static_call_return0;
 
+	if (!x86_pmu.set_period)
+		x86_pmu.set_period = x86_perf_event_set_period;
+
+	if (!x86_pmu.update)
+		x86_pmu.update = x86_perf_event_update;
+
 	x86_pmu_static_call_update();
 
 	/*
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
@@ -743,6 +743,8 @@ struct x86_pmu {
 	void		(*add)(struct perf_event *);
 	void		(*del)(struct perf_event *);
 	void		(*read)(struct perf_event *event);
+	int		(*set_period)(struct perf_event *event);
+	u64		(*update)(struct perf_event *event);
 	int		(*hw_config)(struct perf_event *event);
 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
 	unsigned	eventsel;
@@ -1042,6 +1044,9 @@ static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
 struct pmu *x86_get_pmu(unsigned int cpu);
 extern struct x86_pmu x86_pmu __read_mostly;
 
+DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
+DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
+
 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
 {
 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
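Usage note (illustrative, not part of the patch): with the methods in place, a model-specific driver can override the generic behaviour simply by filling in x86_pmu.set_period / x86_pmu.update before init_hw_perf_events() applies the NULL-check defaults above. The my_pmu_* names in this sketch are hypothetical; the real overrides (e.g. for the Intel PMU) arrive in later patches of this series.

/* Hypothetical override -- my_pmu_* does not exist in the tree. */
static int my_pmu_set_period(struct perf_event *event)
{
	/* a driver could clamp or adjust the period here, then ... */
	return x86_perf_event_set_period(event);
}

static u64 my_pmu_update(struct perf_event *event)
{
	/* ... or post-process the counter delta here */
	return x86_perf_event_update(event);
}

static __init void my_pmu_setup(void)
{
	/* must run before x86_pmu_static_call_update() patches the calls */
	x86_pmu.set_period = my_pmu_set_period;
	x86_pmu.update = my_pmu_update;
}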