Merge tag 'perf_urgent_for_v5.15_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Borislav Petkov:

 - Make sure the destroy callback is reset when an event initialization
   fails

 - Update the event constraints for Icelake

 - Make sure the active time of an event is updated even for inactive
   events

* tag 'perf_urgent_for_v5.15_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: fix userpage->time_enabled of inactive events
  perf/x86/intel: Update event constraints for ICX
  perf/x86: Reset destroy callback on event init failure
@@ -2465,6 +2465,7 @@ static int x86_pmu_event_init(struct perf_event *event)
         if (err) {
                 if (event->destroy)
                         event->destroy(event);
+                event->destroy = NULL;
         }
 
         if (READ_ONCE(x86_pmu.attr_rdpmc) &&
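The hunk above (the "perf/x86: Reset destroy callback on event init failure" change) makes the failing init path clear event->destroy after invoking it, so later generic teardown or a retry against another PMU cannot run a stale callback a second time. Not kernel code, but a minimal standalone sketch of that call-once cleanup pattern; fake_event, fake_init and fake_destroy are made-up names for illustration only:

/*
 * Standalone illustration of the call-once cleanup pattern enforced by
 * the hunk above. All names here are hypothetical, not kernel code.
 */
#include <stdio.h>

struct fake_event {
        void (*destroy)(struct fake_event *);
        int resource;
};

static void fake_destroy(struct fake_event *e)
{
        printf("released resource %d\n", e->resource);
        e->resource = 0;
}

/* On failure, run the cleanup once and clear the pointer so that any
 * later error handling cannot repeat it. */
static int fake_init(struct fake_event *e, int fail)
{
        e->destroy = fake_destroy;
        e->resource = 42;

        if (fail) {
                if (e->destroy)
                        e->destroy(e);
                e->destroy = NULL;      /* mirrors the line the patch adds */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct fake_event e = { 0 };

        if (fake_init(&e, 1) && e.destroy)
                e.destroy(&e);          /* never runs: callback was reset */
        return 0;
}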
@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
         INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
         INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
         INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+        INTEL_EVENT_CONSTRAINT(0xef, 0xf),
         INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
         EVENT_CONSTRAINT_END
 };
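For context on the constraint added above (my reading of the constraint macros, not something the patch itself states): the second argument is a bitmask of the general-purpose counters allowed to host the event, so 0xf restricts event code 0xef to counters 0 through 3. A trivial standalone decoder of such a mask:

#include <stdint.h>
#include <stdio.h>

/* Print the counter indices permitted by a constraint's counter mask. */
static void print_allowed_counters(uint64_t idxmsk)
{
        for (int i = 0; i < 64; i++)
                if (idxmsk & (1ULL << i))
                        printf("counter %d allowed\n", i);
}

int main(void)
{
        print_allowed_counters(0xf);    /* counters 0, 1, 2, 3 */
        return 0;
}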
@@ -683,7 +683,9 @@ struct perf_event {
         /*
          * timestamp shadows the actual context timing but it can
          * be safely used in NMI interrupt context. It reflects the
-         * context time as it was when the event was last scheduled in.
+         * context time as it was when the event was last scheduled in,
+         * or when ctx_sched_in failed to schedule the event because we
+         * run out of PMC.
          *
          * ctx_time already accounts for ctx->timestamp. Therefore to
          * compute ctx_time for a sample, simply add perf_clock().
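This comment change and the kernel/events/core.c hunks below belong to "perf/core: fix userpage->time_enabled of inactive events": the shadow time must be refreshed even when an event could not be scheduled, so that the time_enabled value exported through the mmap'ed perf_event_mmap_page stays accurate. Purely as a consumer-side sketch (a hypothetical self-monitoring program with an arbitrary choice of event, not part of the patch), reading time_enabled from the control page under its seqcount looks roughly like this:

/*
 * Hypothetical self-monitoring reader, for illustration only. It opens a
 * CPU-cycles counter on the current task, maps the control page and reads
 * userpg->time_enabled under the page's seqcount. Error handling is minimal.
 */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t read_time_enabled(volatile struct perf_event_mmap_page *pc)
{
        uint32_t seq;
        uint64_t enabled;

        do {
                seq = pc->lock;
                __sync_synchronize();           /* pairs with the kernel's update barriers */
                enabled = pc->time_enabled;
                __sync_synchronize();
        } while (pc->lock != seq || (seq & 1)); /* retry if an update raced us */

        /*
         * A complete reader would also add the delta since the last update,
         * using cap_user_time/time_offset/time_mult/time_shift as documented
         * in the perf_event_mmap_page UAPI comments.
         */
        return enabled;
}

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        void *p = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        printf("time_enabled = %llu ns\n",
               (unsigned long long)read_time_enabled(p));
        return 0;
}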
@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
         return 0;
 }
 
+static inline bool event_update_userpage(struct perf_event *event)
+{
+        if (likely(!atomic_read(&event->mmap_count)))
+                return false;
+
+        perf_event_update_time(event);
+        perf_set_shadow_time(event, event->ctx);
+        perf_event_update_userpage(event);
+
+        return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+        struct perf_event *event;
+
+        if (!event_update_userpage(group_event))
+                return;
+
+        for_each_sibling_event(event, group_event)
+                event_update_userpage(event);
+}
+
 static int merge_sched_in(struct perf_event *event, void *data)
 {
         struct perf_event_context *ctx = event->ctx;
@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
         }
 
         if (event->state == PERF_EVENT_STATE_INACTIVE) {
+                *can_add_hw = 0;
                 if (event->attr.pinned) {
                         perf_cgroup_event_disable(event, ctx);
                         perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+                } else {
+                        ctx->rotate_necessary = 1;
+                        perf_mux_hrtimer_restart(cpuctx);
+                        group_update_userpage(event);
                 }
-
-                *can_add_hw = 0;
-                ctx->rotate_necessary = 1;
-                perf_mux_hrtimer_restart(cpuctx);
         }
 
         return 0;
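As I read the new else branch above, the merge_sched_in() rework targets the multiplexing case: a non-pinned event left INACTIVE because the PMU ran out of counters now still gets its group's times and userpages refreshed. This is not part of the patch, but as a reminder of how those times are consumed, here is a minimal read()-based sketch (the event choice and the busy loop are arbitrary assumptions) that scales a possibly multiplexed count by time_enabled/time_running:

/*
 * Illustration only: open one hardware counter on the current task, do some
 * work, and scale the raw count by time_enabled/time_running, which differ
 * exactly when the event was multiplexed off the PMU.
 */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct read_result {                    /* matches the read_format chosen below */
        uint64_t value;
        uint64_t time_enabled;
        uint64_t time_running;
};

int main(void)
{
        struct perf_event_attr attr;
        struct read_result rf;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING;

        int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        for (volatile long i = 0; i < 100000000; i++)
                ;                       /* some work for the counter to see */

        if (read(fd, &rf, sizeof(rf)) != sizeof(rf)) {
                perror("read");
                return 1;
        }

        /* If the event was multiplexed, time_running < time_enabled and the
         * raw value is scaled up to estimate the full-period count. */
        double scaled = rf.time_running ?
                (double)rf.value * rf.time_enabled / rf.time_running : 0.0;

        printf("raw=%llu enabled=%llu running=%llu scaled=%.0f\n",
               (unsigned long long)rf.value,
               (unsigned long long)rf.time_enabled,
               (unsigned long long)rf.time_running, scaled);
        return 0;
}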
@@ -6324,6 +6348,8 @@ accounting:
 
                 ring_buffer_attach(event, rb);
 
+                perf_event_update_time(event);
+                perf_set_shadow_time(event, event->ctx);
                 perf_event_init_userpage(event);
                 perf_event_update_userpage(event);
         } else {