perf_events: Fix for transaction recovery in group_sched_in()
This new version (see commit 8e5fc1a) is much simpler and ensures that
in case of error in group_sched_in() during event_sched_in(), the
events up to the failed event go through regular event_sched_out().
But the failed event and the remaining events in the group have their
timings adjusted as if they had also gone through event_sched_in() and
event_sched_out(). This ensures timing uniformity across all events in
a group. This also takes care of the tstamp_stopped problem in case
the group could never be scheduled. The tstamp_stopped is updated as
if the event had actually run.
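For context, the "time delta calculation" this fix relies on is the one in update_event_times(): an inactive event's run time is derived from tstamp_stopped and tstamp_running, so tstamp_stopped must be valid even for events that never ran. Below is a minimal, self-contained sketch of that bookkeeping, simplified from the kernel of this era; the struct and helper names are illustrative, not the kernel's own:

#include <stdint.h>

typedef uint64_t u64;

/* Stand-in for the perf_event timing fields involved in this fix. */
struct evt_times {
        u64 tstamp_enabled;     /* ctx time when the event was enabled */
        u64 tstamp_running;     /* start of the "running" accounting window */
        u64 tstamp_stopped;     /* ctx time when the event last stopped */
        u64 total_time_enabled;
        u64 total_time_running;
        int inactive;           /* nonzero if the event is off the PMU */
};

/* Simplified update_event_times(): an inactive event stopped accruing
 * run time at tstamp_stopped, so a stale tstamp_stopped (as happened
 * when a group could never be scheduled) corrupts the run delta. */
static void update_times_sketch(struct evt_times *e, u64 now)
{
        u64 run_end = e->inactive ? e->tstamp_stopped : now;

        e->total_time_enabled = now - e->tstamp_enabled;
        e->total_time_running = run_end - e->tstamp_running;
}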
With this patch, the following now reports correct time_enabled,
in case the NMI watchdog is active:
$ task -e unhalted_core_cycles,instructions_retired,baclears,baclears noploop 1
noploop for 1 seconds
0 unhalted_core_cycles (100.00% scaling, ena=997,552,872, run=0)
0 instructions_retired (100.00% scaling, ena=997,552,872, run=0)
0 baclears (100.00% scaling, ena=997,552,872, run=0)
0 baclears (100.00% scaling, ena=997,552,872, run=0)
And the older test case also works:
$ task -einstructions_retired,baclears,baclears -e unhalted_core_cycles,baclears,baclears sleep 5
1680885 instructions_retired (69.39% scaling, ena=950756, run=291006)
10735 baclears (69.39% scaling, ena=950756, run=291006)
10735 baclears (69.39% scaling, ena=950756, run=291006)
0 unhalted_core_cycles (100.00% scaling, ena=817932, run=0)
0 baclears (100.00% scaling, ena=817932, run=0)
0 baclears (100.00% scaling, ena=817932, run=0)
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4cbeeebc.8ee7d80a.5a28.0d5f@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9ffcfa6f1f
commit d7842da470
@@ -691,6 +691,8 @@ group_sched_in(struct perf_event *group_event,
 {
        struct perf_event *event, *partial_group = NULL;
        struct pmu *pmu = group_event->pmu;
+       u64 now = ctx->time;
+       bool simulate = false;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
                return 0;
@@ -719,11 +721,27 @@ group_error:
        /*
         * Groups can be scheduled in as one unit only, so undo any
         * partial group before returning:
+        * The events up to the failed event are scheduled out normally,
+        * tstamp_stopped will be updated.
+        *
+        * The failed events and the remaining siblings need to have
+        * their timings updated as if they had gone thru event_sched_in()
+        * and event_sched_out(). This is required to get consistent timings
+        * across the group. This also takes care of the case where the group
+        * could never be scheduled by ensuring tstamp_stopped is set to mark
+        * the time the event was actually stopped, such that time delta
+        * calculation in update_event_times() is correct.
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                if (event == partial_group)
-                       break;
-               event_sched_out(event, cpuctx, ctx);
+                       simulate = true;
+
+               if (simulate) {
+                       event->tstamp_running += now - event->tstamp_stopped;
+                       event->tstamp_stopped = now;
+               } else {
+                       event_sched_out(event, cpuctx, ctx);
+               }
        }
        event_sched_out(group_event, cpuctx, ctx);
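To see why the simulate branch produces the corrected ena/run numbers shown above, here is a small worked example with hypothetical numbers, using the simplified bookkeeping sketched earlier:

/* Hypothetical context times (ns), for an event whose group never
 * scheduled: now = 1000, tstamp_enabled = 200, and
 * tstamp_running = tstamp_stopped = 600 from the last sched-out.
 *
 * The simulate branch pretends the event went through
 * event_sched_in()/event_sched_out() at 'now':
 *
 *     tstamp_running += now - tstamp_stopped;   // 600 + 400 = 1000
 *     tstamp_stopped  = now;                    // 1000
 *
 * update_event_times() for the inactive event then yields:
 *
 *     total_time_enabled = now - tstamp_enabled            = 800
 *     total_time_running = tstamp_stopped - tstamp_running = 0
 *
 * ena keeps advancing while run stays 0, which matches the
 * "(100.00% scaling, ena=..., run=0)" lines in the commit message.
 */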