tracing/wakeup: move access to wakeup_cpu into spinlock
The code had the following outside the lock:

        if (next != wakeup_task)
                return;

        pc = preempt_count();

        /* The task we are waiting for is waking up */
        data = wakeup_trace->data[wakeup_cpu];

On initialization, wakeup_task is NULL and wakeup_cpu is -1. This code
is not under a lock. If wakeup_task is set on another CPU as that task
is waking up, we can see the wakeup_task before wakeup_cpu is set. If
we read wakeup_cpu while it is still -1, we end up with a bad data
pointer.

This patch moves the reading of wakeup_cpu within the protection of
the spinlock used to protect the writing of wakeup_cpu and wakeup_task.

[ Impact: remove possible race causing invalid pointer dereference ]

Reported-by: Maneesh Soni <maneesh@in.ibm.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
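To make the race concrete, here is a minimal userspace sketch of the same publish-then-read pattern. All names are hypothetical stand-ins: a pthread mutex plays the role of wakeup_lock, and a small int array stands in for wakeup_trace->data[]. It illustrates the bug and the shape of the fix, not the actual kernel code:

/*
 * The writer publishes wakeup_task and wakeup_cpu together, under the
 * lock. A reader that tests wakeup_task and then reads wakeup_cpu
 * without the lock can observe wakeup_cpu still at its initial -1.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *wakeup_task;	/* NULL until a wakeup is being traced */
static int wakeup_cpu = -1;	/* set together with wakeup_task       */
static int per_cpu_data[8];	/* stand-in for per-CPU trace data     */

static void start_wakeup(void *task, int cpu)
{
	pthread_mutex_lock(&lock);
	wakeup_task = task;
	wakeup_cpu = cpu;
	pthread_mutex_unlock(&lock);
}

/* Buggy reader: mirrors the old code; wakeup_cpu is read unlocked. */
static int probe_buggy(void *next)
{
	if (next != wakeup_task)
		return 0;
	/*
	 * wakeup_task may become visible before wakeup_cpu: an index
	 * of -1 here is the "bad data pointer" the patch describes.
	 */
	return per_cpu_data[wakeup_cpu];
}

/* Fixed reader: mirrors the patch; re-check and read under the lock. */
static int probe_fixed(void *next)
{
	int val = 0;

	if (next != wakeup_task)	/* cheap unlocked early-out */
		return 0;

	pthread_mutex_lock(&lock);
	if (next == wakeup_task)	/* we could race with the lock */
		val = per_cpu_data[wakeup_cpu];
	pthread_mutex_unlock(&lock);
	return val;
}

int main(void)
{
	int task;			/* dummy task identity */

	start_wakeup(&task, 3);
	printf("buggy reader sees %d\n", probe_buggy(&task));
	printf("fixed reader sees %d\n", probe_fixed(&task));
	return 0;
}

With a single thread the buggy path happens to work; the failure needs the reader to run between the writer's two stores becoming visible, which is exactly the window the patch closes by reading wakeup_cpu inside the lock.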
commit 9be24414aa
parent 6a74aa4090
kernel/trace/trace_sched_wakeup.c:

@@ -138,9 +138,6 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 
 	pc = preempt_count();
 
-	/* The task we are waiting for is waking up */
-	data = wakeup_trace->data[wakeup_cpu];
-
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
 	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
@@ -154,6 +151,9 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
+	/* The task we are waiting for is waking up */
+	data = wakeup_trace->data[wakeup_cpu];
+
 	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);