arm64: prep stack walkers for THREAD_INFO_IN_TASK
When CONFIG_THREAD_INFO_IN_TASK is selected, task stacks may be freed before a task is destroyed. To account for this, the stacks are refcounted, and when manipulating the stack of another task, it is necessary to get/put the stack to ensure it isn't freed and/or re-used while we do so.

This patch reworks the arm64 stack walking code to account for this. When CONFIG_THREAD_INFO_IN_TASK is not selected these perform no refcounting, and this should only be a structural change that does not affect behaviour.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
parent
2020a5ae7c
commit
9bbd4c56b0
@@ -350,27 +350,35 @@ struct task_struct *__switch_to(struct task_struct *prev,
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
-	unsigned long stack_page;
+	unsigned long stack_page, ret = 0;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
+	stack_page = (unsigned long)try_get_task_stack(p);
+	if (!stack_page)
+		return 0;
+
 	frame.fp = thread_saved_fp(p);
 	frame.sp = thread_saved_sp(p);
 	frame.pc = thread_saved_pc(p);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = p->curr_ret_stack;
 #endif
-	stack_page = (unsigned long)task_stack_page(p);
 	do {
 		if (frame.sp < stack_page ||
 		    frame.sp >= stack_page + THREAD_SIZE ||
 		    unwind_frame(p, &frame))
-			return 0;
-		if (!in_sched_functions(frame.pc))
-			return frame.pc;
+			goto out;
+		if (!in_sched_functions(frame.pc)) {
+			ret = frame.pc;
+			goto out;
+		}
 	} while (count ++ < 16);
-	return 0;
+
+out:
+	put_task_stack(p);
+	return ret;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
@@ -181,6 +181,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	struct stack_trace_data data;
 	struct stackframe frame;
 
+	if (!try_get_task_stack(tsk))
+		return;
+
 	data.trace = trace;
 	data.skip = trace->skip;
 
@@ -202,6 +205,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	walk_stackframe(tsk, &frame, save_trace, &data);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+	put_task_stack(tsk);
 }
 
 void save_stack_trace(struct stack_trace *trace)
@@ -148,6 +148,9 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	if (!tsk)
 		tsk = current;
 
+	if (!try_get_task_stack(tsk))
+		return;
+
 	/*
 	 * Switching between stacks is valid when tracing current and in
 	 * non-preemptible context.
@@ -213,6 +216,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 			 stack + sizeof(struct pt_regs));
 		}
 	}
+
+	put_task_stack(tsk);
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)
Loading…
Reference in New Issue
Block a user