Merge branch 'perf/stacktrace' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core
commit 729aa21ab8
@@ -152,6 +152,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 	(regs)->bp = caller_frame_pointer();			\
 	(regs)->cs = __KERNEL_CS;				\
 	regs->flags = 0;					\
+	asm volatile(						\
+		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
+		: "=m" ((regs)->sp)				\
+		:: "memory"					\
+	);							\
 }
 
 #else
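
The hunk above appears to extend the x86 perf_arch_fetch_caller_regs() template so that, besides the frame pointer, it also snapshots the current stack pointer into regs->sp with a single mov; the "=m" output writes straight to memory and the "memory" clobber keeps earlier stores from being reordered past the snapshot. A minimal user-space sketch of the same trick (assumed names, x86-64 GCC/Clang only, not the kernel macro itself):

#include <stdio.h>

struct fake_regs {
	unsigned long sp;
};

/* Snapshot the caller's stack pointer into regs->sp with one mov,
 * mirroring the "=m" output and "memory" clobber used above. */
#define fetch_caller_sp(regs) \
	asm volatile("mov %%rsp, %0" : "=m" ((regs)->sp) :: "memory")

int main(void)
{
	struct fake_regs regs;

	fetch_caller_sp(&regs);
	printf("sp = %#lx\n", regs.sp);
	return 0;
}
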
@@ -104,34 +104,6 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
 	return (stack >= irq_stack && stack < irq_stack_end);
 }
 
-/*
- * We are returning from the irq stack and go to the previous one.
- * If the previous stack is also in the irq stack, then bp in the first
- * frame of the irq stack points to the previous, interrupted one.
- * Otherwise we have another level of indirection: We first save
- * the bp of the previous stack, then we switch the stack to the irq one
- * and save a new bp that links to the previous one.
- * (See save_args())
- */
-static inline unsigned long
-fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
-		  unsigned long *irq_stack, unsigned long *irq_stack_end)
-{
-#ifdef CONFIG_FRAME_POINTER
-	struct stack_frame *frame = (struct stack_frame *)bp;
-	unsigned long next;
-
-	if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
-		if (!probe_kernel_address(&frame->next_frame, next))
-			return next;
-		else
-			WARN_ONCE(1, "Perf: bad frame pointer = %p in "
-				"callchain\n", &frame->next_frame);
-	}
-#endif
-	return bp;
-}
-
 /*
  * x86-64 can have up to three kernel stacks:
  *	process stack
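
The helper removed above existed only to repair the frame-pointer chain at the point where a backtrace crosses from the irq stack back to the interrupted one. Once the irq entry path saves rbp into the register frame itself (see the entry assembly hunks further down), a plain bp walk crosses that boundary without any fixup. A hedged, self-contained sketch of such a plain walk, with a stack_frame layout simplified from the removed code:

#include <stdio.h>

/* Simplified saved-frame layout: the caller's rbp, then the return
 * address pushed by the call. */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

/* Walk an unbroken frame-pointer chain and report each return address. */
static void walk_frames(const struct stack_frame *frame)
{
	while (frame) {
		printf("ret = %#lx\n", frame->return_address);
		frame = frame->next_frame;
	}
}

int main(void)
{
	/* Fake two-frame chain standing in for interrupted <- irq frames. */
	struct stack_frame interrupted = { NULL, 0x1111 };
	struct stack_frame irq = { &interrupted, 0x2222 };

	walk_frames(&irq);
	return 0;
}
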
@@ -155,9 +127,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		task = current;
 
 	if (!stack) {
-		stack = &dummy;
-		if (task && task != current)
+		if (regs)
+			stack = (unsigned long *)regs->sp;
+		else if (task && task != current)
 			stack = (unsigned long *)task->thread.sp;
+		else
+			stack = &dummy;
 	}
 
 	if (!bp)
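
This hunk changes where dump_trace() starts reading when the caller passes no explicit stack pointer: captured pt_regs win, then the saved sp of another (sleeping) task, and only then the address of a local dummy on the current stack. A small stand-alone sketch of that priority, with stub structures standing in for the kernel types (assumptions, not the real definitions):

#include <stdio.h>

/* Stub types standing in for the kernel structures (assumption). */
struct pt_regs { unsigned long sp; };
struct thread_struct { unsigned long sp; };
struct task_struct { struct thread_struct thread; };

/* Captured regs first, then a sleeping task's saved sp, then a dummy
 * local so the walk at least starts from the current stack. */
static unsigned long *pick_start_stack(struct pt_regs *regs,
				       struct task_struct *task,
				       struct task_struct *cur,
				       unsigned long *dummy)
{
	if (regs)
		return (unsigned long *)regs->sp;
	if (task && task != cur)
		return (unsigned long *)task->thread.sp;
	return dummy;
}

int main(void)
{
	unsigned long dummy;
	struct pt_regs regs = { .sp = 0x1000 };
	struct task_struct a = { { 0x2000 } }, b = { { 0x3000 } };

	printf("%p\n", (void *)pick_start_stack(&regs, &a, &a, &dummy));
	printf("%p\n", (void *)pick_start_stack(NULL, &b, &a, &dummy));
	printf("%p\n", (void *)pick_start_stack(NULL, &a, &a, &dummy));
	return 0;
}
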
@@ -205,8 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			 * pointer (index -1 to end) in the IRQ stack:
 			 */
 			stack = (unsigned long *) (irq_stack_end[-1]);
-			bp = fixup_bp_irq_link(bp, stack, irq_stack,
-					       irq_stack_end);
 			irq_stack_end = NULL;
 			ops->stack(data, "EOI");
 			continue;

@@ -297,27 +297,26 @@ ENDPROC(native_usergs_sysret64)
 	.endm
 
 /* save partial stack frame */
-	.pushsection .kprobes.text, "ax"
-ENTRY(save_args)
-	XCPT_FRAME
+	.macro SAVE_ARGS_IRQ
 	cld
-	/*
-	 * start from rbp in pt_regs and jump over
-	 * return address.
-	 */
-	movq_cfi rdi, RDI+8-RBP
-	movq_cfi rsi, RSI+8-RBP
-	movq_cfi rdx, RDX+8-RBP
-	movq_cfi rcx, RCX+8-RBP
-	movq_cfi rax, RAX+8-RBP
-	movq_cfi  r8,  R8+8-RBP
-	movq_cfi  r9,  R9+8-RBP
-	movq_cfi r10, R10+8-RBP
-	movq_cfi r11, R11+8-RBP
+	/* start from rbp in pt_regs and jump over */
+	movq_cfi rdi, RDI-RBP
+	movq_cfi rsi, RSI-RBP
+	movq_cfi rdx, RDX-RBP
+	movq_cfi rcx, RCX-RBP
+	movq_cfi rax, RAX-RBP
+	movq_cfi  r8,  R8-RBP
+	movq_cfi  r9,  R9-RBP
+	movq_cfi r10, R10-RBP
+	movq_cfi r11, R11-RBP
-	leaq -RBP+8(%rsp),%rdi	/* arg1 for handler */
-	movq_cfi rbp, 8		/* push %rbp */
-	leaq 8(%rsp), %rbp	/* mov %rsp, %ebp */
+
+	/* Save rbp so that we can unwind from get_irq_regs() */
+	movq_cfi rbp, 0
+
+	/* Save previous stack value */
+	movq %rsp, %rsi
+
+	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
 	testl $3, CS(%rdi)
 	je 1f
 	SWAPGS

@@ -329,19 +328,14 @@ ENTRY(save_args)
 	 */
 1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
-	popq_cfi %rax			/* move return address... */
 	mov PER_CPU_VAR(irq_stack_ptr),%rsp
 	EMPTY_FRAME 0
-	pushq_cfi %rbp			/* backlink for unwinder */
-	pushq_cfi %rax			/* ... to the new stack */
-	/*
-	 * We entered an interrupt context - irqs are off:
-	 */
-2:	TRACE_IRQS_OFF
-	ret
-	CFI_ENDPROC
-END(save_args)
-	.popsection
+
+2:	/* Store previous stack value */
+	pushq %rsi
+	/* We entered an interrupt context - irqs are off: */
+	TRACE_IRQS_OFF
+	.endm
 
 ENTRY(save_rest)
 	PARTIAL_FRAME 1 REST_SKIP+8
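
Two details of the SAVE_ARGS_IRQ macro introduced above are worth calling out: the interrupted rbp is written into the rbp slot of the partial register frame ("movq_cfi rbp, 0"), so a callchain started from the saved registers can seed its frame-pointer walk directly, and the old stack pointer is kept in %rsi and pushed onto the irq stack ("2: pushq %rsi") instead of being linked through rbp. A tiny C model of the first point, using an assumed layout rather than the kernel's real pt_regs:

#include <stdio.h>

/* Assumed, simplified frame: the interrupted rbp sits at a known slot
 * of the saved-register area. */
struct partial_regs {
	unsigned long bp;	/* written by "movq_cfi rbp, 0" */
	unsigned long di;	/* ...followed by the scratch registers */
};

/* An unwinder holding the saved registers seeds its walk from bp. */
static unsigned long unwind_seed(const struct partial_regs *regs)
{
	return regs->bp;
}

int main(void)
{
	struct partial_regs regs = { .bp = 0xdead0, .di = 0 };

	printf("frame-pointer walk starts at %#lx\n", unwind_seed(&regs));
	return 0;
}
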
@@ -791,7 +785,7 @@ END(interrupt)
 	/* reserve pt_regs for scratch regs and rbp */
 	subq $ORIG_RAX-RBP, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
-	call save_args
+	SAVE_ARGS_IRQ
 	PARTIAL_FRAME 0
 	call \func
 	.endm

@@ -814,15 +808,14 @@ ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	decl PER_CPU_VAR(irq_count)
-	leaveq
 
-	CFI_RESTORE		rbp
+	/* Restore saved previous stack */
+	popq %rsi
+	leaq 16(%rsi), %rsp
+
 	CFI_DEF_CFA_REGISTER	rsp
-	CFI_ADJUST_CFA_OFFSET	-8
+	CFI_ADJUST_CFA_OFFSET	-16
 
-	/* we did not save rbx, restore only from ARGOFFSET */
-	addq $8, %rsp
-	CFI_ADJUST_CFA_OFFSET	-8
 exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
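
Taken together with SAVE_ARGS_IRQ, the restore side now works by value rather than through the frame pointer: the old stack pointer pushed onto the irq stack is popped back here and %rsp is recomputed from it ("popq %rsi; leaq 16(%rsi), %rsp"), which is why "leaveq", the rbp CFI restore and the ARGOFFSET adjustment all go away. A hedged C model of that stack switch, with made-up sizes and names standing in for the real per-cpu irq stack:

#include <stdio.h>

#define IRQ_STACK_WORDS 64

static unsigned long irq_stack[IRQ_STACK_WORDS];

struct cpu_model {
	unsigned long *sp;			/* stand-in for %rsp */
};

/* Model of SAVE_ARGS_IRQ: remember the old sp, switch to the irq
 * stack, and store the old sp on the irq stack itself ("pushq %rsi"). */
static void irq_stack_enter(struct cpu_model *c)
{
	unsigned long *old_sp = c->sp;

	c->sp = &irq_stack[IRQ_STACK_WORDS];	/* mov irq_stack_ptr, %rsp */
	*--c->sp = (unsigned long)old_sp;	/* pushq %rsi */
}

/* Model of ret_from_intr: pop the saved value and rebuild sp from it
 * ("popq %rsi; leaq ...(%rsi), %rsp"); real offsets are omitted here. */
static void irq_stack_leave(struct cpu_model *c)
{
	unsigned long old_sp = *c->sp++;

	c->sp = (unsigned long *)old_sp;
}

int main(void)
{
	unsigned long task_stack[32];
	struct cpu_model c = { .sp = &task_stack[32] };

	irq_stack_enter(&c);
	printf("on irq stack:       %p\n", (void *)c.sp);
	irq_stack_leave(&c);
	printf("back on task stack: %p\n", (void *)c.sp);
	return 0;
}
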
|