forked from Minki/linux
kprobes: treewide: Use 'kprobe_opcode_t *' for the code address in get_optimized_kprobe()
Since get_optimized_kprobe() is only used inside kprobes, it doesn't need to use 'unsigned long' type for 'addr' parameter. Make it use 'kprobe_opcode_t *' for the 'addr' parameter and subsequent call of arch_within_optimized_kprobe() also should use 'kprobe_opcode_t *'. Note that MAX_OPTIMIZED_LENGTH and RELATIVEJUMP_SIZE are defined by byte-size, but the size of 'kprobe_opcode_t' depends on the architecture. Therefore, we must be careful when calculating addresses using those macros. Link: https://lkml.kernel.org/r/163163040680.489837.12133032364499833736.stgit@devnote2 Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
parent
57d4e31780
commit
c42421e205
@@ -347,10 +347,11 @@ void arch_unoptimize_kprobes(struct list_head *oplist,
 }
 
 int arch_within_optimized_kprobe(struct optimized_kprobe *op,
-				 unsigned long addr)
+				 kprobe_opcode_t *addr)
 {
-	return ((unsigned long)op->kp.addr <= addr &&
-		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
+	return (op->kp.addr <= addr &&
+		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
+
 }
 
 void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
@@ -301,8 +301,8 @@ void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_li
 	}
 }
 
-int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
 {
-	return ((unsigned long)op->kp.addr <= addr &&
-		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
+	return (op->kp.addr <= addr &&
+		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
 }
@@ -367,10 +367,10 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
 
 /* Check the addr is within the optimized instructions. */
 int arch_within_optimized_kprobe(struct optimized_kprobe *op,
-				 unsigned long addr)
+				 kprobe_opcode_t *addr)
 {
-	return ((unsigned long)op->kp.addr <= addr &&
-		(unsigned long)op->kp.addr + op->optinsn.size > addr);
+	return (op->kp.addr <= addr &&
+		op->kp.addr + op->optinsn.size > addr);
 }
 
 /* Free optimized instruction slot */
@@ -329,7 +329,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
 					  struct list_head *done_list);
 extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
 extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
-					unsigned long addr);
+					kprobe_opcode_t *addr);
 
 extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
 
@@ -485,15 +485,15 @@ static int kprobe_queued(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including 'addr' (exclude breakpoint).
  */
-static struct kprobe *get_optimized_kprobe(unsigned long addr)
+static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
 {
 	int i;
 	struct kprobe *p = NULL;
 	struct optimized_kprobe *op;
 
 	/* Don't check i == 0, since that is a breakpoint case. */
-	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
-		p = get_kprobe((void *)(addr - i));
+	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
+		p = get_kprobe(addr - i);
 
 	if (p && kprobe_optready(p)) {
 		op = container_of(p, struct optimized_kprobe, kp);
@@ -967,7 +967,7 @@ static void __arm_kprobe(struct kprobe *p)
 	lockdep_assert_held(&text_mutex);
 
 	/* Find the overlapping optimized kprobes. */
-	_p = get_optimized_kprobe((unsigned long)p->addr);
+	_p = get_optimized_kprobe(p->addr);
 	if (unlikely(_p))
 		/* Fallback to unoptimized kprobe */
 		unoptimize_kprobe(_p, true);
@@ -989,7 +989,7 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 	if (!kprobe_queued(p)) {
 		arch_disarm_kprobe(p);
 		/* If another kprobe was blocked, re-optimize it. */
-		_p = get_optimized_kprobe((unsigned long)p->addr);
+		_p = get_optimized_kprobe(p->addr);
 		if (unlikely(_p) && reopt)
 			optimize_kprobe(_p);
 	}
Loading…
Reference in New Issue
Block a user