mirror of https://github.com/torvalds/linux.git
kprobes: Tell lockdep about kprobe nesting
The kprobe handlers have protection that prohibits other handlers from executing in other contexts (for example, if an NMI comes in while processing a kprobe and executes the same kprobe, it will fail with a "busy" return), but lockdep is unaware of this protection. Use lockdep's nesting API to differentiate between locks taken in INT3 context and other contexts to suppress the false warnings.

Link: https://lore.kernel.org/r/20201102160234.fa0ae70915ad9e2b21c08b85@kernel.org
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
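For illustration only, here is a minimal sketch of the annotation pattern this patch applies: passing a lockdep subclass that differs by context, so lockdep tracks the NMI-context acquisition of the same lock as a distinct class. This is not code from the patch; the demo_* names are made up for the example.

#include <linux/spinlock.h>
#include <linux/hardirq.h>	/* in_nmi() */

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_lock_any_context(unsigned long *flags)
{
	/*
	 * Subclass 0 in normal context, subclass 1 in NMI context.
	 * Lockdep then treats the two acquisitions as separate
	 * classes, so the (already safe) nested take no longer
	 * looks like a self-deadlock.
	 */
	raw_spin_lock_irqsave_nested(&demo_lock, *flags, !!in_nmi());
}

static void demo_unlock_any_context(unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&demo_lock, *flags);
}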
This commit is contained in:
parent 561ca66910
commit 645f224e7b
@@ -1249,7 +1249,13 @@ __acquires(hlist_lock)
 
 	*head = &kretprobe_inst_table[hash];
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	raw_spin_lock_irqsave(hlist_lock, *flags);
+	/*
+	 * Nested is a workaround that will soon not be needed.
+	 * There's other protections that make sure the same lock
+	 * is not taken on the same CPU that lockdep is unaware of.
+	 * Differentiate when it is taken in NMI context.
+	 */
+	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
 }
 NOKPROBE_SYMBOL(kretprobe_hash_lock);
 
@@ -1258,7 +1264,13 @@ static void kretprobe_table_lock(unsigned long hash,
 __acquires(hlist_lock)
 {
 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	raw_spin_lock_irqsave(hlist_lock, *flags);
+	/*
+	 * Nested is a workaround that will soon not be needed.
+	 * There's other protections that make sure the same lock
+	 * is not taken on the same CPU that lockdep is unaware of.
+	 * Differentiate when it is taken in NMI context.
+	 */
+	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
 }
 NOKPROBE_SYMBOL(kretprobe_table_lock);
 
@@ -2028,7 +2040,12 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 
 	/* TODO: consider to only swap the RA after the last pre_handler fired */
 	hash = hash_ptr(current, KPROBE_HASH_BITS);
-	raw_spin_lock_irqsave(&rp->lock, flags);
+	/*
+	 * Nested is a workaround that will soon not be needed.
+	 * There's other protections that make sure the same lock
+	 * is not taken on the same CPU that lockdep is unaware of.
+	 */
+	raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
 	if (!hlist_empty(&rp->free_instances)) {
 		ri = hlist_entry(rp->free_instances.first,
 				struct kretprobe_instance, hlist);
@@ -2039,7 +2056,7 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 	ri->task = current;
 
 	if (rp->entry_handler && rp->entry_handler(ri, regs)) {
-		raw_spin_lock_irqsave(&rp->lock, flags);
+		raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
 		hlist_add_head(&ri->hlist, &rp->free_instances);
 		raw_spin_unlock_irqrestore(&ri->rp->lock, flags);
 		return 0;
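For context, the "busy" protection the changelog refers to works roughly like the following hedged sketch. All demo_* names are hypothetical; the real kprobes implementation uses the per-CPU current_kprobe/kprobe_ctlblk machinery rather than a bare flag.

#include <linux/percpu.h>
#include <linux/errno.h>

static DEFINE_PER_CPU(int, demo_kprobe_busy);

static int demo_handler(void)
{
	/*
	 * A nested hit (e.g. from an NMI that fires while this CPU is
	 * already inside the handler) finds the flag set and bails out
	 * with "busy" instead of re-taking the handler's locks.
	 */
	if (this_cpu_read(demo_kprobe_busy))
		return -EBUSY;

	this_cpu_write(demo_kprobe_busy, 1);
	/* ... handler body that takes the kretprobe locks ... */
	this_cpu_write(demo_kprobe_busy, 0);
	return 0;
}

Because this guard already prevents the same lock from being taken twice on one CPU, the nested annotations in the hunks above only inform lockdep; they do not change the locking behavior itself.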