KVM: async_pf: avoid recursive flushing of work items
This was reported by syzkaller:

[ INFO: possible recursive locking detected ]
4.9.0-rc4+ #49 Not tainted
---------------------------------------------
kworker/2:1/5658 is trying to acquire lock:
 ([ 1644.769018] (&work->work)
[< inline >] list_empty include/linux/compiler.h:243
[<ffffffff8128dd60>] flush_work+0x0/0x660 kernel/workqueue.c:1511

but task is already holding lock:
 ([ 1644.769018] (&work->work)
[<ffffffff812916ab>] process_one_work+0x94b/0x1900 kernel/workqueue.c:2093

stack backtrace:
CPU: 2 PID: 5658 Comm: kworker/2:1 Not tainted 4.9.0-rc4+ #49
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Workqueue: events async_pf_execute
 ffff8800676ff630 ffffffff81c2e46b ffffffff8485b930 ffff88006b1fc480
 0000000000000000 ffffffff8485b930 ffff8800676ff7e0 ffffffff81339b27
 ffff8800676ff7e8 0000000000000046 ffff88006b1fcce8 ffff88006b1fccf0
Call Trace:
...
[<ffffffff8128ddf3>] flush_work+0x93/0x660 kernel/workqueue.c:2846
[<ffffffff812954ea>] __cancel_work_timer+0x17a/0x410 kernel/workqueue.c:2916
[<ffffffff81295797>] cancel_work_sync+0x17/0x20 kernel/workqueue.c:2951
[<ffffffff81073037>] kvm_clear_async_pf_completion_queue+0xd7/0x400 virt/kvm/async_pf.c:126
[< inline >] kvm_free_vcpus arch/x86/kvm/x86.c:7841
[<ffffffff810b728d>] kvm_arch_destroy_vm+0x23d/0x620 arch/x86/kvm/x86.c:7946
[< inline >] kvm_destroy_vm virt/kvm/kvm_main.c:731
[<ffffffff8105914e>] kvm_put_kvm+0x40e/0x790 virt/kvm/kvm_main.c:752
[<ffffffff81072b3d>] async_pf_execute+0x23d/0x4f0 virt/kvm/async_pf.c:111
[<ffffffff8129175c>] process_one_work+0x9fc/0x1900 kernel/workqueue.c:2096
[<ffffffff8129274f>] worker_thread+0xef/0x1480 kernel/workqueue.c:2230
[<ffffffff812a5a94>] kthread+0x244/0x2d0 kernel/kthread.c:209
[<ffffffff831f102a>] ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:433

The reason is that kvm_put_kvm is causing the destruction of the VM, but the page fault is still on the ->queue list. The ->queue list is owned by the VCPU, not by the work items, so we cannot just add list_del to the work item.

Instead, use work->vcpu to note async page faults that have been resolved and will be processed through the done list. There is no need to flush those.

Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
commit 22583f0d9c (parent e3fd9a93a1)
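The diff below touches two places in virt/kvm/async_pf.c: async_pf_execute() now clears work->vcpu once the completed fault has been moved to the done list, and kvm_clear_async_pf_completion_queue() skips such items instead of flushing them, since flushing could end up waiting on the very work item that triggered the teardown. As a reading aid, here is a minimal userspace sketch of that pattern using POSIX threads. It is an analogy, not kernel code: fault_item, fault_worker(), clear_completion_queue(), and queue_lock are illustrative names standing in for kvm_async_pf, async_pf_execute(), kvm_clear_async_pf_completion_queue(), and async_pf.lock, and the kernel's done-list processing is omitted.

/* Build with: cc -pthread async_pf_sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Illustrative stand-in for struct kvm_async_pf. */
struct fault_item {
        struct fault_item *next;
        pthread_t worker;   /* stands in for the work item                 */
        bool done;          /* stands in for work->vcpu having been NULLed */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fault_item *queue_head;

/* Worker: resolve the fault, then flag the item as completed under the lock. */
static void *fault_worker(void *arg)
{
        struct fault_item *item = arg;

        /* ... the page fault would be resolved here ... */

        pthread_mutex_lock(&queue_lock);
        item->done = true;  /* analogous to "apf->vcpu = NULL" */
        pthread_mutex_unlock(&queue_lock);
        return NULL;
}

/* Teardown: wait only for workers that have not completed yet. */
static void clear_completion_queue(void)
{
        pthread_mutex_lock(&queue_lock);
        while (queue_head) {
                struct fault_item *item = queue_head;

                queue_head = item->next;
                if (item->done) {
                        /*
                         * Already resolved: nothing to flush or wait for.
                         * The worker's last access to the item was setting
                         * "done" under the lock, so it is safe to reclaim.
                         */
                        pthread_detach(item->worker);
                        free(item);
                        continue;
                }
                /*
                 * Drop the lock around the blocking wait, mirroring how the
                 * patch drops async_pf.lock around flush_work() and
                 * cancel_work_sync(), then re-acquire it for the next item.
                 */
                pthread_mutex_unlock(&queue_lock);
                pthread_join(item->worker, NULL);
                free(item);
                pthread_mutex_lock(&queue_lock);
        }
        pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
        struct fault_item *item = calloc(1, sizeof(*item));

        if (!item)
                return 1;
        if (pthread_create(&item->worker, NULL, fault_worker, item)) {
                free(item);
                return 1;
        }
        queue_head = item;  /* single-item queue is enough for the demo */
        clear_completion_queue();
        return 0;
}

The point the sketch mirrors is that completion is recorded under the same lock the teardown path holds while walking the queue, so the "skip it" check cannot race with the worker. That is also why the patch takes async_pf.lock at the top of kvm_clear_async_pf_completion_queue() and re-takes it after each blocking flush, rather than only before the done-list loop as before.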
virt/kvm/async_pf.c

@@ -91,6 +91,7 @@ static void async_pf_execute(struct work_struct *work)
 
         spin_lock(&vcpu->async_pf.lock);
         list_add_tail(&apf->link, &vcpu->async_pf.done);
+        apf->vcpu = NULL;
         spin_unlock(&vcpu->async_pf.lock);
 
         /*
@@ -113,6 +114,8 @@ static void async_pf_execute(struct work_struct *work)
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 {
+        spin_lock(&vcpu->async_pf.lock);
+
         /* cancel outstanding work queue item */
         while (!list_empty(&vcpu->async_pf.queue)) {
                 struct kvm_async_pf *work =
@@ -120,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                                  typeof(*work), queue);
                 list_del(&work->queue);
 
+                /*
+                 * We know it's present in vcpu->async_pf.done, do
+                 * nothing here.
+                 */
+                if (!work->vcpu)
+                        continue;
+
+                spin_unlock(&vcpu->async_pf.lock);
 #ifdef CONFIG_KVM_ASYNC_PF_SYNC
                 flush_work(&work->work);
 #else
@@ -129,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                         kmem_cache_free(async_pf_cache, work);
                 }
 #endif
+                spin_lock(&vcpu->async_pf.lock);
         }
 
-        spin_lock(&vcpu->async_pf.lock);
         while (!list_empty(&vcpu->async_pf.done)) {
                 struct kvm_async_pf *work =
                         list_first_entry(&vcpu->async_pf.done,