diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bb9dee4b4f58..7d9a444f2f24 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6752,11 +6752,9 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 	int r;
 
 	/* try to reinject previous events if any */
-	if (vcpu->arch.exception.injected) {
-		kvm_x86_ops->queue_exception(vcpu);
-		return 0;
-	}
+	if (vcpu->arch.exception.injected)
+		kvm_x86_ops->queue_exception(vcpu);
 
 	/*
 	 * Do not inject an NMI or interrupt if there is a pending
 	 * exception. Exceptions and interrupts are recognized at
@@ -6771,18 +6769,19 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 	 * serviced prior to recognizing any new events in order to
 	 * fully complete the previous instruction.
 	 */
-	if (!vcpu->arch.exception.pending) {
-		if (vcpu->arch.nmi_injected) {
+	else if (!vcpu->arch.exception.pending) {
+		if (vcpu->arch.nmi_injected)
 			kvm_x86_ops->set_nmi(vcpu);
-			return 0;
-		}
-
-		if (vcpu->arch.interrupt.injected) {
+		else if (vcpu->arch.interrupt.injected)
 			kvm_x86_ops->set_irq(vcpu);
-			return 0;
-		}
 	}
 
+	/*
+	 * Call check_nested_events() even if we reinjected a previous event
+	 * in order for caller to determine if it should require immediate-exit
+	 * from L2 to L1 due to pending L1 events which require exit
+	 * from L2 to L1.
+	 */
 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
 		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
 		if (r != 0)
@@ -6795,6 +6794,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 					vcpu->arch.exception.has_error_code,
 					vcpu->arch.exception.error_code);
 
+		WARN_ON_ONCE(vcpu->arch.exception.injected);
 		vcpu->arch.exception.pending = false;
 		vcpu->arch.exception.injected = true;
 
@@ -6809,7 +6809,14 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 		}
 
 		kvm_x86_ops->queue_exception(vcpu);
-	} else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
+	}
+
+	/* Don't consider new event if we re-injected an event */
+	if (kvm_event_needs_reinjection(vcpu))
+		return 0;
+
+	if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
+	    kvm_x86_ops->smi_allowed(vcpu)) {
		vcpu->arch.smi_pending = false;
		++vcpu->arch.smi_count;
		enter_smm(vcpu);
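
To make the resulting ordering easier to follow, below is a small userspace C sketch of the control flow that inject_pending_event() has after this patch. It is not kernel code: the vcpu_model struct, the *_model helpers and the printf placeholders are hypothetical scaffolding invented for illustration. Only the ordering mirrors the hunks above: re-inject any previously injected event first, always reach check_nested_events() so a pending L1 event can still force an immediate exit from L2, and only then bail out before considering brand-new events (SMI/NMI/IRQ) if something was re-injected.

/* Toy model of the post-patch inject_pending_event() ordering (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	bool exception_injected;   /* models vcpu->arch.exception.injected */
	bool exception_pending;    /* models vcpu->arch.exception.pending  */
	bool nmi_injected;         /* models vcpu->arch.nmi_injected       */
	bool irq_injected;         /* models vcpu->arch.interrupt.injected */
	bool guest_mode;           /* models is_guest_mode()               */
	bool smi_pending;          /* models vcpu->arch.smi_pending        */
};

/* models kvm_event_needs_reinjection() */
static bool needs_reinjection(const struct vcpu_model *v)
{
	return v->exception_injected || v->nmi_injected || v->irq_injected;
}

/* models kvm_x86_ops->check_nested_events(); 0 == no exit to L1 required */
static int check_nested_events_model(const struct vcpu_model *v)
{
	(void)v;
	return 0;
}

static int inject_pending_event_model(struct vcpu_model *v)
{
	int r;

	/* 1. Re-inject a previously injected event, if any. */
	if (v->exception_injected)
		printf("re-queue exception\n");
	else if (!v->exception_pending) {
		if (v->nmi_injected)
			printf("re-set NMI\n");
		else if (v->irq_injected)
			printf("re-set IRQ\n");
	}

	/*
	 * 2. Unlike before the patch, check_nested_events() is reached even
	 *    when an event was re-injected, so a pending L1 event can still
	 *    request an immediate exit from L2 to L1.
	 */
	if (v->guest_mode) {
		r = check_nested_events_model(v);
		if (r != 0)
			return r;
	}

	/* (delivery of a pending exception would go here, elided) */

	/* 3. Only now refuse to consider brand-new events. */
	if (needs_reinjection(v))
		return 0;

	if (v->smi_pending)
		printf("enter SMM\n");
	/* ... new NMI/IRQ injection elided ... */
	return 0;
}

int main(void)
{
	/* Example: an NMI is being re-injected while the vCPU runs in L2. */
	struct vcpu_model v = { .nmi_injected = true, .guest_mode = true };

	return inject_pending_event_model(&v);
}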