KVM: Non-atomic interrupt injection

Change the interrupt injection code to work from preemptible, interrupts-enabled
context.  This works by adding a ->cancel_injection() operation that undoes an
injection in case we were not able to actually enter the guest (a condition
that could not arise with atomic injection).

Signed-off-by: Avi Kivity <avi@redhat.com>

Author: Avi Kivity <avi@redhat.com>
Date:   2010-07-20 15:06:17 +03:00
Commit: b463a6f744
Parent: 83422e17c1

4 files changed, 48 insertions(+), 28 deletions(-)
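
The heart of the change: event injection moves out of the IRQs-off,
preemption-off critical section, and the new ->cancel_injection() hook undoes
the injection whenever the entry is abandoned. As a rough illustration of that
contract, here is a freestanding toy model; all names (toy_vcpu, enter_guest,
late_work_arrived) are illustrative stand-ins, not kernel code:

/* Toy model of the inject/cancel contract introduced by this patch. */
#include <assert.h>
#include <stdbool.h>

struct toy_vcpu {
	bool event_pending;	/* event queued, not yet injected */
	bool event_injected;	/* event armed for the next guest entry */
};

static void inject_pending_event(struct toy_vcpu *v)
{
	if (v->event_pending) {
		v->event_pending = false;
		v->event_injected = true;
	}
}

/* ->cancel_injection(): turn an armed injection back into a pending
 * event so the next entry attempt re-injects it. */
static void cancel_injection(struct toy_vcpu *v)
{
	if (v->event_injected) {
		v->event_injected = false;
		v->event_pending = true;
	}
}

static int enter_guest(struct toy_vcpu *v, bool late_work_arrived)
{
	inject_pending_event(v);	/* now done preemptible, IRQs on */
	/* ...preempt_disable()/local_irq_disable() happen here... */
	if (late_work_arrived) {	/* request, resched or signal */
		cancel_injection(v);	/* entry abandoned: undo it */
		return 1;		/* retry from the top of the loop */
	}
	v->event_injected = false;	/* hardware delivered the event */
	return 0;
}

int main(void)
{
	struct toy_vcpu v = { .event_pending = true };
	enter_guest(&v, true);		/* aborted entry */
	assert(v.event_pending);	/* the event survived the abort */
	enter_guest(&v, false);		/* successful entry */
	assert(!v.event_pending && !v.event_injected);
	return 0;
}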

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h

@@ -552,6 +552,7 @@ struct kvm_x86_ops {
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject);
+	void (*cancel_injection)(struct kvm_vcpu *vcpu);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c

@@ -3261,6 +3261,17 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 	}
 }
 
+static void svm_cancel_injection(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_control_area *control = &svm->vmcb->control;
+
+	control->exit_int_info = control->event_inj;
+	control->exit_int_info_err = control->event_inj_err;
+	control->event_inj = 0;
+	svm_complete_interrupts(svm);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #else
@@ -3631,6 +3642,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_irq = svm_set_irq,
 	.set_nmi = svm_inject_nmi,
 	.queue_exception = svm_queue_exception,
+	.cancel_injection = svm_cancel_injection,
 	.interrupt_allowed = svm_interrupt_allowed,
 	.nmi_allowed = svm_nmi_allowed,
 	.get_nmi_mask = svm_get_nmi_mask,
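
On SVM, hardware records an event it failed to deliver in EXITINTINFO, and
svm_complete_interrupts() requeues from there. svm_cancel_injection() reuses
that path: it copies the still-armed EVENTINJ contents into exit_int_info,
clears the injection, and lets the normal completion code requeue the event
exactly as if the CPU itself had failed to deliver it. A freestanding sketch
of the copy-and-requeue idea (illustrative names and field layout, not the
kernel's):

#include <assert.h>
#include <stdint.h>

#define TOY_EVTINJ_VALID (1u << 31)	/* "valid" bit, as in EVENTINJ */

struct toy_vmcb_control {
	uint32_t event_inj;		/* event armed for injection */
	uint32_t exit_int_info;		/* event the CPU failed to deliver */
};

struct toy_svm_vcpu {
	struct toy_vmcb_control control;
	uint32_t pending_vector;	/* software event queue, simplified */
};

/* Stand-in for svm_complete_interrupts(): requeue whatever the
 * "hardware" left in exit_int_info. */
static void toy_complete_interrupts(struct toy_svm_vcpu *v)
{
	if (v->control.exit_int_info & TOY_EVTINJ_VALID)
		v->pending_vector = v->control.exit_int_info & 0xff;
}

/* The cancel trick: pretend delivery failed, then requeue normally. */
static void toy_cancel_injection(struct toy_svm_vcpu *v)
{
	v->control.exit_int_info = v->control.event_inj;
	v->control.event_inj = 0;
	toy_complete_interrupts(v);
}

int main(void)
{
	struct toy_svm_vcpu v = {
		.control.event_inj = TOY_EVTINJ_VALID | 0x20,
	};
	toy_cancel_injection(&v);
	assert(v.pending_vector == 0x20);	/* vector 0x20 pending again */
	return 0;
}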

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c

@@ -3895,6 +3895,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 				  IDT_VECTORING_ERROR_CODE);
 }
 
+static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
+{
+	__vmx_complete_interrupts(to_vmx(vcpu),
+				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
+				  VM_ENTRY_INSTRUCTION_LEN,
+				  VM_ENTRY_EXCEPTION_ERROR_CODE);
+
+	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+}
+
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
@@ -4348,6 +4358,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_irq = vmx_inject_irq,
 	.set_nmi = vmx_inject_nmi,
 	.queue_exception = vmx_queue_exception,
+	.cancel_injection = vmx_cancel_injection,
 	.interrupt_allowed = vmx_interrupt_allowed,
 	.nmi_allowed = vmx_nmi_allowed,
 	.get_nmi_mask = vmx_get_nmi_mask,
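
On VMX there is no hardware-written record for an entry that never happened:
the event is still armed in the VM-entry interruption-information field.
vmx_cancel_injection() therefore feeds that field (rather than the usual
IDT-vectoring exit fields) to the shared __vmx_complete_interrupts() helper,
then clears it to disarm the injection. A freestanding sketch of the
read-requeue-clear sequence (single-field toy VMCS, illustrative names only):

#include <assert.h>
#include <stdint.h>

#define TOY_INTR_INFO_VALID (1u << 31)

/* Single-field toy VMCS: only the VM-entry interruption-info field. */
static uint32_t toy_entry_intr_info;

static uint32_t toy_vmcs_read(void)      { return toy_entry_intr_info; }
static void toy_vmcs_write(uint32_t val) { toy_entry_intr_info = val; }

static uint32_t toy_pending_vector;

/* Stand-in for the shared __vmx_complete_interrupts() helper:
 * requeue a still-valid event. */
static void toy_complete_interrupts(uint32_t intr_info)
{
	if (intr_info & TOY_INTR_INFO_VALID)
		toy_pending_vector = intr_info & 0xff;
}

static void toy_cancel_injection(void)
{
	toy_complete_interrupts(toy_vmcs_read());	/* requeue from entry field */
	toy_vmcs_write(0);				/* disarm the injection */
}

int main(void)
{
	toy_vmcs_write(TOY_INTR_INFO_VALID | 0x30);	/* armed, vector 0x30 */
	toy_cancel_injection();
	assert(toy_pending_vector == 0x30 && toy_vmcs_read() == 0);
	return 0;
}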

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c

@@ -5005,7 +5005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
-	bool req_event;
 
 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5041,33 +5040,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (unlikely(r))
 		goto out;
 
-	preempt_disable();
-
-	kvm_x86_ops->prepare_guest_switch(vcpu);
-	if (vcpu->fpu_active)
-		kvm_load_guest_fpu(vcpu);
-	kvm_load_guest_xcr0(vcpu);
-
-	atomic_set(&vcpu->guest_mode, 1);
-	smp_wmb();
-
-	local_irq_disable();
-
-	req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
-
-	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
-	    || need_resched() || signal_pending(current)) {
-		if (req_event)
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
-		atomic_set(&vcpu->guest_mode, 0);
-		smp_wmb();
-		local_irq_enable();
-		preempt_enable();
-		r = 1;
-		goto out;
-	}
-
-	if (req_event || req_int_win) {
+	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
 		inject_pending_event(vcpu);
 
 		/* enable NMI/IRQ window open exits if needed */
@@ -5082,6 +5055,29 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	preempt_disable();
+
+	kvm_x86_ops->prepare_guest_switch(vcpu);
+	if (vcpu->fpu_active)
+		kvm_load_guest_fpu(vcpu);
+	kvm_load_guest_xcr0(vcpu);
+
+	atomic_set(&vcpu->guest_mode, 1);
+	smp_wmb();
+
+	local_irq_disable();
+
+	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
+	    || need_resched() || signal_pending(current)) {
+		atomic_set(&vcpu->guest_mode, 0);
+		smp_wmb();
+		local_irq_enable();
+		preempt_enable();
+		kvm_x86_ops->cancel_injection(vcpu);
+		r = 1;
+		goto out;
+	}
+
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
 	kvm_guest_enter();
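
Worth noting in the relocated block: guest_mode is published and interrupts
are disabled before the final recheck, so a concurrent kick cannot be missed,
and because injection has already happened by that point, the abandon path
must now call ->cancel_injection() (the removed code instead re-raised
KVM_REQ_EVENT by hand). A toy model of that store/load handshake, with C11
seq_cst atomics standing in for the patch's atomic_set() + smp_wmb() ordering
(illustrative names only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int toy_guest_mode;
static atomic_int toy_requests;

/* Another CPU posting work: returns true if the vcpu must be IPI'd
 * out of the guest (it had already committed to entry). */
static bool toy_kick_vcpu(void)
{
	atomic_store(&toy_requests, 1);			/* publish the request */
	return atomic_load(&toy_guest_mode) == 1;
}

/* The entering vcpu: publish guest_mode, then re-check for work.
 * At least one side always observes the other. */
static bool toy_may_enter_guest(void)
{
	atomic_store(&toy_guest_mode, 1);
	return atomic_load(&toy_requests) == 0;		/* the final recheck */
}

int main(void)
{
	(void)toy_kick_vcpu();		/* a request lands before the recheck... */
	if (!toy_may_enter_guest())	/* ...so entry is abandoned... */
		puts("entry abandoned -> ->cancel_injection() must run");
	return 0;
}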