KVM: VMX: Pass vcpu to __vmx_complete_interrupts

Cleanup: __vmx_complete_interrupts has no use for the vmx structure.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Authored by Jan Kiszka, 2013-02-20 14:03:24 +01:00; committed by Gleb Natapov
parent 44ceb9d665
commit 3ab66e8a45

arch/x86/kvm/vmx.c

@@ -6436,7 +6436,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 			ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
 }
 
-static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
+static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
 				      u32 idt_vectoring_info,
 				      int instr_len_field,
 				      int error_code_field)
@@ -6447,46 +6447,43 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
 
 	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-	vmx->vcpu.arch.nmi_injected = false;
-	kvm_clear_exception_queue(&vmx->vcpu);
-	kvm_clear_interrupt_queue(&vmx->vcpu);
+	vcpu->arch.nmi_injected = false;
+	kvm_clear_exception_queue(vcpu);
+	kvm_clear_interrupt_queue(vcpu);
 
 	if (!idtv_info_valid)
 		return;
 
-	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
 	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
 
 	switch (type) {
 	case INTR_TYPE_NMI_INTR:
-		vmx->vcpu.arch.nmi_injected = true;
+		vcpu->arch.nmi_injected = true;
 		/*
 		 * SDM 3: 27.7.1.2 (September 2008)
 		 * Clear bit "block by NMI" before VM entry if a NMI
 		 * delivery faulted.
 		 */
-		vmx_set_nmi_mask(&vmx->vcpu, false);
+		vmx_set_nmi_mask(vcpu, false);
 		break;
 	case INTR_TYPE_SOFT_EXCEPTION:
-		vmx->vcpu.arch.event_exit_inst_len =
-			vmcs_read32(instr_len_field);
+		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
 		/* fall through */
 	case INTR_TYPE_HARD_EXCEPTION:
 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
 			u32 err = vmcs_read32(error_code_field);
-			kvm_queue_exception_e(&vmx->vcpu, vector, err);
+			kvm_queue_exception_e(vcpu, vector, err);
 		} else
-			kvm_queue_exception(&vmx->vcpu, vector);
+			kvm_queue_exception(vcpu, vector);
 		break;
 	case INTR_TYPE_SOFT_INTR:
-		vmx->vcpu.arch.event_exit_inst_len =
-			vmcs_read32(instr_len_field);
+		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
 		/* fall through */
 	case INTR_TYPE_EXT_INTR:
-		kvm_queue_interrupt(&vmx->vcpu, vector,
-				  type == INTR_TYPE_SOFT_INTR);
+		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
 		break;
 	default:
 		break;
@@ -6497,7 +6494,7 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 {
 	if (is_guest_mode(&vmx->vcpu))
 		return;
-	__vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
+	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
 				  VM_EXIT_INSTRUCTION_LEN,
 				  IDT_VECTORING_ERROR_CODE);
 }
@@ -6506,7 +6503,7 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu))
 		return;
-	__vmx_complete_interrupts(to_vmx(vcpu),
+	__vmx_complete_interrupts(vcpu,
 				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
 				  VM_ENTRY_INSTRUCTION_LEN,
 				  VM_ENTRY_EXCEPTION_ERROR_CODE);
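
Note on the cleanup: struct kvm_vcpu is embedded inside struct vcpu_vmx, which is why the conversion works in both directions -- callers that hold a vcpu_vmx pass &vmx->vcpu, and code that really needs the VMX wrapper can recover it via to_vmx(), a container_of() helper in vmx.c. Since __vmx_complete_interrupts has no use for the vmx structure, it can take the kvm_vcpu directly. The standalone program below is a minimal sketch of that pattern; the struct fields and the complete_interrupts() helper are simplified stand-ins, not the real kernel definitions.

/*
 * Minimal userspace model of the kvm_vcpu / vcpu_vmx embedding.
 * Names mirror the kernel ones, but the contents are illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm_vcpu {			/* arch-independent state (simplified) */
	bool nmi_injected;
};

struct vcpu_vmx {			/* VMX-specific wrapper (simplified) */
	struct kvm_vcpu vcpu;		/* embedded, as in the real struct */
	unsigned int idt_vectoring_info;
};

static struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

/* Needs only generic vcpu state, so it takes kvm_vcpu -- as after this patch. */
static void complete_interrupts(struct kvm_vcpu *vcpu, unsigned int info)
{
	vcpu->nmi_injected = !!(info & 0x80000000u);
}

int main(void)
{
	struct vcpu_vmx vmx = { .idt_vectoring_info = 0x80000202u };

	/* A VMX caller holds a vcpu_vmx and passes the embedded vcpu ... */
	complete_interrupts(&vmx.vcpu, vmx.idt_vectoring_info);

	/* ... and the wrapper is still recoverable where it is really needed. */
	printf("nmi_injected=%d, same object=%d\n",
	       vmx.vcpu.nmi_injected, to_vmx(&vmx.vcpu) == &vmx);
	return 0;
}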