kvm: nVMX: Handle deferred early VMLAUNCH/VMRESUME failure properly
When emulating a nested VM-entry from L1 to L2, several control field
validation checks are deferred to the hardware. Should one of these
validation checks fail, vcpu_vmx_run will set the vmx->fail flag. When
this happens, the L2 guest state is not loaded (even in part), and
execution should continue in L1 with the next instruction after the
VMLAUNCH/VMRESUME. The VMCS12 is not modified (except for the
VM-instruction error field), the VMCS12 MSR save/load lists are not
processed, and the CPU state is not loaded from the VMCS12 host area.
Moreover, the vmcs02 exit reason is stale, so it should not be
consulted for any reason.

Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b060ca3b2e
commit 4f350c6dbc
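Before the diff, a minimal self-contained C sketch of the control flow the commit message describes may help. This is not kernel code: struct toy_vcpu, toy_nested_vmexit() and the stub helpers below are invented for illustration, and only the branch structure is meant to mirror the patch. On the normal path a genuine VM-exit from L2 is reflected to L1 (vmcs12 sync, MSR-list processing, host-state load); on the deferred-failure path vmcs12 is left untouched except for the VM-instruction error, and L1 simply resumes after the failed VMLAUNCH/VMRESUME.

/* Toy model of the two nested VM-exit paths; all names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_VMXERR_ENTRY_INVALID_CONTROL_FIELD 7

struct toy_vcpu {
	bool fail;                  /* hardware rejected the vmcs02 VM-entry */
	unsigned int vm_inst_error; /* vmcs12 VM-instruction error field */
	bool guest_mode;            /* currently running/emulating L2 */
};

/* Stubs standing in for the real vmcs12 bookkeeping. */
static void toy_sync_vmcs12(struct toy_vcpu *v)
{
	(void)v;
	puts("sync L2 guest state into vmcs12");
}

static void toy_process_exit_msr_lists(struct toy_vcpu *v)
{
	(void)v;
	puts("process vmcs12 VM-exit MSR save/load lists");
}

static void toy_load_vmcs12_host_state(struct toy_vcpu *v)
{
	(void)v;
	puts("load L1 CPU state from the vmcs12 host area");
}

static void toy_nested_vmexit(struct toy_vcpu *v)
{
	v->guest_mode = false;

	if (!v->fail) {
		/* Normal path: a genuine VM-exit from L2 is reflected to L1. */
		toy_sync_vmcs12(v);
		toy_process_exit_msr_lists(v);
		toy_load_vmcs12_host_state(v);
		return;
	}

	/*
	 * Deferred early VM-entry failure: L2 never ran, so vmcs12 is left
	 * untouched except for the VM-instruction error, and L1 resumes at
	 * the instruction after VMLAUNCH/VMRESUME.
	 */
	v->vm_inst_error = TOY_VMXERR_ENTRY_INVALID_CONTROL_FIELD;
	v->fail = false;
}

int main(void)
{
	struct toy_vcpu v = { .fail = true, .guest_mode = true };

	toy_nested_vmexit(&v);
	printf("VM-instruction error reported to L1: %u\n", v.vm_inst_error);
	return 0;
}

The patched nested_vmx_vmexit() below makes the same split with if (likely(!vmx->fail)), reporting the failure to L1 via nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD) and skip_emulated_instruction(vcpu).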
@@ -8344,12 +8344,14 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
-	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
-				vmcs_readl(EXIT_QUALIFICATION),
-				vmx->idt_vectoring_info,
-				intr_info,
-				vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
-				KVM_ISA_VMX);
+	if (vmx->nested.nested_run_pending)
+		return false;
+
+	if (unlikely(vmx->fail)) {
+		pr_info_ratelimited("%s failed vm entry %x\n", __func__,
+				    vmcs_read32(VM_INSTRUCTION_ERROR));
+		return true;
+	}
 
 	/*
 	 * The host physical addresses of some pages of guest memory
@@ -8363,14 +8365,12 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 	 */
 	nested_mark_vmcs12_pages_dirty(vcpu);
 
-	if (vmx->nested.nested_run_pending)
-		return false;
-
-	if (unlikely(vmx->fail)) {
-		pr_info_ratelimited("%s failed vm entry %x\n", __func__,
-				    vmcs_read32(VM_INSTRUCTION_ERROR));
-		return true;
-	}
+	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
+				vmcs_readl(EXIT_QUALIFICATION),
+				vmx->idt_vectoring_info,
+				intr_info,
+				vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
+				KVM_ISA_VMX);
 
 	switch (exit_reason) {
 	case EXIT_REASON_EXCEPTION_NMI:
@@ -11395,46 +11395,30 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-	u32 vm_inst_error = 0;
 
 	/* trying to cancel vmlaunch/vmresume is a bug */
 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
 
-	leave_guest_mode(vcpu);
-	prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
-		       exit_qualification);
-
-	if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
-				 vmcs12->vm_exit_msr_store_count))
-		nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
-
-	if (unlikely(vmx->fail))
-		vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR);
-
-	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-
 	/*
-	 * TODO: SDM says that with acknowledge interrupt on exit, bit 31 of
-	 * the VM-exit interrupt information (valid interrupt) is always set to
-	 * 1 on EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't need
-	 * kvm_cpu_has_interrupt(). See the commit message for details.
+	 * The only expected VM-instruction error is "VM entry with
+	 * invalid control field(s)." Anything else indicates a
+	 * problem with L0.
 	 */
-	if (nested_exit_intr_ack_set(vcpu) &&
-	    exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
-	    kvm_cpu_has_interrupt(vcpu)) {
-		int irq = kvm_cpu_get_interrupt(vcpu);
-		WARN_ON(irq < 0);
-		vmcs12->vm_exit_intr_info = irq |
-			INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
+	WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) !=
+				   VMXERR_ENTRY_INVALID_CONTROL_FIELD));
+
+	leave_guest_mode(vcpu);
+
+	if (likely(!vmx->fail)) {
+		prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
+			       exit_qualification);
+
+		if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
+					 vmcs12->vm_exit_msr_store_count))
+			nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
 	}
 
-	trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
-				       vmcs12->exit_qualification,
-				       vmcs12->idt_vectoring_info_field,
-				       vmcs12->vm_exit_intr_info,
-				       vmcs12->vm_exit_intr_error_code,
-				       KVM_ISA_VMX);
-
+	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 	vm_entry_controls_reset_shadow(vmx);
 	vm_exit_controls_reset_shadow(vmx);
 	vmx_segment_cache_clear(vmx);
@@ -11443,8 +11427,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	if (VMCS02_POOL_SIZE == 0)
 		nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
 
-	load_vmcs12_host_state(vcpu, vmcs12);
-
 	/* Update any VMCS fields that might have changed while L2 ran */
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
@@ -11493,23 +11475,57 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	 */
 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
-	if (unlikely(vmx->fail)) {
-		/*
-		 * After an early L2 VM-entry failure, we're now back
-		 * in L1 which thinks it just finished a VMLAUNCH or
-		 * VMRESUME instruction, so we need to set the failure
-		 * flag and the VM-instruction error field of the VMCS
-		 * accordingly.
-		 */
-		vmx->fail = 0;
-		nested_vmx_failValid(vcpu, vm_inst_error);
-	}
-
 	if (enable_shadow_vmcs)
 		vmx->nested.sync_shadow_vmcs = true;
 
 	/* in case we halted in L2 */
 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+	if (likely(!vmx->fail)) {
+		/*
+		 * TODO: SDM says that with acknowledge interrupt on
+		 * exit, bit 31 of the VM-exit interrupt information
+		 * (valid interrupt) is always set to 1 on
+		 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
+		 * need kvm_cpu_has_interrupt(). See the commit
+		 * message for details.
+		 */
+		if (nested_exit_intr_ack_set(vcpu) &&
+		    exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+		    kvm_cpu_has_interrupt(vcpu)) {
+			int irq = kvm_cpu_get_interrupt(vcpu);
+			WARN_ON(irq < 0);
+			vmcs12->vm_exit_intr_info = irq |
+				INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
+		}
+
+		trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
+					       vmcs12->exit_qualification,
+					       vmcs12->idt_vectoring_info_field,
+					       vmcs12->vm_exit_intr_info,
+					       vmcs12->vm_exit_intr_error_code,
+					       KVM_ISA_VMX);
+
+		load_vmcs12_host_state(vcpu, vmcs12);
+
+		return;
+	}
+
+	/*
+	 * After an early L2 VM-entry failure, we're now back
+	 * in L1 which thinks it just finished a VMLAUNCH or
+	 * VMRESUME instruction, so we need to set the failure
+	 * flag and the VM-instruction error field of the VMCS
+	 * accordingly.
+	 */
+	nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+	/*
+	 * The emulated instruction was already skipped in
+	 * nested_vmx_run, but the updated RIP was never
+	 * written back to the vmcs01.
+	 */
+	skip_emulated_instruction(vcpu);
+	vmx->fail = 0;
 }
 
 /*