kvm: nVMX: Refactor nested_vmx_run()
nested_vmx_run() is split into two parts: the part that handles the VMLAUNCH/VMRESUME instruction itself, and the part that modifies the vCPU state to transition from VMX root mode to VMX non-root mode. The latter will be used when restoring the checkpointed state of a vCPU that was in VMX operation when a snapshot was taken. Signed-off-by: Jim Mattson <jmattson@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
ca0bde28f2
commit
858e25c06f
@ -10507,6 +10507,65 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Transition the vCPU from VMX root mode (running L1) to VMX non-root
 * mode (running L2), using the current vmcs12 as the source of the L2
 * state.  Split out of nested_vmx_run() so it can also be used when
 * restoring a checkpointed vCPU that was in VMX operation (per the
 * commit message); in that case the caller is not emulating an actual
 * VMLAUNCH/VMRESUME.
 *
 * @vcpu:         the vCPU to transition.
 * @from_vmentry: true when called on behalf of an emulated
 *                VMLAUNCH/VMRESUME; forwarded to prepare_vmcs02().
 *                NOTE(review): the exact vmcs02-preparation differences
 *                keyed off this flag live in prepare_vmcs02() — confirm
 *                there.
 *
 * Returns 0 on success, -ENOMEM if no vmcs02 is available, or 1 after
 * delivering an emulated VM-entry failure to L1 (the vCPU has been
 * rolled back to vmcs01 in that case).
 */
static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct loaded_vmcs *vmcs02;
	int cpu;
	u32 msr_entry_idx;
	u32 exit_qual;

	vmcs02 = nested_get_current_vmcs02(vmx);
	if (!vmcs02)
		return -ENOMEM;

	enter_guest_mode(vcpu);

	/*
	 * Snapshot L1's DEBUGCTL only when the entry will not load L2's
	 * debug controls, so it can be restored on VM-exit.
	 */
	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);

	/*
	 * Switch the active VMCS from vmcs01 to vmcs02.  The put/load
	 * pair is done under get_cpu() so the vCPU cannot migrate while
	 * no VMCS is loaded.
	 */
	cpu = get_cpu();
	vmx->loaded_vmcs = vmcs02;
	vmx_vcpu_put(vcpu);
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	put_cpu();

	/* Cached segment data refers to vmcs01; invalidate it. */
	vmx_segment_cache_clear(vmx);

	/*
	 * Populate vmcs02 from vmcs12.  On failure, roll back to vmcs01
	 * and report an emulated VM-entry failure to L1.
	 */
	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
		leave_guest_mode(vcpu);
		vmx_load_vmcs01(vcpu);
		nested_vmx_entry_failure(vcpu, vmcs12,
					 EXIT_REASON_INVALID_STATE, exit_qual);
		return 1;
	}

	nested_get_vmcs12_pages(vcpu, vmcs12);

	/*
	 * Process the VM-entry MSR-load list; a non-zero return is the
	 * 1-based index of the entry that failed, reported to L1 as the
	 * exit qualification of an MSR-load failure.
	 */
	msr_entry_idx = nested_vmx_load_msr(vcpu,
					    vmcs12->vm_entry_msr_load_addr,
					    vmcs12->vm_entry_msr_load_count);
	if (msr_entry_idx) {
		leave_guest_mode(vcpu);
		vmx_load_vmcs01(vcpu);
		nested_vmx_entry_failure(vcpu, vmcs12,
					 EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
		return 1;
	}

	vmcs12->launch_state = 1;

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return 0;
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
|
* nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
|
||||||
* for running an L2 nested guest.
|
* for running an L2 nested guest.
|
||||||
@ -10515,9 +10574,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
|
|||||||
{
|
{
|
||||||
struct vmcs12 *vmcs12;
|
struct vmcs12 *vmcs12;
|
||||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||||
int cpu;
|
|
||||||
struct loaded_vmcs *vmcs02;
|
|
||||||
u32 msr_entry_idx;
|
|
||||||
u32 exit_qual;
|
u32 exit_qual;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@ -10576,58 +10632,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
|
|||||||
* the nested entry.
|
* the nested entry.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
vmcs02 = nested_get_current_vmcs02(vmx);
|
ret = enter_vmx_non_root_mode(vcpu, true);
|
||||||
if (!vmcs02)
|
if (ret)
|
||||||
return -ENOMEM;
|
return ret;
|
||||||
|
|
||||||
enter_guest_mode(vcpu);
|
|
||||||
|
|
||||||
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
|
|
||||||
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
|
|
||||||
|
|
||||||
cpu = get_cpu();
|
|
||||||
vmx->loaded_vmcs = vmcs02;
|
|
||||||
vmx_vcpu_put(vcpu);
|
|
||||||
vmx_vcpu_load(vcpu, cpu);
|
|
||||||
vcpu->cpu = cpu;
|
|
||||||
put_cpu();
|
|
||||||
|
|
||||||
vmx_segment_cache_clear(vmx);
|
|
||||||
|
|
||||||
if (prepare_vmcs02(vcpu, vmcs12, true, &exit_qual)) {
|
|
||||||
leave_guest_mode(vcpu);
|
|
||||||
vmx_load_vmcs01(vcpu);
|
|
||||||
nested_vmx_entry_failure(vcpu, vmcs12,
|
|
||||||
EXIT_REASON_INVALID_STATE, exit_qual);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
nested_get_vmcs12_pages(vcpu, vmcs12);
|
|
||||||
|
|
||||||
msr_entry_idx = nested_vmx_load_msr(vcpu,
|
|
||||||
vmcs12->vm_entry_msr_load_addr,
|
|
||||||
vmcs12->vm_entry_msr_load_count);
|
|
||||||
if (msr_entry_idx) {
|
|
||||||
leave_guest_mode(vcpu);
|
|
||||||
vmx_load_vmcs01(vcpu);
|
|
||||||
nested_vmx_entry_failure(vcpu, vmcs12,
|
|
||||||
EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
vmcs12->launch_state = 1;
|
|
||||||
|
|
||||||
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
|
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
|
||||||
return kvm_vcpu_halt(vcpu);
|
return kvm_vcpu_halt(vcpu);
|
||||||
|
|
||||||
vmx->nested.nested_run_pending = 1;
|
vmx->nested.nested_run_pending = 1;
|
||||||
|
|
||||||
/*
|
|
||||||
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
|
|
||||||
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
|
|
||||||
* returned as far as L1 is concerned. It will only return (and set
|
|
||||||
* the success flag) when L2 exits (see nested_vmx_vmexit()).
|
|
||||||
*/
|
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
out:
|
out:
|
||||||
|
Loading…
Reference in New Issue
Block a user