KVM: VMX: Cache vmcs.EXIT_INTR_INFO using arch avail_reg flags
Introduce a new "extended register" type, EXIT_INFO_2 (to pair with the
nomenclature in .get_exit_info()), and use it to cache VMX's
vmcs.EXIT_INTR_INFO.  Drop a comment in vmx_recover_nmi_blocking() that
is obsoleted by the generic caching mechanism.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200415203454.8296-6-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5addc23519
commit 8791585837
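The caching mechanism itself is easiest to see in isolation. Below is a minimal, standalone C sketch of the avail_regs pattern this patch applies to vmcs.EXIT_INTR_INFO: a per-exit availability bit gates a lazy read, so the expensive VMREAD happens at most once per VM-Exit no matter how many consumers ask for the value. The struct layout, the bit name, and the stub vmcs_read32() below are illustrative stand-ins, not the kernel's definitions (those are in the vmx.h hunk at the end of the diff).

#include <stdint.h>
#include <stdio.h>

#define EXREG_EXIT_INFO_2	1	/* bit index of the cached field's avail flag */

struct vcpu {
	uint32_t regs_avail;		/* which cached fields are currently valid */
	uint32_t exit_intr_info;	/* cached copy of vmcs.EXIT_INTR_INFO */
};

/* Stand-in for the real VMREAD of VM_EXIT_INTR_INFO. */
static uint32_t vmcs_read32(void)
{
	puts("VMREAD executed");
	return 0x80000b0e;	/* e.g. valid, hw exception, vector 14 (#PF) */
}

static uint32_t get_intr_info(struct vcpu *vcpu)
{
	/* Read the VMCS field at most once per VM-Exit. */
	if (!(vcpu->regs_avail & (1u << EXREG_EXIT_INFO_2))) {
		vcpu->regs_avail |= 1u << EXREG_EXIT_INFO_2;
		vcpu->exit_intr_info = vmcs_read32();
	}
	return vcpu->exit_intr_info;
}

int main(void)
{
	struct vcpu vcpu = { 0 };	/* regs_avail is cleared on every VM-Exit */

	get_intr_info(&vcpu);			/* first call: does the VMREAD */
	printf("intr_info = 0x%x\n",
	       get_intr_info(&vcpu));		/* second call: cache hit, no VMREAD */
	return 0;
}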
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -171,6 +171,7 @@ enum kvm_reg {
 	VCPU_EXREG_RFLAGS,
 	VCPU_EXREG_SEGMENTS,
 	VCPU_EXREG_EXIT_INFO_1,
+	VCPU_EXREG_EXIT_INFO_2,
 };
 
 enum {
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5419,7 +5419,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
 
 fail:
 	nested_vmx_vmexit(vcpu, vmx->exit_reason,
-			  vmcs_read32(VM_EXIT_INTR_INFO),
+			  vmx_get_intr_info(vcpu),
			  vmx_get_exit_qual(vcpu));
 	return 1;
 }
@@ -5652,7 +5652,7 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
 
 	switch (exit_reason) {
 	case EXIT_REASON_EXCEPTION_NMI:
-		intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+		intr_info = vmx_get_intr_info(vcpu);
 		if (is_nmi(intr_info))
 			return true;
 		else if (is_page_fault(intr_info))
@@ -5708,12 +5708,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
  */
 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
 {
+	u32 intr_info = vmx_get_intr_info(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-	u32 intr_info;
 
 	switch (exit_reason) {
 	case EXIT_REASON_EXCEPTION_NMI:
-		intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+		intr_info = vmx_get_intr_info(vcpu);
 		if (is_nmi(intr_info))
 			return true;
 		else if (is_page_fault(intr_info))
@@ -5848,7 +5848,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
 		goto reflect_vmexit;
 	}
 
-	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	exit_intr_info = vmx_get_intr_info(vcpu);
 	exit_qual = vmx_get_exit_qual(vcpu);
 
 	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, exit_qual,
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5635,7 +5635,7 @@ static const int kvm_vmx_max_exit_handlers =
 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 {
 	*info1 = vmx_get_exit_qual(vcpu);
-	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
+	*info2 = vmx_get_intr_info(vcpu);
 }
 
 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
@@ -6298,16 +6298,16 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 
 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 {
-	vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
 
 	/* if exit due to PF check for async PF */
-	if (is_page_fault(vmx->exit_intr_info)) {
+	if (is_page_fault(intr_info)) {
 		vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
 	/* Handle machine checks before interrupts are enabled */
-	} else if (is_machine_check(vmx->exit_intr_info)) {
+	} else if (is_machine_check(intr_info)) {
 		kvm_machine_check();
 	/* We need to handle NMIs before interrupts are enabled */
-	} else if (is_nmi(vmx->exit_intr_info)) {
+	} else if (is_nmi(intr_info)) {
 		kvm_before_interrupt(&vmx->vcpu);
 		asm("int $2");
 		kvm_after_interrupt(&vmx->vcpu);
@@ -6322,9 +6322,8 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 	unsigned long tmp;
 #endif
 	gate_desc *desc;
-	u32 intr_info;
+	u32 intr_info = vmx_get_intr_info(vcpu);
 
-	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 	if (WARN_ONCE(!is_external_intr(intr_info),
 	    "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
 		return;
@@ -6405,11 +6404,8 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 	if (enable_vnmi) {
 		if (vmx->loaded_vmcs->nmi_known_unmasked)
 			return;
-		/*
-		 * Can't use vmx->exit_intr_info since we're not sure what
-		 * the exit reason is.
-		 */
-		exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+		exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
 		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
 		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
 		/*
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -451,7 +451,8 @@ static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
 				  | (1 << VCPU_EXREG_PDPTR)
 				  | (1 << VCPU_EXREG_SEGMENTS)
 				  | (1 << VCPU_EXREG_CR3)
-				  | (1 << VCPU_EXREG_EXIT_INFO_1));
+				  | (1 << VCPU_EXREG_EXIT_INFO_1)
+				  | (1 << VCPU_EXREG_EXIT_INFO_2));
 	vcpu->arch.regs_dirty = 0;
 }
 
@@ -506,6 +507,17 @@ static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
 	return vmx->exit_qualification;
 }
 
+static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
+		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+	}
+	return vmx->exit_intr_info;
+}
+
 struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
 void free_vmcs(struct vmcs *vmcs);
 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
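A note on the new accessor: vmx->exit_intr_info is now only guaranteed to be fresh once VCPU_EXREG_EXIT_INFO_2 has been marked available, which is why every reader, including handle_exception_nmi_irqoff(), is converted to vmx_get_intr_info() rather than touching the field directly. It is also why the "Can't use vmx->exit_intr_info since we're not sure what the exit reason is" comment in vmx_recover_nmi_blocking() can simply be dropped: unlike the old eagerly-written field, the accessor returns a valid value regardless of which exit path was taken.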