forked from Minki/linux
KVM: VMX: Split out architectural interrupt/NMI blocking checks
Move the architectural (non-KVM specific) interrupt/NMI blocking checks to a separate helper so that they can be used in a future patch by vmx_check_nested_events(). No functional change intended. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Message-Id: <20200423022550.15113-8-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
55714cddbf
commit
1b660b6baa
@@ -4510,21 +4510,35 @@ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
|
||||||
|
(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
|
||||||
|
GUEST_INTR_STATE_NMI));
|
||||||
|
}
|
||||||
|
|
||||||
static bool vmx_nmi_allowed(struct kvm_vcpu *vcpu)
|
static bool vmx_nmi_allowed(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
if (to_vmx(vcpu)->nested.nested_run_pending)
|
if (to_vmx(vcpu)->nested.nested_run_pending)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
|
return !vmx_nmi_blocked(vcpu);
|
||||||
return true;
|
}
|
||||||
|
|
||||||
if (!enable_vnmi &&
|
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
|
||||||
to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
|
{
|
||||||
|
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
|
return !(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) ||
|
||||||
(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
|
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
|
||||||
| GUEST_INTR_STATE_NMI));
|
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
|
static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
|
||||||
@ -4532,12 +4546,7 @@ static bool vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
|
|||||||
if (to_vmx(vcpu)->nested.nested_run_pending)
|
if (to_vmx(vcpu)->nested.nested_run_pending)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
|
return !vmx_interrupt_blocked(vcpu);
|
||||||
return true;
|
|
||||||
|
|
||||||
return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
|
|
||||||
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
|
|
||||||
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
|
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
|
||||||
|
@ -344,6 +344,8 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
|
|||||||
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
|
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
|
||||||
void update_exception_bitmap(struct kvm_vcpu *vcpu);
|
void update_exception_bitmap(struct kvm_vcpu *vcpu);
|
||||||
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
|
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
|
||||||
|
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
|
||||||
|
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
|
||||||
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
|
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
|
||||||
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
|
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
|
||||||
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
|
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
|
||||||
|
Loading…
Reference in New Issue
Block a user