KVM: x86: Move HF_NMI_MASK and HF_IRET_MASK into "struct vcpu_svm"

Move HF_NMI_MASK and HF_IRET_MASK (a.k.a. "waiting for IRET") out of the
common "hflags" and into dedicated flags in "struct vcpu_svm".  The flags
are used only by SVM and thus should not live in the common hflags.

Tracking NMI masking in software isn't SVM specific, e.g. VMX has a
similar flag (soft_vnmi_blocked), but that flag is much more of a hack:
VMX can't intercept IRET, the flag is useful only for ancient CPUs and
will hopefully be removed at some point, and in any case the exact
behavior is vendor specific and shouldn't ever be referenced in common
code.

No functional change is intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Tested-by: Santosh Shukla <Santosh.Shukla@amd.com>
Link: https://lore.kernel.org/r/20221129193717.513824-5-mlevitsk@redhat.com
[sean: split from HF_GIF_MASK patch]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Maxim Levitsky 2023-01-30 17:20:03 -08:00 committed by Sean Christopherson
parent c760e86f27
commit 916b54a768
3 changed files with 31 additions and 11 deletions
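
As a rough, self-contained illustration of the conversion described above (a userspace
sketch with made-up struct and function names, not code from this commit or the kernel's
API), the NMI-window condition goes from testing bits in the shared hflags word to
testing dedicated bools in SVM-private per-vCPU state:

/* Standalone sketch, compilable in userspace; names are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define HF_NMI_MASK	(1 << 3)	/* old: "NMIs masked" bit in common hflags */
#define HF_IRET_MASK	(1 << 4)	/* old: "waiting for IRET" bit in common hflags */

struct old_state { unsigned long hflags; };	/* stand-in for vcpu->arch.hflags */
struct new_state {				/* stand-in for the new vcpu_svm fields */
	bool nmi_masked;
	bool awaiting_iret_completion;
};

/* Old form of the check: NMIs are masked and no IRET completion is pending. */
static bool old_check(const struct old_state *s)
{
	return (s->hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK;
}

/* New form: the same condition expressed with the dedicated bools. */
static bool new_check(const struct new_state *s)
{
	return s->nmi_masked && !s->awaiting_iret_completion;
}

int main(void)
{
	struct old_state o = { .hflags = HF_NMI_MASK };
	struct new_state n = { .nmi_masked = true, .awaiting_iret_completion = false };

	printf("old_check = %d, new_check = %d\n", old_check(&o), new_check(&n));
	return 0;
}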

arch/x86/include/asm/kvm_host.h

@@ -2074,8 +2074,6 @@ enum {
 	TASK_SWITCH_GATE = 3,
 };
 
-#define HF_NMI_MASK (1 << 3)
-#define HF_IRET_MASK (1 << 4)
 #define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
 
 #ifdef CONFIG_KVM_SMM

arch/x86/kvm/svm/svm.c

@@ -1338,6 +1338,9 @@ static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.microcode_version = 0x01000065;
 	svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;
 
+	svm->nmi_masked = false;
+	svm->awaiting_iret_completion = false;
+
 	if (sev_es_guest(vcpu->kvm))
 		sev_es_vcpu_reset(svm);
 }
@@ -2482,7 +2485,7 @@ static int iret_interception(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	++vcpu->stat.nmi_window_exits;
-	vcpu->arch.hflags |= HF_IRET_MASK;
+	svm->awaiting_iret_completion = true;
 	if (!sev_es_guest(vcpu->kvm)) {
 		svm_clr_intercept(svm, INTERCEPT_IRET);
 		svm->nmi_iret_rip = kvm_rip_read(vcpu);
@@ -3478,7 +3481,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
 	if (svm->nmi_l1_to_l2)
 		return;
 
-	vcpu->arch.hflags |= HF_NMI_MASK;
+	svm->nmi_masked = true;
 	if (!sev_es_guest(vcpu->kvm))
 		svm_set_intercept(svm, INTERCEPT_IRET);
 	++vcpu->stat.nmi_injections;
@@ -3591,7 +3594,7 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
 		return false;
 
 	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
-	       (vcpu->arch.hflags & HF_NMI_MASK);
+	       svm->nmi_masked;
 }
 
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
@@ -3611,7 +3614,7 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 
 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
-	return !!(vcpu->arch.hflags & HF_NMI_MASK);
+	return to_svm(vcpu)->nmi_masked;
 }
 
 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
@@ -3619,11 +3622,11 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if (masked) {
-		vcpu->arch.hflags |= HF_NMI_MASK;
+		svm->nmi_masked = true;
 		if (!sev_es_guest(vcpu->kvm))
 			svm_set_intercept(svm, INTERCEPT_IRET);
 	} else {
-		vcpu->arch.hflags &= ~HF_NMI_MASK;
+		svm->nmi_masked = false;
 		if (!sev_es_guest(vcpu->kvm))
 			svm_clr_intercept(svm, INTERCEPT_IRET);
 	}
@@ -3709,7 +3712,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
+	if (svm->nmi_masked && !svm->awaiting_iret_completion)
 		return; /* IRET will cause a vm exit */
 
 	if (!gif_set(svm)) {
@@ -3833,10 +3836,11 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
 	 * If we've made progress since setting HF_IRET_MASK, we've
 	 * executed an IRET and can allow NMI injection.
 	 */
-	if ((vcpu->arch.hflags & HF_IRET_MASK) &&
+	if (svm->awaiting_iret_completion &&
 	    (sev_es_guest(vcpu->kvm) ||
 	     kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
-		vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
+		svm->awaiting_iret_completion = false;
+		svm->nmi_masked = false;
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 	}

arch/x86/kvm/svm/svm.h

@@ -230,8 +230,26 @@ struct vcpu_svm {
 
 	struct svm_nested_state nested;
 
+	/* NMI mask value, used when vNMI is not enabled */
+	bool nmi_masked;
+
+	/*
+	 * True when NMIs are still masked but guest IRET was just intercepted
+	 * and KVM is waiting for RIP to change, which will signal that the
+	 * intercepted IRET was retired and thus NMI can be unmasked.
+	 */
+	bool awaiting_iret_completion;
+
+	/*
+	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
+	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
+	 * temporarily steals RFLAGS.TF to single-step the guest in this case
+	 * in order to regain control as soon as the NMI-blocking condition
+	 * goes away.
+	 */
 	bool nmi_singlestep;
 	u64 nmi_singlestep_guest_rflags;
+
 	bool nmi_l1_to_l2;
 	unsigned long soft_int_csbase;