KVM: VMX: Provide separate subroutines for invoking NMI vs. IRQ handlers

Split the asm subroutines for handling NMIs versus IRQs that occur in the
guest so that the NMI handler can be called from a noinstr section.  As a
bonus, the NMI path doesn't need an indirect branch.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221213060912.654668-7-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Author: Sean Christopherson <seanjc@google.com>
Date:   2022-12-13 06:09:11 +00:00
parent 54a3b70a75
commit 4f76e86f7e

2 changed files with 50 additions and 46 deletions
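For context on the "no indirect branch" point in the changelog: the old common helper received the handler's address as an argument, so it had to dispatch through CALL_NOSPEC, the kernel's retpoline-mediated indirect call, even when the target was the one fixed NMI entry point. The new NMI stub hard-codes its target, so the branch is direct. A minimal C sketch of the shape change (illustrative only, with hypothetical names, not kernel code):

	/* Hypothetical handler standing in for the real asm entry point. */
	static void handle_nmi(void) { }

	typedef void (*event_fn)(void);

	/* Old shape: one helper serves both NMIs and IRQs, so the target is
	 * only known at runtime and the call must be indirect (CALL_NOSPEC
	 * in the real asm). */
	static void do_event_irqoff(event_fn fn)
	{
		fn();
	}

	/* New shape: the NMI path bakes in its single possible target, so a
	 * plain direct call is emitted. */
	static void do_nmi_irqoff(void)
	{
		handle_nmi();
	}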

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -31,6 +31,39 @@
 #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
 #endif
 
+.macro VMX_DO_EVENT_IRQOFF call_insn call_target
+	/*
+	 * Unconditionally create a stack frame, getting the correct RSP on the
+	 * stack (for x86-64) would take two instructions anyways, and RBP can
+	 * be used to restore RSP to make objtool happy (see below).
+	 */
+	push %_ASM_BP
+	mov %_ASM_SP, %_ASM_BP
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
+	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
+	 */
+	and  $-16, %rsp
+	push $__KERNEL_DS
+	push %rbp
+#endif
+	pushf
+	push $__KERNEL_CS
+	\call_insn \call_target
+
+	/*
+	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
+	 * the correct value.  objtool doesn't know the callee will IRET and,
+	 * without the explicit restore, thinks the stack is getting walloped.
+	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
+	 */
+	mov %_ASM_BP, %_ASM_SP
+	pop %_ASM_BP
+	RET
+.endm
+
 .section .noinstr.text, "ax"
 
 /**
@@ -320,35 +353,10 @@ SYM_FUNC_START(vmread_error_trampoline)
 SYM_FUNC_END(vmread_error_trampoline)
 #endif
 
-SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
-	/*
-	 * Unconditionally create a stack frame, getting the correct RSP on the
-	 * stack (for x86-64) would take two instructions anyways, and RBP can
-	 * be used to restore RSP to make objtool happy (see below).
-	 */
-	push %_ASM_BP
-	mov %_ASM_SP, %_ASM_BP
-
-#ifdef CONFIG_X86_64
-	/*
-	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
-	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
-	 */
-	and  $-16, %rsp
-	push $__KERNEL_DS
-	push %rbp
-#endif
-	pushf
-	push $__KERNEL_CS
-	CALL_NOSPEC _ASM_ARG1
-
-	/*
-	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
-	 * the correct value.  objtool doesn't know the callee will IRET and,
-	 * without the explicit restore, thinks the stack is getting walloped.
-	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
-	 */
-	mov %_ASM_BP, %_ASM_SP
-	pop %_ASM_BP
-	RET
-SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)
+SYM_FUNC_START(vmx_do_nmi_irqoff)
+	VMX_DO_EVENT_IRQOFF call asm_exc_nmi_kvm_vmx
+SYM_FUNC_END(vmx_do_nmi_irqoff)
+
+SYM_FUNC_START(vmx_do_interrupt_irqoff)
+	VMX_DO_EVENT_IRQOFF CALL_NOSPEC _ASM_ARG1
+SYM_FUNC_END(vmx_do_interrupt_irqoff)
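For reference, the synthetic frame that VMX_DO_EVENT_IRQOFF builds mirrors what the CPU itself pushes when delivering an interrupt or NMI, which is what lets the callee return with IRET. A sketch of the stack just before \call_insn executes, derived from the macro above (x86-64 layout; not part of the patch):

	/*
	 * Synthetic IRET frame on x86-64, higher addresses first:
	 *
	 *   SS     = __KERNEL_DS    (pushed explicitly)
	 *   RSP    = %rbp           (the frame pointer, i.e. the RSP value
	 *                            IRET should restore; see the macro's
	 *                            "Restore RSP from RBP" comment)
	 *   RFLAGS                  (from pushf)
	 *   CS     = __KERNEL_CS    (pushed explicitly)
	 *   RIP    = return address (pushed by \call_insn)
	 */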

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6861,17 +6861,8 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
 }
 
-void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
-
-static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
-					unsigned long entry)
-{
-	bool is_nmi = entry == (unsigned long)asm_exc_nmi_kvm_vmx;
-
-	kvm_before_interrupt(vcpu, is_nmi ? KVM_HANDLING_NMI : KVM_HANDLING_IRQ);
-	vmx_do_interrupt_nmi_irqoff(entry);
-	kvm_after_interrupt(vcpu);
-}
+void vmx_do_interrupt_irqoff(unsigned long entry);
+void vmx_do_nmi_irqoff(void);
 
 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
 {
@@ -6895,7 +6886,6 @@ static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
 
 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 {
-	const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_kvm_vmx;
 	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
 
 	/* if exit due to PF check for async PF */
@@ -6908,8 +6898,11 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 	else if (is_machine_check(intr_info))
 		kvm_machine_check();
 	/* We need to handle NMIs before interrupts are enabled */
-	else if (is_nmi(intr_info))
-		handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
+	else if (is_nmi(intr_info)) {
+		kvm_before_interrupt(&vmx->vcpu, KVM_HANDLING_NMI);
+		vmx_do_nmi_irqoff();
+		kvm_after_interrupt(&vmx->vcpu);
+	}
 }
 
 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
@@ -6922,7 +6915,10 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 		    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
 		return;
 
-	handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
+	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
+	vmx_do_interrupt_irqoff(gate_offset(desc));
+	kvm_after_interrupt(vcpu);
+
 	vcpu->arch.at_instruction_boundary = true;
 }
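On the C side, the payoff is that the NMI stub is now callable with no function-pointer argument, which is what allows a future caller in a noinstr section to reach it via a direct, objtool-validatable call. A minimal sketch of that idea, assuming the standard kernel noinstr annotation and reusing this patch's declaration (the caller here is hypothetical, not from this commit):

	#include <linux/compiler_types.h>	/* for noinstr */

	void vmx_do_nmi_irqoff(void);		/* asm stub added above */

	/* Hypothetical noinstr caller: the stub's address is fixed, so this
	 * compiles to a direct call with no tracing or instrumentation hooks
	 * in between, which is what noinstr code requires. */
	static noinstr void forward_nmi_sketch(void)
	{
		vmx_do_nmi_irqoff();
	}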