KVM: x86: Switch KVM guest to using interrupts for page ready APF delivery
KVM now supports using interrupt for 'page ready' APF event delivery and legacy mechanism was deprecated. Switch KVM guests to the new one. Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> Message-Id: <20200525144125.143875-9-vkuznets@redhat.com> [Use HYPERVISOR_CALLBACK_VECTOR instead of a separate vector. - Paolo] Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
49b3deaad3
commit
b1d405751c
@ -801,6 +801,7 @@ config KVM_GUEST
|
|||||||
depends on PARAVIRT
|
depends on PARAVIRT
|
||||||
select PARAVIRT_CLOCK
|
select PARAVIRT_CLOCK
|
||||||
select ARCH_CPUIDLE_HALTPOLL
|
select ARCH_CPUIDLE_HALTPOLL
|
||||||
|
select X86_HV_CALLBACK_VECTOR
|
||||||
default y
|
default y
|
||||||
---help---
|
---help---
|
||||||
This option enables various optimizations for running under the KVM
|
This option enables various optimizations for running under the KVM
|
||||||
|
@ -1475,6 +1475,11 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
|
|||||||
|
|
||||||
#endif /* CONFIG_HYPERV */
|
#endif /* CONFIG_HYPERV */
|
||||||
|
|
||||||
|
#ifdef CONFIG_KVM_GUEST
|
||||||
|
BUILD_INTERRUPT3(kvm_async_pf_vector, HYPERVISOR_CALLBACK_VECTOR,
|
||||||
|
kvm_async_pf_intr)
|
||||||
|
#endif
|
||||||
|
|
||||||
SYM_CODE_START(page_fault)
|
SYM_CODE_START(page_fault)
|
||||||
ASM_CLAC
|
ASM_CLAC
|
||||||
pushl $do_page_fault
|
pushl $do_page_fault
|
||||||
|
@ -1190,6 +1190,11 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
|
|||||||
acrn_hv_callback_vector acrn_hv_vector_handler
|
acrn_hv_callback_vector acrn_hv_vector_handler
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_KVM_GUEST
|
||||||
|
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
|
||||||
|
kvm_async_pf_vector kvm_async_pf_intr
|
||||||
|
#endif
|
||||||
|
|
||||||
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
|
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
|
||||||
idtentry int3 do_int3 has_error_code=0 create_gap=1
|
idtentry int3 do_int3 has_error_code=0 create_gap=1
|
||||||
idtentry stack_segment do_stack_segment has_error_code=1
|
idtentry stack_segment do_stack_segment has_error_code=1
|
||||||
|
@ -4,6 +4,7 @@
|
|||||||
|
|
||||||
#include <asm/processor.h>
|
#include <asm/processor.h>
|
||||||
#include <asm/alternative.h>
|
#include <asm/alternative.h>
|
||||||
|
#include <linux/interrupt.h>
|
||||||
#include <uapi/asm/kvm_para.h>
|
#include <uapi/asm/kvm_para.h>
|
||||||
|
|
||||||
extern void kvmclock_init(void);
|
extern void kvmclock_init(void);
|
||||||
@ -104,6 +105,12 @@ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
extern __visible void kvm_async_pf_vector(void);
|
||||||
|
#ifdef CONFIG_TRACING
|
||||||
|
#define trace_kvm_async_pf_vector kvm_async_pf_vector
|
||||||
|
#endif
|
||||||
|
__visible void __irq_entry kvm_async_pf_intr(struct pt_regs *regs);
|
||||||
|
|
||||||
#ifdef CONFIG_PARAVIRT_SPINLOCKS
|
#ifdef CONFIG_PARAVIRT_SPINLOCKS
|
||||||
void __init kvm_spinlock_init(void);
|
void __init kvm_spinlock_init(void);
|
||||||
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
|
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
|
||||||
|
@ -233,15 +233,10 @@ NOKPROBE_SYMBOL(kvm_read_and_reset_apf_flags);
|
|||||||
|
|
||||||
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
|
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
|
||||||
{
|
{
|
||||||
u32 reason = kvm_read_and_reset_apf_flags();
|
u32 flags = kvm_read_and_reset_apf_flags();
|
||||||
|
|
||||||
switch (reason) {
|
if (!flags)
|
||||||
case KVM_PV_REASON_PAGE_NOT_PRESENT:
|
|
||||||
case KVM_PV_REASON_PAGE_READY:
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
return false;
|
return false;
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If the host managed to inject an async #PF into an interrupt
|
* If the host managed to inject an async #PF into an interrupt
|
||||||
@ -251,20 +246,39 @@ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
|
|||||||
if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
|
if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
|
||||||
panic("Host injected async #PF in interrupt disabled region\n");
|
panic("Host injected async #PF in interrupt disabled region\n");
|
||||||
|
|
||||||
if (reason == KVM_PV_REASON_PAGE_NOT_PRESENT) {
|
if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
|
||||||
if (unlikely(!(user_mode(regs))))
|
if (unlikely(!(user_mode(regs))))
|
||||||
panic("Host injected async #PF in kernel mode\n");
|
panic("Host injected async #PF in kernel mode\n");
|
||||||
/* Page is swapped out by the host. */
|
/* Page is swapped out by the host. */
|
||||||
kvm_async_pf_task_wait_schedule(token);
|
kvm_async_pf_task_wait_schedule(token);
|
||||||
} else {
|
return true;
|
||||||
rcu_irq_enter();
|
|
||||||
kvm_async_pf_task_wake(token);
|
|
||||||
rcu_irq_exit();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
NOKPROBE_SYMBOL(__kvm_handle_async_pf);
|
NOKPROBE_SYMBOL(__kvm_handle_async_pf);
|
||||||
|
|
||||||
|
/*
 * Interrupt handler for KVM 'page ready' async #PF notifications.
 * Wired to HYPERVISOR_CALLBACK_VECTOR via the kvm_async_pf_vector
 * entry stubs added by this commit (BUILD_INTERRUPT3 / apicinterrupt3).
 * Runs in hard-irq context with the guest's per-CPU apf_reason area
 * shared with the host.
 */
__visible void __irq_entry kvm_async_pf_intr(struct pt_regs *regs)
|
||||||
|
{
|
||||||
|
u32 token;
|
||||||
|
|
||||||
|
/* Standard APIC irq-entry bookkeeping: ACK the interrupt, enter irq context. */
entering_ack_irq();
|
||||||
|
|
||||||
|
/*
 * Account under the hypervisor-callback counter: this vector is shared
 * with other hypervisor callbacks (commit uses HYPERVISOR_CALLBACK_VECTOR
 * instead of a dedicated vector).
 */
inc_irq_stat(irq_hv_callback_count);
|
||||||
|
|
||||||
|
/* Only act if async PF was enabled on this CPU (see kvm_guest_cpu_init). */
if (__this_cpu_read(apf_reason.enabled)) {
|
||||||
|
/* Token identifying which stalled page/task this 'page ready' event is for. */
token = __this_cpu_read(apf_reason.token);
|
||||||
|
/*
 * NOTE(review): rcu_irq_enter/exit bracket the wakeup — presumably
 * kvm_async_pf_task_wake needs RCU to be watching here; confirm
 * against kvm_async_pf_task_wake's implementation.
 */
rcu_irq_enter();
|
||||||
|
kvm_async_pf_task_wake(token);
|
||||||
|
rcu_irq_exit();
|
||||||
|
/* Clear the token before acking so the slot is free for the next event. */
__this_cpu_write(apf_reason.token, 0);
|
||||||
|
/* Tell the host the event was consumed so it may deliver the next one. */
wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Irq-exit bookkeeping (EOI/irq-exit path). */
exiting_irq();
|
||||||
|
}
|
||||||
|
|
||||||
static void __init paravirt_ops_setup(void)
|
static void __init paravirt_ops_setup(void)
|
||||||
{
|
{
|
||||||
pv_info.name = "KVM";
|
pv_info.name = "KVM";
|
||||||
@ -308,17 +322,19 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
|
|||||||
|
|
||||||
static void kvm_guest_cpu_init(void)
|
static void kvm_guest_cpu_init(void)
|
||||||
{
|
{
|
||||||
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
|
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
|
||||||
u64 pa;
|
u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
|
||||||
|
|
||||||
WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));
|
WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));
|
||||||
|
|
||||||
pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
|
pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
|
||||||
pa |= KVM_ASYNC_PF_ENABLED;
|
pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
|
||||||
|
|
||||||
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
|
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
|
||||||
pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
|
pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
|
||||||
|
|
||||||
|
wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
|
||||||
|
|
||||||
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
|
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
|
||||||
__this_cpu_write(apf_reason.enabled, 1);
|
__this_cpu_write(apf_reason.enabled, 1);
|
||||||
pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
|
pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
|
||||||
@ -643,8 +659,10 @@ static void __init kvm_guest_init(void)
|
|||||||
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
|
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
|
||||||
apic_set_eoi_write(kvm_guest_apic_eoi_write);
|
apic_set_eoi_write(kvm_guest_apic_eoi_write);
|
||||||
|
|
||||||
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf)
|
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
|
||||||
static_branch_enable(&kvm_async_pf_enabled);
|
static_branch_enable(&kvm_async_pf_enabled);
|
||||||
|
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, kvm_async_pf_vector);
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
|
smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
|
||||||
|
Loading…
Reference in New Issue
Block a user