From ba696429d290690db967e5f49463df4b2c1314a4 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Wed, 3 Apr 2019 19:03:09 +0200
Subject: [PATCH 1/3] x86/hyper-v: Implement EOI assist

Hyper-V TLFS suggests an optimization to avoid imminent VMExit on EOI:

"The OS performs an EOI by atomically writing zero to the EOI Assist
field of the virtual VP assist page and checking whether the "No EOI
required" field was previously zero. If it was, the OS must write to
the HV_X64_APIC_EOI MSR thereby triggering an intercept into the
hypervisor."

Implement the optimization in Linux.

Tested-by: Long Li
Signed-off-by: Vitaly Kuznetsov
Cc: Borislav Petkov
Cc: Haiyang Zhang
Cc: K. Y. Srinivasan
Cc: Linus Torvalds
Cc: Michael Kelley (EOSG)
Cc: Peter Zijlstra
Cc: Sasha Levin
Cc: Simon Xiao
Cc: Stephen Hemminger
Cc: Thomas Gleixner
Cc: linux-hyperv@vger.kernel.org
Link: http://lkml.kernel.org/r/20190403170309.4107-1-vkuznets@redhat.com
Signed-off-by: Ingo Molnar
---
 arch/x86/hyperv/hv_apic.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 8eb6fbee8e13..5c056b8aebef 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -86,6 +86,11 @@ static void hv_apic_write(u32 reg, u32 val)
 
 static void hv_apic_eoi_write(u32 reg, u32 val)
 {
+	struct hv_vp_assist_page *hvp = hv_vp_assist_page[smp_processor_id()];
+
+	if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
+		return;
+
 	wrmsr(HV_X64_MSR_EOI, val, 0);
 }
 

From 02143c2931c3c0faf088c5859a10de6c2b4f2d96 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Fri, 29 Mar 2019 17:47:40 -0700
Subject: [PATCH 2/3] x86/hyperv: Make hv_vcpu_is_preempted() visible

This function is referenced from assembler, so it needs to be marked
visible for LTO.

Fixes: 3a025de64bf8 ("x86/hyperv: Enable PV qspinlock for Hyper-V")
Signed-off-by: Andi Kleen
Signed-off-by: Thomas Gleixner
Reviewed-by: Yi Sun
Cc: kys@microsoft.com
Cc: haiyangz@microsoft.com
Link: https://lkml.kernel.org/r/20190330004743.29541-6-andi@firstfloor.org
---
 arch/x86/hyperv/hv_spinlock.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index a861b0456b1a..07f21a06392f 100644
--- a/arch/x86/hyperv/hv_spinlock.c
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -56,7 +56,7 @@ static void hv_qlock_wait(u8 *byte, u8 val)
 /*
  * Hyper-V does not support this so far.
  */
-bool hv_vcpu_is_preempted(int vcpu)
+__visible bool hv_vcpu_is_preempted(int vcpu)
 {
 	return false;
 }
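For reference, the check that patch 1 adds to hv_apic_eoi_write() boils down
to "atomically clear the assist field and test whether bit 0, the 'No EOI
required' flag, was set". Below is a minimal userspace sketch of that pattern;
the fake_vp_assist_page type and the eoi_needs_msr_write() helper are invented
stand-ins for illustration, and C11 atomic_exchange() stands in for the
kernel's xchg():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_vp_assist_page {
	_Atomic unsigned int apic_assist;	/* bit 0: "No EOI required" */
};

/* Return true when the EOI must still be signalled via the MSR path. */
static bool eoi_needs_msr_write(struct fake_vp_assist_page *hvp)
{
	/* Atomically clear the field and test what was there before. */
	return !(atomic_exchange(&hvp->apic_assist, 0) & 0x1);
}

int main(void)
{
	struct fake_vp_assist_page page = { .apic_assist = 1 };

	/* First EOI: bit 0 was set, so no MSR write (no intercept) is needed. */
	printf("MSR write needed: %d\n", eoi_needs_msr_write(&page));
	/* Second EOI: the field is now zero, fall back to the MSR write. */
	printf("MSR write needed: %d\n", eoi_needs_msr_write(&page));
	return 0;
}

The clear-and-test has to be a single atomic exchange, as the quoted TLFS text
requires, so that a flag the hypervisor sets concurrently cannot be cleared
without being observed.
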
From 14e581c381b942ce5463a7e61326d8ce1c843be7 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Fri, 29 Mar 2019 17:47:42 -0700
Subject: [PATCH 3/3] x86/kvm: Make steal_time visible

This per cpu variable is accessed from assembler code, so it needs to
be visible for LTO.

Signed-off-by: Andi Kleen
Signed-off-by: Thomas Gleixner
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20190330004743.29541-8-andi@firstfloor.org
---
 arch/x86/kernel/kvm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5c93a65ee1e5..3f0cc828cc36 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -67,7 +67,7 @@ static int __init parse_no_stealacc(char *arg)
 early_param("no-steal-acc", parse_no_stealacc);
 
 static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
-static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
+DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
 static int has_steal_clock = 0;
 
 /*
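
Patches 2 and 3 fix the same class of problem: under LTO, a function or
variable whose only references live in assembler code looks unused to the
link-time optimizer, which may internalize or discard it. The kernel's
__visible annotation expands to the compiler's externally_visible attribute.
Here is a minimal x86-64 sketch of the pattern, assuming GCC; fake_steal_time
and read_fake_steal_time() are made-up names for illustration, not the
kernel's identifiers:

#include <stdio.h>

/* Without this attribute, whole-program LTO may assume the symbol is
 * unused, because its only reference is hidden inside an asm() template. */
__attribute__((externally_visible)) long fake_steal_time = 42;

static long read_fake_steal_time(void)
{
	long v;

	/* Reference the symbol purely from assembler, much as the kernel's
	 * callee-save thunks reference steal_time and hv_vcpu_is_preempted(). */
	asm("movq fake_steal_time(%%rip), %0" : "=r"(v));
	return v;
}

int main(void)
{
	printf("steal time: %ld\n", read_fake_steal_time());
	return 0;
}

Built with something like gcc -O2 -flto, dropping the attribute leaves the
optimizer free to treat the symbol as internal; that is what __visible
prevents for hv_vcpu_is_preempted() and the steal_time per-cpu variable.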