From 334006b78ca84b7619d7dd313d5b6b39007e9528 Mon Sep 17 00:00:00 2001
From: Xiaoyao Li
Date: Mon, 10 Apr 2023 08:50:16 -0400
Subject: [PATCH 01/11] KVM: VMX: Use kvm_read_cr4() to get cr4 value

Using vcpu->arch.cr4 directly is not recommended, as it may hold a stale
value when CR4 is not marked available in the register cache.  Use
kvm_read_cr4() instead to ensure the correct value is read.

Signed-off-by: Xiaoyao Li
Link: https://lore.kernel.org/r/20230410125017.1305238-2-xiaoyao.li@intel.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 44fb619803b8..4335d4b9cb2e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3387,7 +3387,7 @@ static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	unsigned long old_cr4 = vcpu->arch.cr4;
+	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	/*
 	 * Pass through host's Machine Check Enable value to hw_cr4, which

From 82dc11b82b001ca8024663b3e7990b59f600841f Mon Sep 17 00:00:00 2001
From: Xiaoyao Li
Date: Mon, 10 Apr 2023 08:50:17 -0400
Subject: [PATCH 02/11] KVM: VMX: Move the comment of CR4.MCE handling right
 above the code

Move the comment about keeping the host's CR4.MCE loaded in hardware
above the code that actually modifies the hardware CR4 value.

No functional change intended.

Signed-off-by: Xiaoyao Li
Link: https://lore.kernel.org/r/20230410125017.1305238-3-xiaoyao.li@intel.com
[sean: elaborate in changelog]
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmx.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4335d4b9cb2e..275542d24375 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3389,13 +3389,13 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	unsigned long hw_cr4;
+
 	/*
 	 * Pass through host's Machine Check Enable value to hw_cr4, which
 	 * is in force while we are in guest mode.  Do not let guests control
 	 * this bit, even if host CR4.MCE == 0.
 	 */
-	unsigned long hw_cr4;
-
 	hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
 	if (is_unrestricted_guest(vcpu))
 		hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;

From 3243b93c16d90c2d63cf30655276ffdf5bb65bf7 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 13 Apr 2023 16:19:13 -0700
Subject: [PATCH 03/11] KVM: VMX: Treat UMIP as emulated if and only if the
 host doesn't have UMIP

Advertise UMIP as emulated if and only if the host doesn't natively
support UMIP, otherwise vmx_umip_emulated() is misleading when the host
_does_ support UMIP.

Of the four users of vmx_umip_emulated(), two already check for native
support, and the logic in vmx_set_cpu_caps() is relevant if and only if
UMIP isn't natively supported, as UMIP is set in KVM's caps by
kvm_set_cpu_caps() when UMIP is present in hardware.

That leaves KVM's stuffing of X86_CR4_UMIP into the default cr4_fixed1
value enumerated for nested VMX.  In that case, checking for (lack of)
host support is actually a bug fix of sorts, as enumerating UMIP support
based solely on descriptor table exiting works only because KVM doesn't
sanity check MSR_IA32_VMX_CR4_FIXED1.  E.g. if a (very theoretical) host
supported UMIP in hardware but didn't allow UMIP+VMX, KVM would advertise
UMIP but not actually emulate UMIP.
Of course, KVM would explode long before it could run a nested VM on said
theoretical CPU, as KVM doesn't modify host CR4 when enabling VMX, i.e.
would load an "illegal" value into vmcs.HOST_CR4.

Reported-by: Robert Hoo
Link: https://lore.kernel.org/all/20230310125718.1442088-2-robert.hu@intel.com
Link: https://lore.kernel.org/r/20230413231914.1482782-2-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/capabilities.h | 4 ++--
 arch/x86/kvm/vmx/nested.c       | 3 +--
 arch/x86/kvm/vmx/vmx.c          | 2 +-
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 45162c1bcd8f..d0abee35d7ba 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -152,8 +152,8 @@ static inline bool cpu_has_vmx_ept(void)
 
 static inline bool vmx_umip_emulated(void)
 {
-	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_DESC;
+	return !boot_cpu_has(X86_FEATURE_UMIP) &&
+	       (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_DESC);
 }
 
 static inline bool cpu_has_vmx_rdtscp(void)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index e35cf0bd0df9..368a43e3b40e 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2328,8 +2328,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
		 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
		 * will not have to rewrite the controls just for this bit.
		 */
-		if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
-		    (vmcs12->guest_cr4 & X86_CR4_UMIP))
+		if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP))
			exec_control |= SECONDARY_EXEC_DESC;
 
		if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 275542d24375..98f966635c88 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3404,7 +3404,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	else
 		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
 
-	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
+	if (vmx_umip_emulated()) {
 		if (cr4 & X86_CR4_UMIP) {
 			secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
 			hw_cr4 &= ~X86_CR4_UMIP;

From 023cfa6fc200fc179dbf8e1857cc7140fa1466f9 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 13 Apr 2023 16:19:14 -0700
Subject: [PATCH 04/11] KVM: VMX: Use proper accessor to read guest CR4 in
 handle_desc()

Use kvm_is_cr4_bit_set() to read guest CR4.UMIP when sanity checking that
a descriptor table VM-Exit occurs if and only if guest.CR4.UMIP=1.  UMIP
can't be guest-owned, i.e. using kvm_read_cr4_bits() to decache
guest-owned bits isn't strictly necessary, but eliminating raw reads of
vcpu->arch.cr4 is desirable as it makes it easy to visually audit KVM for
correctness.

Opportunistically add a compile-time assertion that UMIP isn't
guest-owned, as letting the guest own UMIP isn't compatible with
emulation (or any CR4 bit that is emulated by KVM).

Opportunistically change the WARN_ON() to a ONCE variant.  When the WARN
fires, it fires _a lot_, and spamming the kernel logs ends up doing more
harm than whatever led to KVM's unnecessary emulation.
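As an illustrative aside (not part of the diff below): the accessor matters
because guest-owned CR4 bits can be stale in vcpu->arch.cr4 until they are
re-read from hardware.  A simplified sketch of the
kvm_read_cr4_bits()/kvm_is_cr4_bit_set() pattern, with the decache step
reduced to a hypothetical helper, looks roughly like:

	/*
	 * Simplified sketch of KVM's CR4 accessor pattern, for illustration
	 * only; example_decache_cr4_from_vmcs() is a hypothetical stand-in
	 * for the real register-cache refresh.
	 */
	static unsigned long example_read_cr4_bits(struct kvm_vcpu *vcpu,
						   unsigned long mask)
	{
		/* Guest-owned bits may be stale until read back from the VMCS. */
		if (mask & vcpu->arch.cr4_guest_owned_bits)
			example_decache_cr4_from_vmcs(vcpu);

		return vcpu->arch.cr4 & mask;
	}

	static bool example_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
	{
		return !!example_read_cr4_bits(vcpu, cr4_bit);
	}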
Reported-by: Robert Hoo
Link: https://lore.kernel.org/all/20230310125718.1442088-4-robert.hu@intel.com
Link: https://lore.kernel.org/r/20230413231914.1482782-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmx.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 98f966635c88..7ecab2118106 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5402,7 +5402,13 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 
 static int handle_desc(struct kvm_vcpu *vcpu)
 {
-	WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
+	/*
+	 * UMIP emulation relies on intercepting writes to CR4.UMIP, i.e. this
+	 * and other code needs to be updated if UMIP can be guest owned.
+	 */
+	BUILD_BUG_ON(KVM_POSSIBLE_CR4_GUEST_BITS & X86_CR4_UMIP);
+
+	WARN_ON_ONCE(!kvm_is_cr4_bit_set(vcpu, X86_CR4_UMIP));
 
 	return kvm_emulate_instruction(vcpu, 0);
 }

From 33ab767c2628136c54c2e2160e4c536c7db9c6d0 Mon Sep 17 00:00:00 2001
From: Jinrong Liang
Date: Tue, 11 Apr 2023 21:03:38 +0800
Subject: [PATCH 05/11] KVM: x86/pmu: Remove redundant check for
 MSR_IA32_DS_AREA set handler

After commit 2de154f541fc ("KVM: x86/pmu: Provide "error" semantics for
unsupported-but-known PMU MSRs"), the guest_cpuid_has(DS) check is no
longer necessary: if the guest supports X86_FEATURE_DS, the check never
returns 1, and if the guest does not support X86_FEATURE_DS, the set_msr
handler gets false from kvm_pmu_is_valid_msr() before reaching this
point.  Therefore the check can never fire and can be safely removed,
which also simplifies the code and improves its readability.

Signed-off-by: Jinrong Liang
Link: https://lore.kernel.org/r/20230411130338.8592-1-cloudliang@tencent.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/pmu_intel.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 741efe2c497b..84be32d9f365 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -444,8 +444,6 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	case MSR_IA32_DS_AREA:
-		if (msr_info->host_initiated && data && !guest_cpuid_has(vcpu, X86_FEATURE_DS))
-			return 1;
 		if (is_noncanonical_address(data, vcpu))
 			return 1;
 

From 331f229768160dceec5b5694ea32ecf66e2e2452 Mon Sep 17 00:00:00 2001
From: Jon Kohler
Date: Wed, 31 May 2023 11:58:21 -0400
Subject: [PATCH 06/11] KVM: VMX: restore vmx_vmexit alignment

Commit 8bd200d23ec4 ("KVM: VMX: Flatten __vmx_vcpu_run()") changed
vmx_vmexit from SYM_FUNC_START to SYM_INNER_LABEL, accidentally removing
the 16-byte alignment, as SYM_FUNC_START uses SYM_A_ALIGN and
SYM_INNER_LABEL does not.  Josh mentioned [1] this was unintentional.

Fix by changing to SYM_INNER_LABEL_ALIGN instead.
[1] https://lore.kernel.org/lkml/Y3adkSe%2FJ70PqUyt@p183

Fixes: 8bd200d23ec4 ("KVM: VMX: Flatten __vmx_vcpu_run()")
Signed-off-by: Jon Kohler
Suggested-by: Alexey Dobriyan
CC: Josh Poimboeuf
Acked-by: Josh Poimboeuf
Reviewed-by: Jim Mattson
Link: https://lore.kernel.org/r/20230531155821.80590-1-jon@nutanix.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmenter.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 631fd7da2bc3..07e927d4d099 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -187,7 +187,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	_ASM_EXTABLE(.Lvmresume, .Lfixup)
 	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)
 
-SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
+SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
 
 	/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
 	UNWIND_HINT_RESTORE

From 5e50082c8c21cd32ef21f523c149939668954ab6 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Wed, 5 Apr 2023 16:45:55 -0700
Subject: [PATCH 07/11] KVM: VMX: Inject #GP on ENCLS if vCPU has paging
 disabled (CR0.PG==0)

Inject a #GP when emulating/forwarding a valid ENCLS leaf if the vCPU has
paging disabled, e.g. if KVM is intercepting ECREATE to enforce additional
restrictions.  The pseudocode in the SDM lists all #GP triggers, including
CR0.PG=0, as being checked after the ENCLS-exiting checks, i.e. the
VM-Exit will occur before the CPU performs the CR0.PG check.

Fixes: 70210c044b4e ("KVM: VMX: Add SGX ENCLS[ECREATE] handler to enforce CPUID restrictions")
Cc: Binbin Wu
Cc: Kai Huang
Tested-by: Kai Huang
Reviewed-by: Kai Huang
Link: https://lore.kernel.org/r/20230405234556.696927-2-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/sgx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 2261b684a7d4..137088a663ff 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -382,7 +382,7 @@ int handle_encls(struct kvm_vcpu *vcpu)
 
 	if (!encls_leaf_enabled_in_guest(vcpu, leaf)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
-	} else if (!sgx_enabled_in_guest_bios(vcpu)) {
+	} else if (!sgx_enabled_in_guest_bios(vcpu) || !is_paging(vcpu)) {
 		kvm_inject_gp(vcpu, 0);
 	} else {
 		if (leaf == ECREATE)

From c3a1e119a343a70a9f49689b8f18bb43f236d681 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Wed, 5 Apr 2023 16:45:56 -0700
Subject: [PATCH 08/11] KVM: VMX: Inject #GP, not #UD, if SGX2 ENCLS leafs are
 unsupported

Per Intel's SDM, unsupported ENCLS leafs result in a #GP, not a #UD.
SGX1 is a special snowflake as the SGX1 flag is used by the CPU as a
"soft" disable, e.g. if software disables machine check reporting, i.e.
having SGX but not SGX1 is effectively "SGX completely unsupported" and
thus #UDs.
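As a reader aid (a condensed restatement of the diff below, not additional
code), the exception selection in handle_encls() after this patch and the
previous one boils down to:

	if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
	    !guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
		/* No usable SGX1 => ENCLS itself is undefined => #UD. */
		kvm_queue_exception(vcpu, UD_VECTOR);
	} else if (!encls_leaf_enabled_in_guest(vcpu, leaf) ||
		   !sgx_enabled_in_guest_bios(vcpu) || !is_paging(vcpu)) {
		/* Unsupported leaf, SGX disabled by BIOS, or CR0.PG=0 => #GP. */
		kvm_inject_gp(vcpu, 0);
	} else {
		/* Emulate or forward the leaf. */
	}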
Fixes: 9798adbc04cf ("KVM: VMX: Frame in ENCLS handler for SGX virtualization")
Cc: Binbin Wu
Cc: Kai Huang
Tested-by: Kai Huang
Reviewed-by: Kai Huang
Link: https://lore.kernel.org/r/20230405234556.696927-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/sgx.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index 137088a663ff..3e822e582497 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -357,11 +357,12 @@ static int handle_encls_einit(struct kvm_vcpu *vcpu)
 
 static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf)
 {
-	if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX))
-		return false;
-
+	/*
+	 * ENCLS generates a #UD if SGX1 isn't supported, i.e. this point will
+	 * be reached if and only if the SGX1 leafs are enabled.
+	 */
 	if (leaf >= ECREATE && leaf <= ETRACK)
-		return guest_cpuid_has(vcpu, X86_FEATURE_SGX1);
+		return true;
 
 	if (leaf >= EAUG && leaf <= EMODT)
 		return guest_cpuid_has(vcpu, X86_FEATURE_SGX2);
@@ -380,9 +381,11 @@ int handle_encls(struct kvm_vcpu *vcpu)
 {
 	u32 leaf = (u32)kvm_rax_read(vcpu);
 
-	if (!encls_leaf_enabled_in_guest(vcpu, leaf)) {
+	if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
+	    !guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
-	} else if (!sgx_enabled_in_guest_bios(vcpu) || !is_paging(vcpu)) {
+	} else if (!encls_leaf_enabled_in_guest(vcpu, leaf) ||
+		   !sgx_enabled_in_guest_bios(vcpu) || !is_paging(vcpu)) {
 		kvm_inject_gp(vcpu, 0);
 	} else {
 		if (leaf == ECREATE)

From 878940b33d7678e39a526ffe264ee025977dc67e Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 1 Jun 2023 18:15:16 -0700
Subject: [PATCH 09/11] KVM: VMX: Retry APIC-access page reload if
 invalidation is in-progress

Re-request an APIC-access page reload if there is a relevant mmu_notifier
invalidation in-progress when KVM retrieves the backing pfn, i.e. stall
vCPUs until the backing pfn for the APIC-access page is "officially"
stable.

Relying on the primary MMU to not make changes after invoking
->invalidate_range() works, e.g. any additional changes to a PRESENT PTE
would also trigger an ->invalidate_range(), but using ->invalidate_range()
to fudge around KVM not honoring past and in-progress invalidations is a
bit hacky.

Honoring invalidations will allow using KVM's standard mmu_notifier hooks
to detect APIC-access page reloads, which will in turn allow removing
KVM's implementation of ->invalidate_range() (the APIC-access page case is
a true one-off).

Opportunistically add a comment to explain why doing nothing if a memslot
isn't found is functionally correct.
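The retry idiom the change relies on, pulled out of the diff below as an
abbreviated sketch (names match the patch; the nested-guest deferral and
TLB flush are omitted):

	mmu_seq = kvm->mmu_invalidate_seq;	/* snapshot before the pfn lookup */
	smp_rmb();				/* pairs with smp_wmb() in kvm_mmu_invalidate_end() */

	pfn = gfn_to_pfn_memslot(slot, gfn);	/* may race with an invalidation */
	if (is_error_noslot_pfn(pfn))
		return;

	read_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_hva(kvm, mmu_seq, gfn_to_hva_memslot(slot, gfn))) {
		/* Raced with an invalidation: punt and reload again later. */
		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
	} else {
		/* The pfn is stable while mmu_lock is held; safe to consume. */
		vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
	}
	read_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);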
Suggested-by: Jason Gunthorpe
Cc: Alistair Popple
Cc: Robin Murphy
Reviewed-by: Alistair Popple
Reviewed-by: Paolo Bonzini
Link: https://lore.kernel.org/r/20230602011518.787006-2-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmx.c | 50 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 45 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 7ecab2118106..9ea4a5dfe62a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6714,7 +6714,12 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 
 static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
 {
-	struct page *page;
+	const gfn_t gfn = APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *slot;
+	unsigned long mmu_seq;
+	kvm_pfn_t pfn;
 
 	/* Defer reload until vmcs01 is the current VMCS. */
 	if (is_guest_mode(vcpu)) {
@@ -6726,18 +6731,53 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
 		return;
 
-	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
-	if (is_error_page(page))
+	/*
+	 * Grab the memslot so that the hva lookup for the mmu_notifier retry
+	 * is guaranteed to use the same memslot as the pfn lookup, i.e. rely
+	 * on the pfn lookup's validation of the memslot to ensure a valid hva
+	 * is used for the retry check.
+	 */
+	slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
+	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return;
 
-	vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page));
+	/*
+	 * Ensure that the mmu_notifier sequence count is read before KVM
+	 * retrieves the pfn from the primary MMU.  Note, the memslot is
+	 * protected by SRCU, not the mmu_notifier.  Pairs with the smp_wmb()
+	 * in kvm_mmu_invalidate_end().
+	 */
+	mmu_seq = kvm->mmu_invalidate_seq;
+	smp_rmb();
+
+	/*
+	 * No need to retry if the memslot does not exist or is invalid.  KVM
+	 * controls the APIC-access page memslot, and only deletes the memslot
+	 * if APICv is permanently inhibited, i.e. the memslot won't reappear.
+	 */
+	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (is_error_noslot_pfn(pfn))
+		return;
+
+	read_lock(&vcpu->kvm->mmu_lock);
+	if (mmu_invalidate_retry_hva(kvm, mmu_seq,
+				     gfn_to_hva_memslot(slot, gfn))) {
+		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+		read_unlock(&vcpu->kvm->mmu_lock);
+		goto out;
+	}
+
+	vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
+	read_unlock(&vcpu->kvm->mmu_lock);
+
 	vmx_flush_tlb_current(vcpu);
 
+out:
 	/*
 	 * Do not pin apic access page in memory, the MMU notifier
 	 * will call us again if it is migrated or swapped out.
 	 */
-	put_page(page);
+	kvm_release_pfn_clean(pfn);
 }
 
 static void vmx_hwapic_isr_update(int max_isr)

From 0a8a5f2c8c266e9d94fb45f76a26cff135d0051c Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 1 Jun 2023 18:15:17 -0700
Subject: [PATCH 10/11] KVM: x86: Use standard mmu_notifier invalidate hooks
 for APIC access page

Now that KVM honors past and in-progress mmu_notifier invalidations when
reloading the APIC-access page, use KVM's "standard" invalidation hooks
to trigger a reload and delete the one-off usage of invalidate_range().

Aside from eliminating one-off code in KVM, dropping KVM's use of
invalidate_range() will allow common mmu_notifier to redefine the API to
be more strictly focused on invalidating secondary TLBs that share the
primary MMU's page tables.
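After this change the reload is driven entirely from KVM's gfn-range unmap
path; as a sketch (condensed from the mmu.c diff below, with the SPTE
zapping elided):

	bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
	{
		bool flush = false;

		/* ... zap shadow/TDP MMU SPTEs covering the range ... */

		/*
		 * The APIC-access page lives in a KVM-internal memslot; if that
		 * slot is being unmapped, make all vCPUs re-resolve the backing
		 * pfn before re-entering the guest.
		 */
		if (range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
			kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);

		return flush;
	}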
Suggested-by: Jason Gunthorpe
Cc: Alistair Popple
Cc: Robin Murphy
Reviewed-by: Alistair Popple
Reviewed-by: Paolo Bonzini
Link: https://lore.kernel.org/r/20230602011518.787006-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/mmu.c   |  3 +++
 arch/x86/kvm/x86.c       | 14 --------------
 include/linux/kvm_host.h |  3 ---
 virt/kvm/kvm_main.c      | 18 ------------------
 4 files changed, 3 insertions(+), 35 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c8961f45e3b1..01a11ce68e57 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1600,6 +1600,9 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 	if (tdp_mmu_enabled)
 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
 
+	if (range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
+		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+
 	return flush;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c0778ca39650..f962b7e3487e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10435,20 +10435,6 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
 
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-					    unsigned long start, unsigned long end)
-{
-	unsigned long apic_address;
-
-	/*
-	 * The physical address of apic access page is stored in the VMCS.
-	 * Update it when it becomes invalid.
-	 */
-	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
-	if (start <= apic_address && apic_address < end)
-		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-}
-
 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
 	static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0e571e973bc2..cb66f4100be7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2237,9 +2237,6 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 }
 #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
 
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-					    unsigned long start, unsigned long end);
-
 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
 
 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 479802a892d4..f3c7c3c90161 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -154,11 +154,6 @@ static unsigned long long kvm_active_vms;
 
 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
 
-__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-						   unsigned long start, unsigned long end)
-{
-}
-
 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
 }
@@ -521,18 +516,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 	return container_of(mn, struct kvm, mmu_notifier);
 }
 
-static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
-					      struct mm_struct *mm,
-					      unsigned long start, unsigned long end)
-{
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int idx;
-
-	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
-	srcu_read_unlock(&kvm->srcu, idx);
-}
-
 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
@@ -892,7 +875,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
-	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,

From 0a3869e14d4a5e1016aad6bc6c5b70f82bc0dbbe Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 1 Jun 2023 18:15:18 -0700
Subject: [PATCH 11/11] KVM: x86/mmu: Trigger APIC-access page reload iff
 vendor code cares

Request an APIC-access page reload when the backing page is migrated (or
unmapped) if and only if vendor code actually plugs the backing pfn into
structures that reside outside of KVM's MMU.  This avoids kicking all
vCPUs in the (hopefully infrequent) scenario where the backing page is
migrated/invalidated.

Unlike VMX's APICv, SVM's AVIC doesn't plug the backing pfn directly into
the VMCB and so doesn't need a hook to invalidate an out-of-MMU "mapping".

Reviewed-by: Paolo Bonzini
Link: https://lore.kernel.org/r/20230602011518.787006-4-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/mmu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 01a11ce68e57..beb507d82adf 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1600,7 +1600,8 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 	if (tdp_mmu_enabled)
 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
 
-	if (range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
+	if (kvm_x86_ops.set_apic_access_page_addr &&
+	    range->slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT)
 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
 
 	return flush;