mirror of https://github.com/torvalds/linux.git
KVM: Allow range-based TLB invalidation from common code
Make kvm_flush_remote_tlbs_range() visible in common code and create a
default implementation that just invalidates the whole TLB.

This paves the way for several future features/cleanups:

- Introduction of range-based TLBI on ARM.
- Eliminating kvm_arch_flush_remote_tlbs_memslot()
- Moving the KVM/x86 TDP MMU to common code.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Acked-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230811045127.3308641-6-rananta@google.com
commit d478899605 (parent eddd214810)
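Before the diff, a minimal standalone sketch of the fallback pattern this patch introduces: common code first asks the architecture for a range-based flush and falls back to flushing the whole TLB when the arch hook reports the range flush could not be done. This is plain user-space C, not kernel code; flush_all, arch_flush_range and flush_range are illustrative stand-ins for kvm_flush_remote_tlbs(), kvm_arch_flush_remote_tlbs_range() and kvm_flush_remote_tlbs_range().

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct kvm { const char *name; };

/* Stand-in for kvm_flush_remote_tlbs(): always available, flushes everything. */
static void flush_all(struct kvm *kvm)
{
	printf("%s: full TLB flush\n", kvm->name);
}

/*
 * Stand-in for kvm_arch_flush_remote_tlbs_range(). An architecture without
 * range-based invalidation returns -EOPNOTSUPP, like the generic default this
 * patch adds; an architecture with support would flush [gfn, gfn + nr_pages)
 * and return 0.
 */
static int arch_flush_range(struct kvm *kvm, gfn_t gfn, uint64_t nr_pages)
{
	(void)kvm; (void)gfn; (void)nr_pages;
	return -EOPNOTSUPP;
}

/* Mirrors the shape of the new common kvm_flush_remote_tlbs_range(). */
static void flush_range(struct kvm *kvm, gfn_t gfn, uint64_t nr_pages)
{
	if (!arch_flush_range(kvm, gfn, nr_pages))
		return;

	/* Range flush unsupported or failed: fall back to a full flush. */
	flush_all(kvm);
}

int main(void)
{
	struct kvm vm = { .name = "demo-vm" };

	/* With the default hook above, this falls back to a full flush. */
	flush_range(&vm, 0x1000, 512);
	return 0;
}

The contract shown in the diff is the same: the arch hook returns 0 if it flushed the requested range and a negative error otherwise, and the common wrapper guarantees that callers always get some flush, range-based or full.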
@@ -1804,6 +1804,8 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 	return -ENOTSUPP;
 }
 
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+
 #define kvm_arch_pmi_in_guest(vcpu) \
 	((vcpu) && (vcpu)->arch.handling_intr_from_guest)
 
@@ -278,16 +278,12 @@ static inline bool kvm_available_flush_remote_tlbs_range(void)
 	return kvm_x86_ops.flush_remote_tlbs_range;
 }
 
-void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
-				 gfn_t nr_pages)
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
 {
-	int ret = -EOPNOTSUPP;
+	if (!kvm_x86_ops.flush_remote_tlbs_range)
+		return -EOPNOTSUPP;
 
-	if (kvm_x86_ops.flush_remote_tlbs_range)
-		ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, start_gfn,
-								   nr_pages);
-	if (ret)
-		kvm_flush_remote_tlbs(kvm);
+	return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
 }
 
 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
@@ -170,9 +170,6 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn,
 				    int min_level);
 
-void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
-				 gfn_t nr_pages);
-
 /* Flush the given page (huge or not) of guest memory. */
 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
 {
@@ -1359,6 +1359,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
@@ -1488,6 +1489,16 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
 #endif
 
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+						   gfn_t gfn, u64 nr_pages)
+{
+	return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+#endif
+
 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
@@ -366,6 +366,19 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
+{
+	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
+		return;
+
+	/*
+	 * Fall back to a flushing entire TLBs if the architecture range-based
+	 * TLB invalidation is unsupported or can't be performed for whatever
+	 * reason.
+	 */
+	kvm_flush_remote_tlbs(kvm);
+}
+
 static void kvm_flush_shadow_all(struct kvm *kvm)
 {
 	kvm_arch_flush_shadow_all(kvm);
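As a hedged illustration of the opt-in mechanism in the hunks above (again not kernel code): the generic header supplies a static-inline kvm_arch_flush_remote_tlbs_range() that returns -EOPNOTSUPP unless the architecture defines __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE, in which case the architecture provides the real implementation. A simplified standalone model of that #ifndef pattern, using the illustrative macro name HAVE_ARCH_RANGE_FLUSH in place of the real one:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
struct kvm { int id; };

/*
 * Build with -DHAVE_ARCH_RANGE_FLUSH to model an architecture (such as x86 in
 * this patch) that advertises range-based invalidation; build without it to
 * get the generic "not supported" default.
 */
#ifndef HAVE_ARCH_RANGE_FLUSH
static inline int arch_flush_range(struct kvm *kvm, gfn_t gfn, uint64_t nr_pages)
{
	(void)kvm; (void)gfn; (void)nr_pages;
	return -EOPNOTSUPP;	/* generic default: range flushes unavailable */
}
#else
static int arch_flush_range(struct kvm *kvm, gfn_t gfn, uint64_t nr_pages)
{
	/* A real architecture would invalidate [gfn, gfn + nr_pages) here. */
	printf("vm%d: range flush of %llu pages at gfn 0x%llx\n",
	       kvm->id, (unsigned long long)nr_pages, (unsigned long long)gfn);
	return 0;
}
#endif

int main(void)
{
	struct kvm vm = { .id = 0 };
	int ret = arch_flush_range(&vm, 0x1000, 16);

	printf("arch_flush_range() returned %d%s\n", ret,
	       ret ? " (caller would fall back to a full flush)" : "");
	return 0;
}

Assuming the file is saved as range_flush_demo.c (an illustrative name), "cc range_flush_demo.c && ./a.out" exercises the generic fallback path, while "cc -DHAVE_ARCH_RANGE_FLUSH range_flush_demo.c && ./a.out" models an architecture that handles the range itself.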