KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA
Some pfncache pages may actually be overlays on guest memory that have a
fixed HVA within the VMM. It's pointless to invalidate such cached mappings
if the overlay is moved, so allow a cache to be activated directly with the
HVA to cater for such cases. A subsequent patch will make use of this
facility.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-10-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
commit 721f5b0dda
parent 9e7325acb3
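
As a rough usage sketch (not part of this patch; kvm, gpa, uhva, len and the
function name are placeholder assumptions), the difference between the
existing GPA-based activation and the new fixed-HVA activation looks like this:

/*
 * Hypothetical illustration only: set up one cache translated through the
 * memslots and one pinned to a fixed userspace address, e.g. an overlay
 * page owned by the VMM.  'len' must fit within a single page, as enforced
 * by the activation helpers.
 */
static int example_activate(struct kvm *kvm, gpa_t gpa, unsigned long uhva,
                            unsigned long len,
                            struct gfn_to_pfn_cache *gpa_gpc,
                            struct gfn_to_pfn_cache *hva_gpc)
{
        int ret;

        kvm_gpc_init(gpa_gpc, kvm);
        kvm_gpc_init(hva_gpc, kvm);

        /* GPA-based: the HVA is (re)derived from the memslots on refresh. */
        ret = kvm_gpc_activate(gpa_gpc, gpa, len);
        if (ret)
                return ret;

        /* HVA-based: bypasses GPA->HVA translation entirely. */
        return kvm_gpc_activate_hva(hva_gpc, uhva, len);
}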
include/linux/kvm_host.h
@@ -148,6 +148,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
 
 #endif
 
+static inline bool kvm_is_error_gpa(gpa_t gpa)
+{
+        return gpa == INVALID_GPA;
+}
+
 #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
 
 static inline bool is_error_page(struct page *page)
@@ -1344,6 +1349,22 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
  */
 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
 
+/**
+ * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
+ *
+ * @gpc:     struct gfn_to_pfn_cache object.
+ * @hva:     userspace virtual address to map.
+ * @len:     sanity check; the range being accessed must fit within a single page.
+ *
+ * @return:  0 for success.
+ *           -EINVAL for a mapping which would cross a page boundary.
+ *           -EFAULT for an untranslatable guest physical address.
+ *
+ * The semantics of this function are the same as those of kvm_gpc_activate(). It
+ * merely bypasses a layer of address translation.
+ */
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
+
 /**
  * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
  *
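
A hedged illustration of the -EINVAL case documented in the kernel-doc above
(the helper name and its caller are hypothetical): a request that would cross
a page boundary is refused.

/*
 * Hypothetical illustration only: 'bad_len' deliberately spans past the
 * end of the page containing 'uhva', so the call fails with -EINVAL.
 */
static int example_boundary_check(struct gfn_to_pfn_cache *gpc, unsigned long uhva)
{
        unsigned long bad_len = PAGE_SIZE - offset_in_page(uhva) + 1;

        return kvm_gpc_activate_hva(gpc, uhva, bad_len);       /* -EINVAL */
}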
@@ -1390,6 +1411,16 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
  */
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
 
+static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
+{
+        return gpc->active && !kvm_is_error_gpa(gpc->gpa);
+}
+
+static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
+{
+        return gpc->active && kvm_is_error_gpa(gpc->gpa);
+}
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
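
The helpers above encode the cache flavour in gpc->gpa: an active cache with
INVALID_GPA is HVA-based, anything else is GPA-based. A minimal, hypothetical
sketch of how invalidation-side code could use them, in line with the commit
message's point that a fixed-HVA overlay should not be invalidated by guest
memory layout changes:

/*
 * Hypothetical sketch (the real user arrives in a later patch): a memslot
 * update only matters to caches whose mapping was derived from a GPA; a
 * cache pinned to a fixed HVA ignores guest layout changes.
 */
static bool example_affected_by_memslot_change(struct gfn_to_pfn_cache *gpc)
{
        if (kvm_gpc_is_hva_active(gpc))
                return false;   /* fixed HVA: nothing to re-translate */

        return kvm_gpc_is_gpa_active(gpc);
}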
virt/kvm/pfncache.c
@@ -48,7 +48,14 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
         if (!gpc->active)
                 return false;
 
-        if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
+        /*
+         * If the page was cached from a memslot, make sure the memslots have
+         * not been re-configured.
+         */
+        if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
+                return false;
+
+        if (kvm_is_error_hva(gpc->uhva))
                 return false;
 
         if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
@@ -209,11 +216,10 @@ out_error:
         return -EFAULT;
 }
 
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
                              unsigned long len)
 {
-        struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
-        unsigned long page_offset = offset_in_page(gpa);
+        unsigned long page_offset;
         bool unmap_old = false;
         unsigned long old_uhva;
         kvm_pfn_t old_pfn;
@@ -221,10 +227,16 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
         void *old_khva;
         int ret;
 
+        /* Either gpa or uhva must be valid, but not both */
+        if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
+                return -EINVAL;
+
         /*
-         * If must fit within a single page. The 'len' argument is
-         * only to enforce that.
+         * The cached access must fit within a single page. The 'len' argument
+         * exists only to enforce that.
          */
+        page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+                                              offset_in_page(gpa);
         if (page_offset + len > PAGE_SIZE)
                 return -EINVAL;
 
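
The WARN_ON_ONCE above enforces that exactly one of the two addresses passed
to __kvm_gpc_refresh() is valid; "both valid" and "both invalid" make the two
error predicates equal and are rejected. A hedged restatement of the check as
a standalone helper (hypothetical name):

/*
 * Hypothetical restatement of the sanity check: the arguments are
 * well-formed iff exactly one of the two addresses is valid.
 */
static bool example_refresh_args_ok(gpa_t gpa, unsigned long uhva)
{
        return kvm_is_error_gpa(gpa) != kvm_is_error_hva(uhva);
}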
@@ -246,29 +258,39 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
         old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
         old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
 
-        /* Refresh the userspace HVA if necessary */
-        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-            kvm_is_error_hva(gpc->uhva)) {
-                gfn_t gfn = gpa_to_gfn(gpa);
+        if (kvm_is_error_gpa(gpa)) {
+                gpc->gpa = INVALID_GPA;
+                gpc->memslot = NULL;
+                gpc->uhva = PAGE_ALIGN_DOWN(uhva);
 
-                gpc->gpa = gpa;
-                gpc->generation = slots->generation;
-                gpc->memslot = __gfn_to_memslot(slots, gfn);
-                gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
-
-                if (kvm_is_error_hva(gpc->uhva)) {
-                        ret = -EFAULT;
-                        goto out;
-                }
-
-                /*
-                 * Even if the GPA and/or the memslot generation changed, the
-                 * HVA may still be the same.
-                 */
                 if (gpc->uhva != old_uhva)
                         hva_change = true;
         } else {
-                gpc->uhva = old_uhva;
+                struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
+
+                if (gpc->gpa != gpa || gpc->generation != slots->generation ||
+                    kvm_is_error_hva(gpc->uhva)) {
+                        gfn_t gfn = gpa_to_gfn(gpa);
+
+                        gpc->gpa = gpa;
+                        gpc->generation = slots->generation;
+                        gpc->memslot = __gfn_to_memslot(slots, gfn);
+                        gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
+
+                        if (kvm_is_error_hva(gpc->uhva)) {
+                                ret = -EFAULT;
+                                goto out;
+                        }
+
+                        /*
+                         * Even if the GPA and/or the memslot generation changed, the
+                         * HVA may still be the same.
+                         */
+                        if (gpc->uhva != old_uhva)
+                                hva_change = true;
+                } else {
+                        gpc->uhva = old_uhva;
+                }
         }
 
         /* Note: the offset must be correct before calling hva_to_pfn_retry() */
@@ -319,7 +341,15 @@ out_unlock:
 
 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-        return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+        /*
+         * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
+         * or HVA-based, not both. For GPA-based caches, the HVA will be
+         * recomputed during refresh if necessary.
+         */
+        unsigned long uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva :
+                                                          KVM_HVA_ERR_BAD;
+
+        return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
 }
 
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
@@ -329,10 +359,12 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
 
         gpc->kvm = kvm;
         gpc->pfn = KVM_PFN_ERR_FAULT;
+        gpc->gpa = INVALID_GPA;
         gpc->uhva = KVM_HVA_ERR_BAD;
 }
 
-int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
+                              unsigned long len)
 {
         struct kvm *kvm = gpc->kvm;
 
@@ -353,7 +385,17 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
                 gpc->active = true;
                 write_unlock_irq(&gpc->lock);
         }
-        return __kvm_gpc_refresh(gpc, gpa, len);
+        return __kvm_gpc_refresh(gpc, gpa, uhva, len);
 }
 
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+{
+        return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
+}
+
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
+{
+        return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
+}
+
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
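
For completeness, a hedged sketch of how a consumer might access the cached
mapping afterwards, modelled on the existing pfncache usage pattern (the
function name is hypothetical); once activated, GPA-based and HVA-based
caches are used identically via kvm_gpc_check() and kvm_gpc_refresh():

/*
 * Hypothetical consumer: write a value into the cached page.  The loop
 * revalidates a stale mapping before use; only the activation step
 * differs between GPA-based and HVA-based caches.
 */
static int example_write_overlay(struct gfn_to_pfn_cache *gpc, u32 val)
{
        unsigned long flags;

        read_lock_irqsave(&gpc->lock, flags);
        while (!kvm_gpc_check(gpc, sizeof(val))) {
                int ret;

                read_unlock_irqrestore(&gpc->lock, flags);

                ret = kvm_gpc_refresh(gpc, sizeof(val));
                if (ret)
                        return ret;

                read_lock_irqsave(&gpc->lock, flags);
        }

        *(u32 *)gpc->khva = val;

        read_unlock_irqrestore(&gpc->lock, flags);
        return 0;
}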