KVM: guest_memfd: let kvm_gmem_populate() operate only on private gfns

This check is currently performed by sev_gmem_post_populate(), but it
applies to all callers of kvm_gmem_populate(): the point of the function
is that the memory is being encrypted and some work has to be done
on all the gfns in order to encrypt them.

Therefore, check the KVM_MEMORY_ATTRIBUTE_PRIVATE attribute prior
to invoking the callback, and stop the operation if a shared page
is encountered.  Because CONFIG_KVM_PRIVATE_MEM in principle does
not require attributes, this makes kvm_gmem_populate() depend on
CONFIG_KVM_GENERIC_PRIVATE_MEM (which does require them).

Reviewed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit e4ee544792
parent 4b5f67120a
Paolo Bonzini, 2024-07-11 18:27:55 -04:00

3 changed files with 14 additions and 7 deletions
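
For orientation, here is a minimal sketch of the call shape this commit assumes, based on the kvm_gmem_populate() and kvm_gmem_populate_cb declarations in the kvm_host.h hunk below. The names demo_post_populate and demo_launch_update are hypothetical, not code from this commit:

/*
 * Hypothetical post-populate callback.  After this commit it only ever
 * runs on gfns whose KVM_MEMORY_ATTRIBUTE_PRIVATE attribute is set, so
 * a kvm_mem_is_private() check like the one removed from
 * sev_gmem_post_populate() below is no longer needed here.
 */
static int demo_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
			      void __user *src, int order, void *opaque)
{
	/* encrypt/measure the (1 << order) pages starting at pfn */
	return 0;
}

static long demo_launch_update(struct kvm *kvm, gfn_t gfn_start,
			       void __user *src, long npages)
{
	return kvm_gmem_populate(kvm, gfn_start, src, npages,
				 demo_post_populate, NULL);
}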

arch/x86/kvm/svm/sev.c

@@ -2279,13 +2279,6 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
 		bool assigned;
 		int level;
 
-		if (!kvm_mem_is_private(kvm, gfn)) {
-			pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
-				 __func__, gfn);
-			ret = -EINVAL;
-			goto err;
-		}
-
 		ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
 		if (ret || assigned) {
 			pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",

include/linux/kvm_host.h

@@ -2449,6 +2449,7 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
 #endif
 
+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
 /**
  * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
  *
@@ -2475,6 +2476,7 @@ typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 
 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
 		       kvm_gmem_populate_cb post_populate, void *opaque);
+#endif
 
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
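
The commit message's distinction between the two config options comes from virt/kvm/Kconfig. Roughly (a sketch of the contemporaneous definitions, not part of this diff), KVM_GENERIC_PRIVATE_MEM selects the memory-attributes machinery while bare KVM_PRIVATE_MEM does not:

config KVM_PRIVATE_MEM
	select XARRAY_MULTI
	bool

config KVM_GENERIC_PRIVATE_MEM
	select KVM_GENERIC_MEMORY_ATTRIBUTES
	select KVM_PRIVATE_MEM
	bool

Hence guarding kvm_gmem_populate() with CONFIG_KVM_GENERIC_PRIVATE_MEM ensures that kvm_range_has_memory_attributes() and KVM_MEMORY_ATTRIBUTE_PRIVATE are available wherever the function is built.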

virt/kvm/guest_memfd.c

@@ -612,6 +612,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
 
+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
 long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
 		       kvm_gmem_populate_cb post_populate, void *opaque)
 {
@@ -665,11 +666,21 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 		    (npages - i) < (1 << max_order))
 			max_order = 0;
 
+		ret = -EINVAL;
+		while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
+							KVM_MEMORY_ATTRIBUTE_PRIVATE,
+							KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
+			if (!max_order)
+				goto put_folio_and_exit;
+			max_order--;
+		}
+
 		p = src ? src + i * PAGE_SIZE : NULL;
 		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
 		if (!ret)
 			folio_mark_uptodate(folio);
 
+put_folio_and_exit:
 		folio_put(folio);
 		if (ret)
 			break;
@@ -681,3 +692,4 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 	return ret && !i ? ret : i;
 }
 EXPORT_SYMBOL_GPL(kvm_gmem_populate);
+#endif
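
One caller-visible subtlety in the final hunk: the return statement "ret && !i ? ret : i" preserves partial progress, so a negative errno is only returned when the very first gfn fails. A hypothetical wrapper (demo_populate_all is illustrative only, not part of this commit) spells out the resulting contract:

static long demo_populate_all(struct kvm *kvm, gfn_t start_gfn,
			      void __user *src, long npages,
			      kvm_gmem_populate_cb cb, void *opaque)
{
	long n = kvm_gmem_populate(kvm, start_gfn, src, npages, cb, opaque);

	/* n < 0: nothing was populated; the very first gfn failed,
	 * e.g. -EINVAL because it is shared rather than private.
	 */
	if (n < 0)
		return n;

	/* 0 <= n < npages: the walk stopped early; start_gfn + n is the
	 * first gfn that was not populated (e.g. a shared page).
	 */
	if (n < npages)
		return -EINVAL;

	return 0;	/* all npages gfns were populated */
}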