From cc753fbe1ac47560e1517e3e11fb0c8a3c95eef5 Mon Sep 17 00:00:00 2001
From: Hang Yuan
Date: Fri, 22 Dec 2017 18:06:31 +0800
Subject: [PATCH] drm/i915/gvt: validate gfn before set shadow page entry

GVT may receive a partial write on one guest PTE update. Validate the
gfn so that an incomplete gfn is not translated. This avoids the
unnecessary error messages incurred by translating an incomplete gfn.
Also fix the bug where the whole PPGTT shadow page update was aborted
on any invalid gfn entry.

gfn validation relies on the hypervisor's help. Add one MPT module
function to provide this check.

Signed-off-by: Hang Yuan
Reviewed-by: Zhi Wang
Signed-off-by: Zhenyu Wang
Signed-off-by: Rodrigo Vivi
---
 drivers/gpu/drm/i915/gvt/gtt.c       | 24 +++++++++++++++++++-----
 drivers/gpu/drm/i915/gvt/hypercall.h |  1 +
 drivers/gpu/drm/i915/gvt/kvmgt.c     | 16 ++++++++++++++++
 drivers/gpu/drm/i915/gvt/mpt.h       | 17 +++++++++++++++++
 4 files changed, 53 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index a529d2bd393c..8d5317d0122d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	struct intel_gvt_gtt_entry se, ge;
-	unsigned long i;
+	unsigned long gfn, i;
 	int ret;
 
 	trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
 	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
 		for_each_present_guest_entry(spt, &ge, i) {
-			ret = gtt_entry_p2m(vgpu, &ge, &se);
-			if (ret)
-				goto fail;
+			gfn = ops->get_pfn(&ge);
+			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
+				gtt_entry_p2m(vgpu, &ge, &se))
+				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
 			ppgtt_set_shadow_entry(spt, &se, i);
 		}
 		return 0;
@@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
-	unsigned long gma;
+	unsigned long gma, gfn;
 	struct intel_gvt_gtt_entry e, m;
 	int ret;
 
@@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			bytes);
 
 	if (ops->test_present(&e)) {
+		gfn = ops->get_pfn(&e);
+
+		/* one PTE update may be issued in multiple writes and the
+		 * first write may not construct a valid gfn
+		 */
+		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+			goto out;
+		}
+
 		ret = gtt_entry_p2m(vgpu, &e, &m);
 		if (ret) {
 			gvt_vgpu_err("fail to translate guest gtt entry\n");
@@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 	}
 
+out:
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
 	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index a1bd82feb827..f8e77e166246 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -58,6 +58,7 @@ struct intel_gvt_mpt {
 	int (*set_opregion)(void *vgpu);
 	int (*get_vfio_device)(void *vgpu);
 	void (*put_vfio_device)(void *vgpu);
+	bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };
 
 extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 45bab5a6290b..eb92572056c3 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1570,6 +1570,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
 	return PFN_DOWN(__pa(addr));
 }
 
+static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+{
+	struct kvmgt_guest_info *info;
+	struct kvm *kvm;
+
+	if (!handle_valid(handle))
+		return false;
+
+	info = (struct kvmgt_guest_info *)handle;
+	kvm = info->kvm;
+
+	return kvm_is_visible_gfn(kvm, gfn);
+
+}
+
 struct intel_gvt_mpt kvmgt_mpt = {
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
@@ -1585,6 +1600,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
 	.set_opregion = kvmgt_set_opregion,
 	.get_vfio_device = kvmgt_get_vfio_device,
 	.put_vfio_device = kvmgt_put_vfio_device,
+	.is_valid_gfn = kvmgt_is_valid_gfn,
 };
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index ca8005a6d5fa..81aff4eacbfe 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -339,4 +339,21 @@ static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
 	intel_gvt_host.mpt->put_vfio_device(vgpu);
 }
 
+/**
+ * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * true on valid gfn, false on not.
+ */
+static inline bool intel_gvt_hypervisor_is_valid_gfn(
+		struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	if (!intel_gvt_host.mpt->is_valid_gfn)
+		return true;
+
+	return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+}
+
 #endif /* _GVT_MPT_H_ */
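
Note for readers (not part of the diff above, discard when applying): the
standalone sketch below models the failure mode the new check guards
against -- a 64-bit guest PTE that is updated by two 32-bit MMIO writes
briefly decodes to a gfn that no guest memory slot backs, so translating
it would only produce error noise. The memory layout, address mask, and
helper names (is_valid_gfn, write_dword, pte_to_gfn) are simplified
assumptions for illustration, not code from the driver; in the patch the
real visibility check is kvm_is_visible_gfn() behind the new MPT hook.

/*
 * Minimal sketch, assuming a simplified PTE layout and guest memory map.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT	0x1ULL
#define PTE_ADDR_MASK	0x7ffffffff000ULL	/* simplified address field */
#define PAGE_SHIFT_4K	12

/* assumed guest layout: 2 GiB of low RAM plus 512 MiB above 4 GiB */
static const struct { uint64_t base, npages; } guest_ram[] = {
	{ 0x00000,  0x80000 },
	{ 0x100000, 0x20000 },
};

/* stand-in for the hypervisor-backed visibility check */
static bool is_valid_gfn(uint64_t gfn)
{
	for (unsigned int i = 0; i < sizeof(guest_ram) / sizeof(guest_ram[0]); i++)
		if (gfn >= guest_ram[i].base &&
		    gfn < guest_ram[i].base + guest_ram[i].npages)
			return true;
	return false;
}

static uint64_t pte_to_gfn(uint64_t pte)
{
	return (pte & PTE_ADDR_MASK) >> PAGE_SHIFT_4K;
}

/* emulate one 4-byte MMIO write into a 64-bit PTE; dword 0 = low, 1 = high */
static void write_dword(uint64_t *pte, unsigned int dword, uint32_t val)
{
	uint64_t mask = 0xffffffffULL << (dword * 32);

	*pte = (*pte & ~mask) | ((uint64_t)val << (dword * 32));
}

static void report(const char *when, uint64_t pte)
{
	uint64_t gfn = pte_to_gfn(pte);

	printf("%-16s pte=0x%09llx gfn=0x%06llx -> %s\n", when,
	       (unsigned long long)pte, (unsigned long long)gfn,
	       is_valid_gfn(gfn) ? "valid, translate" : "invalid, use scratch page");
}

int main(void)
{
	/* old mapping: gfn 0x110000 (above 4 GiB); new mapping: gfn 0x70000 */
	uint64_t pte = (0x110000ULL << PAGE_SHIFT_4K) | PTE_PRESENT;
	uint64_t new_pte = (0x70000ULL << PAGE_SHIFT_4K) | PTE_PRESENT;

	report("before update:", pte);

	/* first 4-byte write: low dword is new, high dword still old */
	write_dword(&pte, 0, (uint32_t)new_pte);
	report("after 1st write:", pte);	/* gfn 0x170000 -- no slot backs it */

	/* second 4-byte write completes the entry */
	write_dword(&pte, 1, (uint32_t)(new_pte >> 32));
	report("after 2nd write:", pte);	/* gfn 0x70000 -- valid again */

	return 0;
}

After the first write the mixed entry decodes to gfn 0x170000, which falls
outside both assumed RAM ranges; with this patch the shadow entry would be
pointed at the scratch page instead of failing the translation, and the
second write then installs the real mapping.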