From b294657d1bab3371bf02c31a243232bfa9f4629f Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:33 +0800
Subject: [PATCH 01/16] drm/i915/gvt: Add new 64K entry type

Add a new entry type GTT_TYPE_PPGTT_PTE_64K_ENTRY. A 64K entry is very
different from a 2M/1G entry: it is controlled by the IPS bit in the
upper PDE. To leverage the current logic, we treat the IPS bit as 'PSE'
for the PTE level, which means 64K entries can also be processed by
get_pse_type().

v2: Make it bisectable.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 26 ++++++++++++++++++++++----
 drivers/gpu/drm/i915/gvt/gtt.h |  1 +
 2 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6a8a9868bcfb..03f2a5b26546 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = {
 			GTT_TYPE_PPGTT_PDE_PT,
 			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+	/* We take IPS bit as 'PSE' for PTE level. */
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_INVALID,
-			GTT_TYPE_INVALID),
+			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 			GTT_TYPE_PPGTT_PTE_PT,
 			GTT_TYPE_INVALID,
-			GTT_TYPE_INVALID),
+			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
+	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+			GTT_TYPE_PPGTT_PTE_PT,
+			GTT_TYPE_INVALID,
+			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
 	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
 			GTT_TYPE_PPGTT_PDE_ENTRY,
 			GTT_TYPE_PPGTT_PDE_PT,
@@ -339,6 +345,7 @@ static inline int gtt_set_entry64(void *pt,
 
 #define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
 #define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
 #define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
@@ -349,6 +356,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
 	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
 		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
+	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
+		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
 	else
 		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
 	return pfn;
@@ -362,6 +371,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
 	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
 		e->val64 &= ~ADDR_2M_MASK;
 		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
+	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
+		e->val64 &= ~ADDR_64K_MASK;
+		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
 	} else {
 		e->val64 &= ~ADDR_4K_MASK;
 		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
@@ -380,6 +392,10 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 	if (!(e->val64 & _PAGE_PSE))
 		return false;
 
+	/* We don't support 64K entry yet, will remove this later. */
+	if (get_pse_type(e->type) == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
+		return false;
+
 	e->type = get_pse_type(e->type);
 	return true;
 }
@@ -871,9 +887,10 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
 			gvt_vdbg_mm("invalidate 4K entry\n");
 			ppgtt_invalidate_pte(spt, &e);
 			break;
+		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
 		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-			WARN(1, "GVT doesn't support 2M/1GB page\n");
+			WARN(1, "GVT doesn't support 64K/2M/1GB page\n");
 			continue;
 		case GTT_TYPE_PPGTT_PML4_ENTRY:
 		case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -970,9 +987,10 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
 		gvt_vdbg_mm("shadow 4K gtt entry\n");
 		break;
+	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
 	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-		gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+		gvt_vgpu_err("GVT doesn't support 64K/2M/1GB entry\n");
 		return -EINVAL;
 	default:
 		GEM_BUG_ON(1);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 3792f2b7f4ff..60630816df5c 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -95,6 +95,7 @@ typedef enum {
 	GTT_TYPE_GGTT_PTE,
 
 	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+	GTT_TYPE_PPGTT_PTE_64K_ENTRY,
 	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
 	GTT_TYPE_PPGTT_PTE_1G_ENTRY,
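For readers who want to see the mask arithmetic above in isolation, here is a
minimal standalone C sketch (illustrative only, not GVT code; the GTT_HAW
value of 46 is an assumption taken from gtt.c's definitions of this period).
A 64K entry ignores address bits 15:12, so its pfn comes out 16-page aligned:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define GTT_HAW 46			/* assumed host address width */
#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK  GENMASK_ULL(GTT_HAW - 1, 12)
#define PAGE_SHIFT 12

int main(void)
{
	uint64_t pte = 0x12345f000ULL;	/* arbitrary entry value */

	/* The 4K view keeps bits 15:12; the 64K view drops them. */
	printf("4K  pfn: 0x%llx\n",
	       (unsigned long long)((pte & ADDR_4K_MASK) >> PAGE_SHIFT));
	printf("64K pfn: 0x%llx\n",
	       (unsigned long long)((pte & ADDR_64K_MASK) >> PAGE_SHIFT));
	return 0;
}

This prints 0x12345f and 0x123450 respectively.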
From 6fd7937832698e73a5719ff488c2fc5e22c9c0ba Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:34 +0800
Subject: [PATCH 02/16] drm/i915/gvt: Add PTE IPS bit operations

Add two IPS operation functions to test and clear the IPS bit in the
PDE.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 18 ++++++++++++++++++
 drivers/gpu/drm/i915/gvt/gtt.h |  2 ++
 2 files changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 03f2a5b26546..3e6733914530 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -400,6 +400,22 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 	return true;
 }
 
+static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
+{
+	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
+		return false;
+
+	return !!(e->val64 & GEN8_PDE_IPS_64K);
+}
+
+static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
+{
+	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
+		return;
+
+	e->val64 &= ~GEN8_PDE_IPS_64K;
+}
+
 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
 {
 	/*
@@ -456,6 +472,8 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
 	.set_present = gtt_entry_set_present,
 	.test_present = gen8_gtt_test_present,
 	.test_pse = gen8_gtt_test_pse,
+	.clear_ips = gen8_gtt_clear_ips,
+	.test_ips = gen8_gtt_test_ips,
 	.get_pfn = gen8_gtt_get_pfn,
 	.set_pfn = gen8_gtt_set_pfn,
 };
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 60630816df5c..9257b7467b14 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -63,6 +63,8 @@ struct intel_gvt_gtt_pte_ops {
 	void (*clear_present)(struct intel_gvt_gtt_entry *e);
 	void (*set_present)(struct intel_gvt_gtt_entry *e);
 	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+	bool (*test_ips)(struct intel_gvt_gtt_entry *e);
+	void (*clear_ips)(struct intel_gvt_gtt_entry *e);
 	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
 	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
 };
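What the new ops do to a raw PDE value, as a standalone model. The
GEN8_PDE_IPS_64K bit position (bit 11) is an assumption based on i915's gen8
GTT definitions of this era, not something stated in the patch itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEN8_PDE_IPS_64K (1ULL << 11)	/* assumed bit position */

static bool pde_test_ips(uint64_t pde)
{
	return pde & GEN8_PDE_IPS_64K;
}

static uint64_t pde_clear_ips(uint64_t pde)
{
	return pde & ~GEN8_PDE_IPS_64K;
}

int main(void)
{
	uint64_t pde = 0x1000800ULL | GEN8_PDE_IPS_64K;

	printf("ips set:     %d\n", pde_test_ips(pde));			/* 1 */
	printf("after clear: %d\n", pde_test_ips(pde_clear_ips(pde)));	/* 0 */
	return 0;
}

Note the real helpers also refuse to operate on anything but a PDE entry (the
GEM_WARN_ON type check), which the model above omits.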
From 52ca14e6844a04e174b5cd3d7dbf63a23271775c Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:35 +0800
Subject: [PATCH 03/16] drm/i915/gvt: Handle MMIO GEN8_GAMW_ECO_DEV_RW_IA for
 64K GTT

Starting from GEN8, the register RENDER_HWS_PGA_GEN7 is renamed to
GEN8_GAMW_ECO_DEV_RW_IA; it can control IPS enabling.

v3: MMIO control for IPS is removed from gen10, not gen9 (Matthew Auld)
v2: IPS of all engines must be enabled together for gen9.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/handlers.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0bc0c5418adb..17f56fc20613 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -210,6 +210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+	if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
+			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
+		else if (!ips)
+			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
+		else {
+			/* All engines must be enabled together for vGPU,
+			 * since we don't know which engine the ppgtt will
+			 * bind to when shadowing.
+			 */
+			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
+				     ips);
+			return -EINVAL;
+		}
+	}
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
 static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 		void *p_data, unsigned int bytes)
 {
@@ -1769,7 +1794,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
+		gamw_echo_dev_rw_ia_write);
+
 	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
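The handler's all-or-nothing policy is easy to model standalone. The 0xF8
value for GAMW_ECO_ENABLE_64K_IPS_FIELD below is an assumption (one enable
bit per engine, packed in a single byte) and is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF8	/* assumed per-engine mask */

/* 0 = acceptable write, -1 = mixed per-engine IPS, rejected for vGPU. */
static int check_ips_write(uint32_t val)
{
	uint32_t ips = val & GAMW_ECO_ENABLE_64K_IPS_FIELD;

	if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
		return 0;	/* IPS enabled on all engines */
	if (!ips)
		return 0;	/* IPS disabled on all engines */
	return -1;		/* partial enable: shadowing can't handle it */
}

int main(void)
{
	printf("%d %d %d\n", check_ips_write(0xF8),
	       check_ips_write(0x00), check_ips_write(0x08));	/* 0 0 -1 */
	return 0;
}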
From 40b271767dcf9748327619ed550be810cc2e10ae Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:36 +0800
Subject: [PATCH 04/16] drm/i915/gvt: Detect 64K gtt entry by IPS bit of PDE

This change helps us detect the real entry type from the PSE and IPS
settings. For 64K entries, we also need to check the register
GEN8_GAMW_ECO_DEV_RW_IA.

v2: Extend IPS mmio control to Gen10. (Matthew Auld)

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 70 ++++++++++++++++++++++++----------
 drivers/gpu/drm/i915/gvt/gtt.h |  2 +
 2 files changed, 51 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 3e6733914530..3dae75b5b574 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -384,20 +384,7 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
 
 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 {
-	/* Entry doesn't have PSE bit. */
-	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
-		return false;
-
-	e->type = get_entry_type(e->type);
-	if (!(e->val64 & _PAGE_PSE))
-		return false;
-
-	/* We don't support 64K entry yet, will remove this later. */
-	if (get_pse_type(e->type) == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
-		return false;
-
-	e->type = get_pse_type(e->type);
-	return true;
+	return !!(e->val64 & _PAGE_PSE);
 }
 
 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
@@ -487,6 +474,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
 	.gma_to_pml4_index = gen8_gma_to_pml4_index,
 };
 
+/* Update entry type per pse and ips bit. */
+static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
+	struct intel_gvt_gtt_entry *entry, bool ips)
+{
+	switch (entry->type) {
+	case GTT_TYPE_PPGTT_PDE_ENTRY:
+	case GTT_TYPE_PPGTT_PDP_ENTRY:
+		if (pte_ops->test_pse(entry))
+			entry->type = get_pse_type(entry->type);
+		break;
+	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+		if (ips)
+			entry->type = get_pse_type(entry->type);
+		break;
+	default:
+		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
+	}
+
+	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
+}
+
 /*
  * MM helpers.
  */
@@ -502,8 +510,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
 	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
 			   mm->ppgtt_mm.shadow_pdps,
 			   entry, index, false, 0, mm->vgpu);
-
-	pte_ops->test_pse(entry);
+	update_entry_type_for_real(pte_ops, entry, false);
 }
 
 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
@@ -608,7 +615,8 @@ static inline int ppgtt_spt_get_entry(
 	if (ret)
 		return ret;
 
-	ops->test_pse(e);
+	update_entry_type_for_real(ops, e, guest ?
+				   spt->guest_page.pde_ips : false);
 
 	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
 		    type, e->type, index, e->val64);
@@ -752,7 +760,8 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
-		struct intel_vgpu *vgpu, int type, unsigned long gfn)
+		struct intel_vgpu *vgpu, int type, unsigned long gfn,
+		bool guest_pde_ips)
 {
 	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -792,6 +801,7 @@ retry:
 	 */
 	spt->guest_page.type = type;
 	spt->guest_page.gfn = gfn;
+	spt->guest_page.pde_ips = guest_pde_ips;
 
 	ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
 			ppgtt_write_protection_handler, spt);
@@ -934,6 +944,22 @@ fail:
 	return ret;
 }
 
+static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
+			GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
+	} else if (INTEL_GEN(dev_priv) >= 11) {
+		/* 64K paging is only controlled by the IPS bit in the PTE now. */
+		return true;
+	} else
+		return false;
+}
+
 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
 
 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
@@ -941,6 +967,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 {
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *spt = NULL;
+	bool ips = false;
 	int ret;
 
 	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
@@ -951,7 +978,10 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 	else {
 		int type = get_next_pt_type(we->type);
 
-		spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+		if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+			ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
+
+		spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we), ips);
 		if (IS_ERR(spt)) {
 			ret = PTR_ERR(spt);
 			goto fail;
@@ -1427,8 +1457,6 @@ static int ppgtt_handle_guest_write_page_table_bytes(
 
 	ppgtt_get_guest_entry(spt, &we, index);
 
-	ops->test_pse(&we);
-
 	if (bytes == info->gtt_entry_size) {
 		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 9257b7467b14..c11284bb291b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -223,6 +223,7 @@ struct intel_vgpu_ppgtt_spt {
 
 	struct {
 		intel_gvt_gtt_type_t type;
+		bool pde_ips; /* for 64KB PTEs */
 		void *vaddr;
 		struct page *page;
 		unsigned long mfn;
@@ -230,6 +231,7 @@ struct intel_vgpu_ppgtt_spt {
 
 	struct {
 		intel_gvt_gtt_type_t type;
+		bool pde_ips; /* for 64KB PTEs */
 		unsigned long gfn;
 		unsigned long write_cnt;
 		struct intel_vgpu_oos_page *oos_page;
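The resolution rule that update_entry_type_for_real() implements can be
captured in a few lines. A hedged standalone model follows (the enum names
are illustrative, not the driver's):

enum gtt_type { PTE_4K, PTE_64K, PTE_2M, PTE_1G, PDE, PDP };

/* PDE/PDP entries are promoted by their own PSE bit; a PTE becomes a
 * 64K entry only because the upper PDE carried the IPS bit.
 */
static enum gtt_type resolve_real_type(enum gtt_type t, int pse, int ips)
{
	switch (t) {
	case PDE:    return pse ? PTE_2M : PDE;
	case PDP:    return pse ? PTE_1G : PDP;
	case PTE_4K: return ips ? PTE_64K : PTE_4K;
	default:     return t;
	}
}

This is why the series caches guest_page.pde_ips on the shadow page table:
the PTE itself carries no bit that says "64K".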
From 716348485695de1a91b2f8b398f9c08687f794af Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:37 +0800
Subject: [PATCH 05/16] drm/i915/gvt: Add software PTE flag to mark special
 split 64K entry

This adds a software PTE flag in the ignored bits of the PTE. It is
used to identify split 64K shadow entries.

v2: fix mask definition.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 21 +++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/gtt.h |  3 +++
 2 files changed, 24 insertions(+)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 3dae75b5b574..34c401fb37d1 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -348,6 +348,9 @@ static inline int gtt_set_entry64(void *pt,
 #define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
 #define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)
 
+#define GTT_SPTE_FLAG_MASK	GENMASK_ULL(62, 52)
+#define GTT_SPTE_FLAG_64K_SPLITED	BIT(52) /* split 64K gtt entry */
+
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
 	unsigned long pfn;
@@ -427,6 +430,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
 	e->val64 |= _PAGE_PRESENT;
 }
 
+static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
+}
+
+static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
+}
+
+static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
+}
+
 /*
  * Per-platform GMA routines.
  */
@@ -461,6 +479,9 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
 	.test_pse = gen8_gtt_test_pse,
 	.clear_ips = gen8_gtt_clear_ips,
 	.test_ips = gen8_gtt_test_ips,
+	.clear_64k_splited = gen8_gtt_clear_64k_splited,
+	.set_64k_splited = gen8_gtt_set_64k_splited,
+	.test_64k_splited = gen8_gtt_test_64k_splited,
 	.get_pfn = gen8_gtt_get_pfn,
 	.set_pfn = gen8_gtt_set_pfn,
 };
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index c11284bb291b..162ef19f4117 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -65,6 +65,9 @@ struct intel_gvt_gtt_pte_ops {
 	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
 	bool (*test_ips)(struct intel_gvt_gtt_entry *e);
 	void (*clear_ips)(struct intel_gvt_gtt_entry *e);
+	bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
+	void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
+	void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
 	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
 	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
 };
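A quick standalone model of the software-flag trick: gen8 PTE bits 62:52 are
ignored by hardware, so the shadow logic can stash its own state there. Bit
52 marking a split entry is straight from the patch; the sketch itself is
illustrative only:

#include <assert.h>
#include <stdint.h>

#define GTT_SPTE_FLAG_64K_SPLITED (1ULL << 52)

int main(void)
{
	uint64_t spte = 0x123450003ULL;

	spte |= GTT_SPTE_FLAG_64K_SPLITED;		/* set_64k_splited   */
	assert(spte & GTT_SPTE_FLAG_64K_SPLITED);	/* test_64k_splited  */
	spte &= ~GTT_SPTE_FLAG_64K_SPLITED;		/* clear_64k_splited */
	assert(!(spte & GTT_SPTE_FLAG_64K_SPLITED));
	return 0;
}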
From c3e697635fcc9173e1d7116d9ebfd2fd0887177d Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:38 +0800
Subject: [PATCH 06/16] drm/i915/gvt: Add GTT clear_pse operation

Add a clear_pse operation in case we need to split a huge gtt entry
into small pages.

v2: correct description.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 19 +++++++++++++++++++
 drivers/gpu/drm/i915/gvt/gtt.h |  1 +
 2 files changed, 20 insertions(+)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 34c401fb37d1..d34dc9ab66e1 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -390,6 +390,24 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 	return !!(e->val64 & _PAGE_PSE);
 }
 
+static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
+{
+	if (gen8_gtt_test_pse(e)) {
+		switch (e->type) {
+		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+			e->val64 &= ~_PAGE_PSE;
+			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
+			break;
+		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
+			e->val64 &= ~_PAGE_PSE;
+			break;
+		default:
+			WARN_ON(1);
+		}
+	}
+}
+
 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
 {
 	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
@@ -477,6 +495,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
 	.set_present = gtt_entry_set_present,
 	.test_present = gen8_gtt_test_present,
 	.test_pse = gen8_gtt_test_pse,
+	.clear_pse = gen8_gtt_clear_pse,
 	.clear_ips = gen8_gtt_clear_ips,
 	.test_ips = gen8_gtt_test_ips,
 	.clear_64k_splited = gen8_gtt_clear_64k_splited,
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 162ef19f4117..b7bf68cc8418 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -63,6 +63,7 @@ struct intel_gvt_gtt_pte_ops {
 	void (*clear_present)(struct intel_gvt_gtt_entry *e);
 	void (*set_present)(struct intel_gvt_gtt_entry *e);
 	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+	void (*clear_pse)(struct intel_gvt_gtt_entry *e);
 	bool (*test_ips)(struct intel_gvt_gtt_entry *e);
 	void (*clear_ips)(struct intel_gvt_gtt_entry *e);
 	bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
From 155521c93e468211673206e1871b53d26a44a82d Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:39 +0800
Subject: [PATCH 07/16] drm/i915/gvt: Split ppgtt_alloc_spt into two parts

We need an interface to allocate a pure shadow page that has no guest
page associated with it. Such a shadow page is used to shadow a 2M
huge gtt entry.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 62 ++++++++++++++++++++++------------
 1 file changed, 40 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d34dc9ab66e1..15f6908fc648 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -735,10 +735,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 
 	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
 
-	if (spt->guest_page.oos_page)
-		detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+	if (spt->guest_page.gfn) {
+		if (spt->guest_page.oos_page)
+			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
 
-	intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+	}
 
 	list_del_init(&spt->post_shadow_list);
 	free_spt(spt);
@@ -799,9 +801,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
 
 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 
+/* Allocate shadow page table without guest page. */
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
-		struct intel_vgpu *vgpu, int type, unsigned long gfn,
-		bool guest_pde_ips)
+		struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
 {
 	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -836,27 +838,12 @@ retry:
 	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
 	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
 
-	/*
-	 * Init guest_page.
-	 */
-	spt->guest_page.type = type;
-	spt->guest_page.gfn = gfn;
-	spt->guest_page.pde_ips = guest_pde_ips;
-
-	ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
-			ppgtt_write_protection_handler, spt);
+	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
 	if (ret)
 		goto err_unmap_dma;
 
-	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
-	if (ret)
-		goto err_unreg_page_track;
-
-	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
-
 	return spt;
 
-err_unreg_page_track:
-	intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
 err_unmap_dma:
 	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 err_free_spt:
@@ -864,6 +851,37 @@ err_free_spt:
 	return ERR_PTR(ret);
 }
 
+/* Allocate shadow page table associated with specific gfn. */
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
+		struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+		unsigned long gfn, bool guest_pde_ips)
+{
+	struct intel_vgpu_ppgtt_spt *spt;
+	int ret;
+
+	spt = ppgtt_alloc_spt(vgpu, type);
+	if (IS_ERR(spt))
+		return spt;
+
+	/*
+	 * Init guest_page.
+	 */
+	ret = intel_vgpu_register_page_track(vgpu, gfn,
+			ppgtt_write_protection_handler, spt);
+	if (ret) {
+		ppgtt_free_spt(spt);
+		return ERR_PTR(ret);
+	}
+
+	spt->guest_page.type = type;
+	spt->guest_page.gfn = gfn;
+	spt->guest_page.pde_ips = guest_pde_ips;
+
+	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+
+	return spt;
+}
+
 #define pt_entry_size_shift(spt) \
 	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
@@ -1021,7 +1039,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 		if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
 			ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
 
-		spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we), ips);
+		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
 		if (IS_ERR(spt)) {
 			ret = PTR_ERR(spt);
 			goto fail;
From 4c9414d7b152bf344521bf786b5748e833270776 Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:40 +0800
Subject: [PATCH 08/16] drm/i915/gvt: Make PTE iterator 64K entry aware

The 64K PTE layout is special: only PTE#0, PTE#16, PTE#32, ...,
PTE#496 are used in the page table.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 15f6908fc648..7fc277cceb23 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -351,6 +351,8 @@ static inline int gtt_set_entry64(void *pt,
 #define GTT_SPTE_FLAG_MASK	GENMASK_ULL(62, 52)
 #define GTT_SPTE_FLAG_64K_SPLITED	BIT(52) /* split 64K gtt entry */
 
+#define GTT_64K_PTE_STRIDE	16
+
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
 	unsigned long pfn;
@@ -889,12 +891,14 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
 	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
 
 #define for_each_present_guest_entry(spt, e, i) \
-	for (i = 0; i < pt_entries(spt); i++) \
+	for (i = 0; i < pt_entries(spt); \
+	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
 		if (!ppgtt_get_guest_entry(spt, e, i) && \
 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
 #define for_each_present_shadow_entry(spt, e, i) \
-	for (i = 0; i < pt_entries(spt); i++) \
+	for (i = 0; i < pt_entries(spt); \
+	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
 		if (!ppgtt_get_shadow_entry(spt, e, i) && \
 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
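To visualize the stride, a standalone sketch of walking a 512-slot page table
in 64K mode; only every 16th slot is a live entry, and slot i maps offset
i * 4K (i.e. entry i/16 maps offset (i/16) * 64K):

#include <stdio.h>

#define GTT_64K_PTE_STRIDE 16
#define PT_ENTRIES 512

int main(void)
{
	/* 32 live entries x 64K each = the same 2M a PT normally covers. */
	for (int i = 0; i < PT_ENTRIES; i += GTT_64K_PTE_STRIDE)
		printf("PTE#%-3d maps offset 0x%06x\n", i, i * 0x1000);
	return 0;
}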
From eb3a353014d2c2402e572ab7bef86bf5e328160f Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:41 +0800
Subject: [PATCH 09/16] drm/i915/gvt: Add 64K huge gtt support

Finally, this adds the first huge gtt support for GVTg - 64K pages.
Since 64K pages and 4K pages cannot be mixed in the same page table,
we always split a 64K entry into small 4K pages. When unshadowing a
guest 64K entry, we must ensure all of the shadowed entries in the
shadow page table are cleared as well.

For a page table that has 64K gtt entries, only PTE#0, PTE#16,
PTE#32, ..., PTE#496 are used. Updates to unused PTEs should be
ignored.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 86 +++++++++++++++++++++++++++++++---
 1 file changed, 80 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7fc277cceb23..54c221dedfe8 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -978,9 +978,12 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
 			ppgtt_invalidate_pte(spt, &e);
 			break;
 		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+			/* We don't setup 64K shadow entry so far. */
+			WARN(1, "suspicious 64K gtt entry\n");
+			continue;
 		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-			WARN(1, "GVT doesn't support 64K/2M/1GB page\n");
+			WARN(1, "GVT doesn't support 2M/1GB page\n");
 			continue;
 		case GTT_TYPE_PPGTT_PML4_ENTRY:
 		case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -1075,9 +1078,44 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 	se->type = ge->type;
 	se->val64 = ge->val64;
 
+	/* Because we always split 64KB pages, clear IPS in the shadow PDE. */
+	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+		ops->clear_ips(se);
+
 	ops->set_pfn(se, s->shadow_page.mfn);
 }
 
+static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
+	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+	struct intel_gvt_gtt_entry *se)
+{
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_gvt_gtt_entry entry = *se;
+	unsigned long start_gfn;
+	dma_addr_t dma_addr;
+	int i, ret;
+
+	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
+
+	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
+
+	start_gfn = ops->get_pfn(se);
+
+	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
+	ops->set_64k_splited(&entry);
+
+	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+					start_gfn + i, &dma_addr);
+		if (ret)
+			return ret;
+
+		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
+		ppgtt_set_shadow_entry(spt, &entry, index + i);
+	}
+	return 0;
+}
+
 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 	struct intel_gvt_gtt_entry *ge)
@@ -1098,9 +1136,16 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 		gvt_vdbg_mm("shadow 4K gtt entry\n");
 		break;
 	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+		gvt_vdbg_mm("shadow 64K gtt entry\n");
+		/*
+		 * The layout of the 64K page is special: the page size is
+		 * controlled by the upper PDE. To keep it simple, we always
+		 * split the 64K page into smaller 4K pages in the shadow PT.
+		 */
+		return split_64KB_gtt_entry(vgpu, spt, index, &se);
 	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-		gvt_vgpu_err("GVT doesn't support 64K/2M/1GB entry\n");
+		gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
 		return -EINVAL;
 	default:
 		GEM_BUG_ON(1);
@@ -1190,8 +1235,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
 		ret = ppgtt_invalidate_spt(s);
 		if (ret)
 			goto fail;
-	} else
+	} else {
+		/* We don't setup 64K shadow entry so far. */
+		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+		     "suspicious 64K entry\n");
 		ppgtt_invalidate_pte(spt, se);
+	}
 
 	return 0;
 fail:
@@ -1414,7 +1463,7 @@ static int ppgtt_handle_guest_write_page_table(
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry old_se;
 	int new_present;
-	int ret;
+	int i, ret;
 
 	new_present = ops->test_present(we);
@@ -1436,8 +1485,21 @@ static int ppgtt_handle_guest_write_page_table(
 		goto fail;
 
 	if (!new_present) {
-		ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
-		ppgtt_set_shadow_entry(spt, &old_se, index);
+		/* For 64KB split entries, we need to clear them all. */
+		if (ops->test_64k_splited(&old_se) &&
+		    !(index % GTT_64K_PTE_STRIDE)) {
+			gvt_vdbg_mm("remove split 64K shadow entries\n");
+			for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+				ops->clear_64k_splited(&old_se);
+				ops->set_pfn(&old_se,
+					vgpu->gtt.scratch_pt[type].page_mfn);
+				ppgtt_set_shadow_entry(spt, &old_se, index + i);
+			}
+		} else {
+			ops->set_pfn(&old_se,
+				     vgpu->gtt.scratch_pt[type].page_mfn);
+			ppgtt_set_shadow_entry(spt, &old_se, index);
+		}
 	}
 
 	return 0;
@@ -1519,6 +1581,18 @@ static int ppgtt_handle_guest_write_page_table_bytes(
 
 	ppgtt_get_guest_entry(spt, &we, index);
 
+	/*
+	 * For page table which has 64K gtt entry, only PTE#0, PTE#16,
+	 * PTE#32, ... PTE#496 are used. Unused PTEs update should be
+	 * ignored.
+	 */
+	if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
+	    (index % GTT_64K_PTE_STRIDE)) {
+		gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
+			    index);
+		return 0;
+	}
+
 	if (bytes == info->gtt_entry_size) {
 		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
 		if (ret)
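The gfn arithmetic of the split is simple enough to model standalone (DMA
mapping and the shadow-entry write-back are elided; illustrative only): one
guest 64K entry at a stride-aligned slot becomes 16 consecutive 4K shadow
entries covering the same backing pages:

#include <stdint.h>
#include <stdio.h>

#define GTT_64K_PTE_STRIDE 16

int main(void)
{
	unsigned long index = 16;	/* must be stride-aligned */
	uint64_t start_gfn = 0x1000;	/* pfn field of the 64K entry */

	for (int i = 0; i < GTT_64K_PTE_STRIDE; i++)
		printf("shadow PTE#%lu -> gfn 0x%llx (4K)\n",
		       index + i, (unsigned long long)(start_gfn + i));
	return 0;
}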
From 79e542f5af79918e5e766c441561fb9bff8af3aa Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:42 +0800
Subject: [PATCH 10/16] drm/i915/kvmgt: Support setting dma map for huge pages

To support huge gtt, we need to support huge pages in kvmgt first.
This patch adds a 'size' param to the intel_gvt_mpt::dma_map_guest_page
API and implements it in kvmgt.

v2: rebase.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c       |   6 +-
 drivers/gpu/drm/i915/gvt/hypercall.h |   2 +-
 drivers/gpu/drm/i915/gvt/kvmgt.c     | 128 ++++++++++++++++++++-------
 drivers/gpu/drm/i915/gvt/mpt.h       |   7 +-
 4 files changed, 103 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 54c221dedfe8..e26c01da2bd6 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1106,7 +1106,7 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
 	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
 		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
-					start_gfn + i, &dma_addr);
+					start_gfn + i, PAGE_SIZE, &dma_addr);
 		if (ret)
 			return ret;
 
@@ -1152,7 +1152,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 	};
 
 	/* direct shadow */
-	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
 	if (ret)
 		return -ENXIO;
 
@@ -2080,7 +2080,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		}
 
 		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
-							      &dma_addr);
+							      PAGE_SIZE, &dma_addr);
 		if (ret) {
 			gvt_vgpu_err("fail to populate guest ggtt entry\n");
 			/* guest driver may read/write the entry when partial
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index f6dd9f717888..5af11cf1b482 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -53,7 +53,7 @@ struct intel_gvt_mpt {
 	unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
 
 	int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
-				  dma_addr_t *dma_addr);
+				  unsigned long size, dma_addr_t *dma_addr);
 	void (*dma_unmap_guest_page)(unsigned long handle,
 				     dma_addr_t dma_addr);
 	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1466d8769ec9..685cb3de6dab 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -94,6 +94,7 @@ struct gvt_dma {
 	struct rb_node dma_addr_node;
 	gfn_t gfn;
 	dma_addr_t dma_addr;
+	unsigned long size;
 	struct kref ref;
 };
 
@@ -106,45 +107,103 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
-static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
-		dma_addr_t *dma_addr)
+static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+		unsigned long size)
 {
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-	struct page *page;
-	unsigned long pfn;
+	int total_pages;
+	int npage;
 	int ret;
 
-	/* Pin the page first. */
-	ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
-			     IOMMU_READ | IOMMU_WRITE, &pfn);
-	if (ret != 1) {
-		gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
-			     gfn, ret);
-		return -EINVAL;
+	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+
+	for (npage = 0; npage < total_pages; npage++) {
+		unsigned long cur_gfn = gfn + npage;
+
+		ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+		WARN_ON(ret != 1);
 	}
+}
+
+/* Pin a normal or compound guest page for dma. */
+static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+		unsigned long size, struct page **page)
+{
+	unsigned long base_pfn = 0;
+	int total_pages;
+	int npage;
+	int ret;
+
+	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+	/*
+	 * We pin the pages one-by-one to avoid allocating a big array
+	 * on the stack to hold pfns.
+	 */
+	for (npage = 0; npage < total_pages; npage++) {
+		unsigned long cur_gfn = gfn + npage;
+		unsigned long pfn;
+
+		ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+				     IOMMU_READ | IOMMU_WRITE, &pfn);
+		if (ret != 1) {
+			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
+				     cur_gfn, ret);
+			goto err;
+		}
+
+		if (!pfn_valid(pfn)) {
+			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+			npage++;
+			ret = -EFAULT;
+			goto err;
+		}
+
+		if (npage == 0)
+			base_pfn = pfn;
+		else if (base_pfn + npage != pfn) {
+			gvt_vgpu_err("The pages are not contiguous\n");
+			ret = -EINVAL;
+			npage++;
+			goto err;
+		}
+	}
+
+	*page = pfn_to_page(base_pfn);
+	return 0;
+err:
+	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+	return ret;
+}
+
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+		dma_addr_t *dma_addr, unsigned long size)
+{
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct page *page = NULL;
+	int ret;
+
+	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
+	if (ret)
+		return ret;
 
 	/* Setup DMA mapping. */
-	page = pfn_to_page(pfn);
-	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
-				 PCI_DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dev, *dma_addr)) {
-		gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
-		vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
-		return -ENOMEM;
+	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+	ret = dma_mapping_error(dev, *dma_addr);
+	if (ret) {
+		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
+			     page_to_pfn(page), ret);
+		gvt_unpin_guest_page(vgpu, gfn, size);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
-		dma_addr_t dma_addr)
+		dma_addr_t dma_addr, unsigned long size)
 {
 	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-	int ret;
 
-	dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
-	WARN_ON(ret != 1);
+	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+	gvt_unpin_guest_page(vgpu, gfn, size);
 }
 
 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
@@ -185,7 +244,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 }
 
 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
-		dma_addr_t dma_addr)
+		dma_addr_t dma_addr, unsigned long size)
 {
 	struct gvt_dma *new, *itr;
 	struct rb_node **link, *parent = NULL;
@@ -197,6 +256,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 	new->vgpu = vgpu;
 	new->gfn = gfn;
 	new->dma_addr = dma_addr;
+	new->size = size;
 	kref_init(&new->ref);
 
 	/* gfn_cache maps gfn to struct gvt_dma. */
@@ -254,7 +314,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 			break;
 		}
 		dma = rb_entry(node, struct gvt_dma, gfn_node);
-		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
+		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
 		__gvt_cache_remove_entry(vgpu, dma);
 		mutex_unlock(&vgpu->vdev.cache_lock);
 	}
@@ -509,7 +569,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 			if (!entry)
 				continue;
 
-			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+					   entry->size);
 			__gvt_cache_remove_entry(vgpu, entry);
 		}
 		mutex_unlock(&vgpu->vdev.cache_lock);
@@ -1616,7 +1677,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 }
 
 int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
-		dma_addr_t *dma_addr)
+		unsigned long size, dma_addr_t *dma_addr)
 {
 	struct kvmgt_guest_info *info;
 	struct intel_vgpu *vgpu;
@@ -1633,11 +1694,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
 	if (!entry) {
-		ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
 		if (ret)
 			goto err_unlock;
 
-		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
 		if (ret)
 			goto err_unmap;
 	} else {
@@ -1649,7 +1710,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 	return 0;
 
 err_unmap:
-	gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
 err_unlock:
 	mutex_unlock(&info->vgpu->vdev.cache_lock);
 	return ret;
@@ -1659,7 +1720,8 @@ static void __gvt_dma_release(struct kref *ref)
 {
 	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
 
-	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+	gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
+			   entry->size);
 	__gvt_cache_remove_entry(entry->vgpu, entry);
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 32ffcd566cdd..67f19992b226 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
 /**
  * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
  * @vgpu: a vGPU
- * @gpfn: guest pfn
+ * @gfn: guest pfn
+ * @size: page size
  * @dma_addr: retrieve allocated dma addr
  *
  * Returns:
  * 0 on success, negative error code if failed.
  */
 static inline int intel_gvt_hypervisor_dma_map_guest_page(
-	struct intel_vgpu *vgpu, unsigned long gfn,
+	struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
 	dma_addr_t *dma_addr)
 {
-	return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+	return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
 						      dma_addr);
 }
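The contiguity rule enforced by gvt_pin_guest_page() — every pinned pfn must
extend one physically contiguous run starting at the first pfn — as a
standalone model (illustrative only):

#include <stdio.h>

static int check_contiguous(const unsigned long *pfns, int n)
{
	for (int i = 1; i < n; i++)
		if (pfns[0] + i != pfns[i])
			return -1;	/* broken run: huge mapping must fail */
	return 0;
}

int main(void)
{
	unsigned long ok[]  = { 100, 101, 102, 103 };
	unsigned long bad[] = { 100, 101, 105, 106 };

	printf("ok:  %d\n", check_contiguous(ok, 4));	/* 0  */
	printf("bad: %d\n", check_contiguous(bad, 4));	/* -1 */
	return 0;
}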
From b901b252b6cf5cecc612059ccf05d974a9085c58 Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:43 +0800
Subject: [PATCH 11/16] drm/i915/gvt: Add 2M huge gtt support

This adds 2M huge gtt support for GVTg. Unlike a 64K gtt entry, we can
shadow a 2M guest entry with a real huge gtt. But before that, we have
to check that the guest memory is physically contiguous and aligned,
and that huge pages are supported on the host. We can get all supported
page sizes from intel_device_info.page_sizes.

Finally, we must split the 2M page into smaller pages if we cannot
satisfy the guest huge page.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 100 +++++++++++++++++++++++++++++++--
 1 file changed, 95 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index e26c01da2bd6..5a66d0f3365c 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -902,6 +902,11 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
 		if (!ppgtt_get_shadow_entry(spt, e, i) && \
 		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
+#define for_each_shadow_entry(spt, e, i) \
+	for (i = 0; i < pt_entries(spt); \
+	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
+		if (!ppgtt_get_shadow_entry(spt, e, i))
+
 static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
 	int v = atomic_read(&spt->refcount);
@@ -949,7 +954,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
 	pfn = ops->get_pfn(entry);
 	type = spt->shadow_page.type;
 
-	if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+	/* Uninitialized spte or unshadowed spte. */
+	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
 		return;
 
 	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
@@ -982,8 +988,10 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
 			WARN(1, "suspicious 64K gtt entry\n");
 			continue;
 		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+			gvt_vdbg_mm("invalidate 2M entry\n");
+			continue;
 		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-			WARN(1, "GVT doesn't support 2M/1GB page\n");
+			WARN(1, "GVT doesn't support 1GB page\n");
 			continue;
 		case GTT_TYPE_PPGTT_PML4_ENTRY:
 		case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -1085,6 +1093,73 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 	ops->set_pfn(se, s->shadow_page.mfn);
 }
 
+/*
+ * Return 1 if 2MB huge gtt shadowing is possible, 0 if it is not
+ * possible, negative if an error occurred.
+ */
+static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+	struct intel_gvt_gtt_entry *entry)
+{
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	unsigned long pfn;
+
+	if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+		return 0;
+
+	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
+	if (pfn == INTEL_GVT_INVALID_ADDR)
+		return -EINVAL;
+
+	return PageTransHuge(pfn_to_page(pfn));
+}
+
+static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+	struct intel_gvt_gtt_entry *se)
+{
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	struct intel_vgpu_ppgtt_spt *sub_spt;
+	struct intel_gvt_gtt_entry sub_se;
+	unsigned long start_gfn;
+	dma_addr_t dma_addr;
+	unsigned long sub_index;
+	int ret;
+
+	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
+
+	start_gfn = ops->get_pfn(se);
+
+	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
+	if (IS_ERR(sub_spt))
+		return PTR_ERR(sub_spt);
+
+	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+		if (ret) {
+			ppgtt_invalidate_spt(spt);
+			return ret;
+		}
+		sub_se.val64 = se->val64;
+
+		/* Copy the PAT field from PDE. */
+		sub_se.val64 &= ~_PAGE_PAT;
+		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
+
+		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
+		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
+	}
+
+	/* Clear dirty field. */
+	se->val64 &= ~_PAGE_DIRTY;
+
+	ops->clear_pse(se);
+	ops->clear_ips(se);
+	ops->set_pfn(se, sub_spt->shadow_page.mfn);
+	ppgtt_set_shadow_entry(spt, se, index);
+	return 0;
+}
+
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
 	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 	struct intel_gvt_gtt_entry *se)
@@ -1122,7 +1197,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry se = *ge;
-	unsigned long gfn;
+	unsigned long gfn, page_size = PAGE_SIZE;
 	dma_addr_t dma_addr;
 	int ret;
@@ -1144,15 +1219,24 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 		 */
 		return split_64KB_gtt_entry(vgpu, spt, index, &se);
 	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+		gvt_vdbg_mm("shadow 2M gtt entry\n");
+		ret = is_2MB_gtt_possible(vgpu, ge);
+		if (ret == 0)
+			return split_2MB_gtt_entry(vgpu, spt, index, &se);
+		else if (ret < 0)
+			return ret;
+		page_size = I915_GTT_PAGE_SIZE_2M;
+		break;
 	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-		gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
 		return -EINVAL;
 	default:
 		GEM_BUG_ON(1);
 	};
 
 	/* direct shadow */
-	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
+	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
+						      &dma_addr);
 	if (ret)
 		return -ENXIO;
@@ -1495,6 +1579,12 @@ static int ppgtt_handle_guest_write_page_table(
 					vgpu->gtt.scratch_pt[type].page_mfn);
 				ppgtt_set_shadow_entry(spt, &old_se, index + i);
 			}
+		} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
+			   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+			ops->clear_pse(&old_se);
+			ops->set_pfn(&old_se,
+				     vgpu->gtt.scratch_pt[type].page_mfn);
+			ppgtt_set_shadow_entry(spt, &old_se, index);
 		} else {
 			ops->set_pfn(&old_se,
 				     vgpu->gtt.scratch_pt[type].page_mfn);
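The '>> 5' in the PAT copy above is worth unpacking: per the x86 _PAGE_PAT /
_PAGE_PAT_LARGE macros the patch reuses, the PAT bit sits at bit 12 in a 2M
PDE but at bit 7 in a 4K PTE, and 12 - 5 = 7. A standalone check of that
arithmetic (illustrative only):

#include <assert.h>
#include <stdint.h>

#define _PAGE_PAT       (1ULL << 7)	/* PAT bit in a 4K PTE */
#define _PAGE_PAT_LARGE (1ULL << 12)	/* PAT bit in a 2M PDE */

int main(void)
{
	uint64_t pde = _PAGE_PAT_LARGE;	/* huge entry with PAT set */
	uint64_t pte = 0;

	pte &= ~_PAGE_PAT;
	pte |= (pde & _PAGE_PAT_LARGE) >> 5;	/* bit 12 -> bit 7 */
	assert(pte == _PAGE_PAT);
	return 0;
}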
From 54c81653bd67ce2a11fa08295d9148385ea29603 Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:44 +0800
Subject: [PATCH 12/16] drm/i915/gvt: Handle special sequence on PDE IPS bit

If the guest updates 64K gtt entries before changing the IPS bit of
the PDE, we need to re-shadow the whole page table, because we have
ignored all updates to unused entries.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 5a66d0f3365c..b40c6154e571 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1045,14 +1045,24 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 
 	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
 
-	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
-	if (spt)
-		ppgtt_get_spt(spt);
-	else {
-		int type = get_next_pt_type(we->type);
+	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
 
-		if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
-			ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
+	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
+	if (spt) {
+		ppgtt_get_spt(spt);
+
+		if (ips != spt->guest_page.pde_ips) {
+			spt->guest_page.pde_ips = ips;
+
+			gvt_dbg_mm("reshadow PDE since ips changed\n");
+			clear_page(spt->shadow_page.vaddr);
+			ret = ppgtt_populate_spt(spt);
+			if (ret)
+				goto fail;
+		}
+	} else {
+		int type = get_next_pt_type(we->type);
 
 		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
 		if (IS_ERR(spt)) {
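A standalone model of the check this patch adds: the cached pde_ips on a
tracked page table is compared against the IPS view of the PDE being
shadowed now; a mismatch means every previously ignored slot may have become
meaningful (or vice versa), so the shadow copy is wiped and rebuilt
(illustrative sketch, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct spt_model { bool pde_ips; };

static bool needs_reshadow(struct spt_model *spt, bool ips_now)
{
	if (spt->pde_ips == ips_now)
		return false;
	spt->pde_ips = ips_now;	/* remember the new mode, then rebuild */
	return true;
}

int main(void)
{
	struct spt_model spt = { .pde_ips = false };

	printf("%d\n", needs_reshadow(&spt, true));	/* 1: IPS flipped */
	printf("%d\n", needs_reshadow(&spt, true));	/* 0: already 64K */
	return 0;
}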
From 80e76ea631de6e14c2c436b278ee0c6871227606 Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:45 +0800
Subject: [PATCH 13/16] drm/i915/gvt: Fix error handling in
 ppgtt_populate_spt_by_guest_entry

Don't forget to free the allocated spt if shadowing failed.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gtt.c | 33 +++++++++++++++++++++------------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b40c6154e571..156ceeeb7446 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -907,15 +907,22 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
 	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
 		if (!ppgtt_get_shadow_entry(spt, e, i))
 
-static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
+static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
 	int v = atomic_read(&spt->refcount);
 
 	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
-
 	atomic_inc(&spt->refcount);
 }
 
+static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+	int v = atomic_read(&spt->refcount);
+
+	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+	return atomic_dec_return(&spt->refcount);
+}
+
 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
 
 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@ -967,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
 	struct intel_gvt_gtt_entry e;
 	unsigned long index;
 	int ret;
-	int v = atomic_read(&spt->refcount);
 
 	trace_spt_change(spt->vgpu->id, "die", spt,
 			spt->guest_page.gfn, spt->shadow_page.type);
 
-	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
-
-	if (atomic_dec_return(&spt->refcount) > 0)
+	if (ppgtt_put_spt(spt) > 0)
 		return 0;
 
 	for_each_present_shadow_entry(spt, &e, index) {
@@ -1058,8 +1062,10 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 			gvt_dbg_mm("reshadow PDE since ips changed\n");
 			clear_page(spt->shadow_page.vaddr);
 			ret = ppgtt_populate_spt(spt);
-			if (ret)
-				goto fail;
+			if (ret) {
+				ppgtt_put_spt(spt);
+				goto err;
+			}
 		}
 	} else {
 		int type = get_next_pt_type(we->type);
@@ -1067,22 +1073,25 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
 		if (IS_ERR(spt)) {
 			ret = PTR_ERR(spt);
-			goto fail;
+			goto err;
 		}
 
 		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
 		if (ret)
-			goto fail;
+			goto err_free_spt;
 
 		ret = ppgtt_populate_spt(spt);
 		if (ret)
-			goto fail;
+			goto err_free_spt;
 
 		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
 				 spt->shadow_page.type);
 	}
 	return spt;
-fail:
+
+err_free_spt:
+	ppgtt_free_spt(spt);
+err:
 	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
 		     spt, we->val64, we->type);
 	return ERR_PTR(ret);
From aa36ed6d9536ad694995340264b69d57b01da7d3 Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Tue, 15 May 2018 10:35:46 +0800
Subject: [PATCH 14/16] drm/i915: Enable platform support for vGPU huge gtt
 pages

Now GVTg supports shadowing both 64K and 2M huge gtt pages, so let's
turn on the cap info bit VGT_CAPS_HUGE_GTT.

v2: Split changes in i915 side into a separated patch.

Signed-off-by: Changbin Du
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/vgpu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 889d10f8ee96..aa063b275e81 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
 
 	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
 		vgpu_aperture_gmadr_base(vgpu);

From 93d68b258e7d6b2b4b0716c6f33a38d5b6a536ff Mon Sep 17 00:00:00 2001
From: Colin Xu
Date: Mon, 9 Jul 2018 09:28:18 +0800
Subject: [PATCH 15/16] drm/i915/gvt: Handle EDP_PSR_IMR and EDP_PSR_IIR for
 BXT.

BXT supports eDP. However, GVT-g only emulates a DP monitor for the
guest and handles EDP_PSR_IMR and EDP_PSR_IIR as default MMIO r/w.
If the guest reads/writes these IMR/IIR registers, GVT-g won't
simulate the real HW behavior, and the warning below is printed:

--------
Interrupt register 0x64838 is not zero: 0xffffffff
WARNING: CPU: 1 PID: 1 at drivers/gpu/drm/i915/i915_irq.c:161
gen3_assert_iir_is_zero+0x34/0xa0
Call Trace:
 gen8_de_irq_postinstall+0xad/0x330
 gen8_irq_postinstall+0x23/0x80
 drm_irq_install+0xb5/0x130
 i915_driver_load+0xafd/0xf70
--------

Since GVT-g doesn't emulate eDP for the guest, always set EDP_PSR_IMR
and EDP_PSR_IIR to 0.

Signed-off-by: Colin Xu
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/handlers.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17f56fc20613..e2e252c67de8 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1584,6 +1584,13 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	vgpu_vreg(vgpu, offset) = 0;
+	return 0;
+}
+
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -3182,6 +3189,9 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
 
+	MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+	MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+
 	MMIO_D(RC6_CTX_BASE, D_BXT);
 
 	MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);

From 279ce5d117078ee8ea40c40199399889981fd808 Mon Sep 17 00:00:00 2001
From: Hang Yuan
Date: Mon, 9 Jul 2018 18:24:10 +0800
Subject: [PATCH 16/16] drm/i915/gvt: declare gvt as i915's soft dependency

This helps initramfs builders and other tools know the full
dependencies of i915 and have the gvt module loaded together with
i915.

v2: add condition and change to pre-dependency (Chris)
v3: move declaration to gvt.c. (Chris)
v4: remove xengt (Zhenyu)

Signed-off-by: Hang Yuan
Reviewed-by: Chris Wilson
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/gvt.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 4e65266e7b95..712f9d14e720 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -468,3 +468,7 @@ out_clean_idr:
 	kfree(gvt);
 	return ret;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+MODULE_SOFTDEP("pre: kvmgt");
+#endif