Merge tag 'gvt-next-2017-02-15' of https://github.com/01org/gvt-linux into drm-intel-next-fixes
gvt-next-2017-02-15

- Chuanxiao's IOMMU workaround fix
- debug message cleanup from Changbin
- oops fix in fail path of workload submission when GPU reset from Changbin
- other misc fixes

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
commit 33b7bfdf91
@@ -49,20 +49,21 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 	if (high_gm) {
 		node = &vgpu->gm.high_gm_node;
 		size = vgpu_hidden_sz(vgpu);
-		start = gvt_hidden_gmadr_base(gvt);
-		end = gvt_hidden_gmadr_end(gvt);
+		start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+		end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
 		flags = PIN_HIGH;
 	} else {
 		node = &vgpu->gm.low_gm_node;
 		size = vgpu_aperture_sz(vgpu);
-		start = gvt_aperture_gmadr_base(gvt);
-		end = gvt_aperture_gmadr_end(gvt);
+		start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+		end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
 		flags = PIN_MAPPABLE;
 	}

 	mutex_lock(&dev_priv->drm.struct_mutex);
 	ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
-				  size, 4096, I915_COLOR_UNEVICTABLE,
+				  size, I915_GTT_PAGE_SIZE,
+				  I915_COLOR_UNEVICTABLE,
 				  start, end, flags);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (ret)
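The hunk above page-aligns the GM range and passes I915_GTT_PAGE_SIZE as the allocation alignment. For readers unfamiliar with the kernel's ALIGN() helper, a standalone userspace illustration of the round-up it performs follows; the 4096-byte page size and the local macro copy are assumptions made for the example, not the i915 headers themselves.

#include <stdio.h>

/* Local copy of the kernel's round-up-to-a-power-of-two-boundary macro. */
#define EXAMPLE_ALIGN(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))
#define GTT_PAGE_SIZE		4096UL	/* assumed value of I915_GTT_PAGE_SIZE */

int main(void)
{
	unsigned long base = 0x10000123UL;	/* hypothetical unaligned GM address */

	printf("base         = %#lx\n", base);
	printf("aligned base = %#lx\n", EXAMPLE_ALIGN(base, GTT_PAGE_SIZE));
	return 0;
}

Rounding the range to page granularity keeps the request consistent with the page-sized alignment now passed to i915_gem_gtt_insert().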
@@ -254,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	if (request > avail)
 		goto no_enough_resource;

-	vgpu_aperture_sz(vgpu) = request;
+	vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

 	item = "high GM space";
 	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
@@ -265,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	if (request > avail)
 		goto no_enough_resource;

-	vgpu_hidden_sz(vgpu) = request;
+	vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

 	item = "fence";
 	max = gvt_fence_sz(gvt) - HOST_FENCE;
@@ -1135,6 +1135,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 	u32 dword2 = cmd_val(s, 2);
 	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

+	info->plane = PRIMARY_PLANE;
+
 	switch (plane) {
 	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
 		info->pipe = PIPE_A;
@@ -1148,12 +1150,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		info->pipe = PIPE_C;
 		info->event = PRIMARY_C_FLIP_DONE;
 		break;
+
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
+		info->pipe = PIPE_A;
+		info->event = SPRITE_A_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
+		info->pipe = PIPE_B;
+		info->event = SPRITE_B_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
+		info->pipe = PIPE_C;
+		info->event = SPRITE_C_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+
 	default:
 		gvt_err("unknown plane code %d\n", plane);
 		return -EINVAL;
 	}

-	info->pipe = PRIMARY_PLANE;
 	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
 	info->tile_val = (dword1 & GENMASK(2, 0));
 	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
@@ -333,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu)
 	else
 		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
 }
+
+/**
+ * intel_vgpu_reset_display- reset vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset vGPU virtual display emulation stuffs
+ *
+ */
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu)
+{
+	emulate_monitor_status_change(vgpu);
+}
@@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);

 int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu);

 #endif
@@ -515,7 +515,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)

 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	if (wa_ctx->indirect_ctx.size == 0)
+	if (!wa_ctx->indirect_ctx.obj)
 		return;

 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
@@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
 static inline int init_shadow_page(struct intel_vgpu *vgpu,
 		struct intel_vgpu_shadow_page *p, int type)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(kdev, daddr)) {
+		gvt_err("fail to map dma addr\n");
+		return -EINVAL;
+	}
+
 	p->vaddr = page_address(p->page);
 	p->type = type;

 	INIT_HLIST_NODE(&p->node);

-	p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
-	if (p->mfn == INTEL_GVT_INVALID_ADDR)
-		return -EFAULT;
-
+	p->mfn = daddr >> GTT_PAGE_SHIFT;
 	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
 	return 0;
 }

-static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+static inline void clean_shadow_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_shadow_page *p)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+
+	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+			PCI_DMA_BIDIRECTIONAL);
+
 	if (!hlist_unhashed(&p->node))
 		hash_del(&p->node);
 }
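The init_shadow_page()/clean_shadow_page() hunk above swaps the hypervisor virt-to-MFN lookup for the streaming DMA API, so the value stored in p->mfn is derived from an IOMMU-visible bus address. A minimal sketch of that map/check/unmap pairing follows; the device and page are assumed to come from the caller, PAGE_SIZE and DMA_BIDIRECTIONAL stand in for the literal 4096 and PCI_DMA_BIDIRECTIONAL used in the patch (equivalent here), and the helper names are invented for the example, not GVT-g code.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: error-checked streaming mapping of a single page, mirroring
 * the pattern adopted above. */
static int example_map_one_page(struct device *dev, struct page *page,
				dma_addr_t *out)
{
	dma_addr_t daddr;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr))
		return -ENOMEM;	/* nothing was mapped, nothing to undo */

	*out = daddr;
	return 0;
}

static void example_unmap_one_page(struct device *dev, dma_addr_t daddr)
{
	/* Size and direction must match what was passed at map time. */
	dma_unmap_page(dev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}

Because the mapping is page-aligned, the patch can keep the existing mfn field by storing daddr >> GTT_PAGE_SHIFT and reversing the shift before unmapping.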
@@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

-	clean_shadow_page(&spt->shadow_page);
+	clean_shadow_page(spt->vgpu, &spt->shadow_page);
 	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
 	list_del_init(&spt->post_shadow_list);

@@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	int page_entry_num = GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
 	void *scratch_pt;
-	unsigned long mfn;
 	int i;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;

 	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
 		return -EINVAL;
@@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		return -ENOMEM;
 	}

-	mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
-	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
-		free_page((unsigned long)scratch_pt);
-		return -EFAULT;
+	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
+			4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr)) {
+		gvt_err("fail to dmamap scratch_pt\n");
+		__free_page(virt_to_page(scratch_pt));
+		return -ENOMEM;
 	}
-	gtt->scratch_pt[type].page_mfn = mfn;
+	gtt->scratch_pt[type].page_mfn =
+		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
 	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
-			vgpu->id, type, mfn);
+			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

 	/* Build the tree by full filled the scratch pt with the entries which
 	 * point to the next level scratch pt or scratch page. The
@@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
 	int i;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;

 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
+			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
+					GTT_PAGE_SHIFT);
+			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 			__free_page(vgpu->gtt.scratch_pt[i].page);
 			vgpu->gtt.scratch_pt[i].page = NULL;
 			vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
 	void *page;
+	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;

 	gvt_dbg_core("init gtt\n");

@@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		gvt_err("fail to allocate scratch ggtt page\n");
 		return -ENOMEM;
 	}
-	gvt->gtt.scratch_ggtt_page = virt_to_page(page);

-	gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
-	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate scratch ggtt page\n");
-		__free_page(gvt->gtt.scratch_ggtt_page);
-		return -EFAULT;
+	daddr = dma_map_page(dev, virt_to_page(page), 0,
+			4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr)) {
+		gvt_err("fail to dmamap scratch ggtt page\n");
+		__free_page(virt_to_page(page));
+		return -ENOMEM;
 	}
+	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
+	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);

 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
@@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
+					GTT_PAGE_SHIFT);
+
+	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+
 	__free_page(gvt->gtt.scratch_ggtt_page);

 	if (enable_out_of_sync)
@@ -75,13 +75,6 @@ int intel_gvt_init_host(void)
 	if (xen_domain() && !xen_initial_domain())
 		return -ENODEV;

-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_gfx_mapped) {
-		gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
-		return -ENODEV;
-	}
-#endif
-
 	/* Try to load MPT modules for hypervisors */
 	if (xen_initial_domain()) {
 		/* In Xen dom0 */
@@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
-	u32 changed, masked, unmasked;
 	u32 imr = *(u32 *)p_data;

-	gvt_dbg_irq("write IMR %x with val %x\n",
-		reg, imr);
-
-	gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
-
-	/* figure out newly masked/unmasked bits */
-	changed = vgpu_vreg(vgpu, reg) ^ imr;
-	masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
-	unmasked = masked ^ changed;
+	gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
+		reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);

-	gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
-		changed, masked, unmasked);
-
 	vgpu_vreg(vgpu, reg) = imr;

 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }

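The IMR hunk above (and the IER/master-IRQ hunks that follow) drop the intermediate changed/masked/unmasked variables; the single debug line now prints the old value, the new value and old ^ new. A toy userspace C snippet, with arbitrary register values, showing that the dropped quantities are still derivable from those three numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t old_val = 0x0000f0f0;	/* arbitrary example values */
	uint32_t new_val = 0x0000ff00;
	uint32_t changed  = old_val ^ new_val;
	uint32_t masked   = (old_val & changed) ^ changed;	/* bits newly set */
	uint32_t unmasked = masked ^ changed;			/* bits newly cleared */

	/* masked == (new_val & ~old_val), unmasked == (old_val & ~new_val) */
	printf("changed=%08x masked=%08x unmasked=%08x\n",
	       changed, masked, unmasked);
	return 0;
}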
@@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
-	u32 changed, enabled, disabled;
 	u32 ier = *(u32 *)p_data;
 	u32 virtual_ier = vgpu_vreg(vgpu, reg);

-	gvt_dbg_irq("write master irq reg %x with val %x\n",
-		reg, ier);
-
-	gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+	gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
+		reg, ier, virtual_ier, virtual_ier ^ ier);

 	/*
 	 * GEN8_MASTER_IRQ is a special irq register,
@@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 	vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
 	vgpu_vreg(vgpu, reg) |= ier;

-	/* figure out newly enabled/disable bits */
-	changed = virtual_ier ^ ier;
-	enabled = (virtual_ier & changed) ^ changed;
-	disabled = enabled ^ changed;
-
-	gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
-		changed, enabled, disabled);
-
 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }

@@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
 	struct intel_gvt_irq_info *info;
-	u32 changed, enabled, disabled;
 	u32 ier = *(u32 *)p_data;

-	gvt_dbg_irq("write IER %x with val %x\n",
-		reg, ier);
+	gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
+		reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);

-	gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
-
-	/* figure out newly enabled/disable bits */
-	changed = vgpu_vreg(vgpu, reg) ^ ier;
-	enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
-	disabled = enabled ^ changed;
-
-	gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
-		changed, enabled, disabled);
 	vgpu_vreg(vgpu, reg) = ier;

 	info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
@@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	update_upstream_irq(vgpu, info);

 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }

@@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
 		iir_to_regbase(reg));
 	u32 iir = *(u32 *)p_data;

-	gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+	gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
+		reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);

 	if (WARN_ON(!info))
 		return -EINVAL;
@@ -619,6 +588,10 @@ static void gen8_init_irq(
 	SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
 	SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
 	SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+	SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+	SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+	SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 }

 /* GEN8 interrupt PCU events */
@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
 struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 };

 static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+		unsigned long *iova)
+{
+	struct page *page;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	page = pfn_to_page(pfn);
+	if (is_error_page(page))
+		return -EFAULT;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+			PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr))
+		return -ENOMEM;
+
+	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
+	return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -111,21 +140,22 @@ out:
 	return ret;
 }

-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
-	kvm_pfn_t pfn;
+	unsigned long iova;

 	mutex_lock(&vgpu->vdev.cache_lock);

 	entry = __gvt_cache_find(vgpu, gfn);
-	pfn = (entry == NULL) ? 0 : entry->pfn;
+	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;

 	mutex_unlock(&vgpu->vdev.cache_lock);
-	return pfn;
+	return iova;
 }

-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+		unsigned long iova)
 {
 	struct gvt_dma *new, *itr;
 	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
 		return;

 	new->gfn = gfn;
-	new->pfn = pfn;
+	new->iova = iova;

 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 	}

 	g1 = gfn;
+	gvt_dma_unmap_iova(vgpu, this->iova);
 	rc = vfio_unpin_pages(dev, &g1, 1);
 	WARN_ON(rc != 1);
 	__gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while ((node = rb_first(&vgpu->vdev.cache))) {
 		dma = rb_entry(node, struct gvt_dma, node);
+		gvt_dma_unmap_iova(vgpu, dma->iova);
 		gfn = dma->gfn;

 		vfio_unpin_pages(dev, &gfn, 1);
@@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			sparse->areas[0].offset =
 					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
 			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
-			if (!caps.buf) {
-				kfree(caps.buf);
-				caps.buf = NULL;
-				caps.size = 0;
-			}
 			break;

 		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
@@ -1353,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)

 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 {
-	unsigned long pfn;
+	unsigned long iova, pfn;
 	struct kvmgt_guest_info *info;
 	struct device *dev;
 	int rc;
@@ -1362,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		return INTEL_GVT_INVALID_ADDR;

 	info = (struct kvmgt_guest_info *)handle;
-	pfn = gvt_cache_find(info->vgpu, gfn);
-	if (pfn != 0)
-		return pfn;
+	iova = gvt_cache_find(info->vgpu, gfn);
+	if (iova != INTEL_GVT_INVALID_ADDR)
+		return iova;

 	pfn = INTEL_GVT_INVALID_ADDR;
 	dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1373,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
 		return INTEL_GVT_INVALID_ADDR;
 	}
+	/* transfer to host iova for GFX to use DMA */
+	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+	if (rc) {
+		gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+		vfio_unpin_pages(dev, &gfn, 1);
+		return INTEL_GVT_INVALID_ADDR;
+	}

-	gvt_cache_add(info->vgpu, gfn, pfn);
-	return pfn;
+	gvt_cache_add(info->vgpu, gfn, iova);
+	return iova;
 }

 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
@@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 	}
 }

+#define CTX_CONTEXT_CONTROL_VAL	0x03
+
 void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct render_mmio *mmio;
 	u32 v;
 	int i, array_size;
+	u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+	u32 inhibit_mask =
+		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

 	if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
 		mmio = gen9_render_mmio_list;
@@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 			continue;

 		mmio->value = I915_READ(mmio->reg);
+
+		/*
+		 * if it is an inhibit context, load in_context mmio
+		 * into HW by mmio write. If it is not, skip this mmio
+		 * write.
+		 */
+		if (mmio->in_context &&
+		    ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
+		    i915.enable_execlists)
+			continue;
+
 		if (mmio->mask)
 			v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
 		else
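The second render-MMIO hunk above skips restoring in-context registers unless the saved CTX_CONTEXT_CONTROL value actually asserts the restore-inhibit bit. i915 masked registers carry a write-enable bit 16 positions above each value bit, which is what _MASKED_BIT_ENABLE() encodes. A standalone illustration of that convention; the macro copy and the bit position are assumptions made for the example only:

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for i915's _MASKED_BIT_ENABLE(): value bit plus its
 * write-enable bit 16 positions higher. */
#define EXAMPLE_MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))
#define EXAMPLE_RESTORE_INHIBIT		(1u << 3)	/* assumed bit position */

int main(void)
{
	uint32_t inhibit_mask = EXAMPLE_MASKED_BIT_ENABLE(EXAMPLE_RESTORE_INHIBIT);
	uint32_t ctx_ctrl = 0x00080000;	/* write-enable set, value bit clear */

	/* Matches the hunk's test: only a context image that fully asserts
	 * the bit (value + write-enable) counts as an inhibit context. */
	printf("inhibit context: %s\n",
	       (ctx_ctrl & inhibit_mask) == inhibit_mask ? "yes" : "no");
	return 0;
}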
@@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work)
 		vgpu_data = scheduler->current_vgpu->sched_data;
 		head = &vgpu_data->list;
 	} else {
-		gvt_dbg_sched("no current vgpu search from q head\n");
 		head = &sched_data->runq_head;
 	}

@@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
 		ring_id, workload);

-	shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

 	mutex_lock(&dev_priv->drm.struct_mutex);
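The dispatch_workload() hunk above fixes a read-modify-write bug: plain assignment overwrote every bit of desc_template, while the new code clears only the two-bit addressing-mode field before ORing in the workload's value. A generic userspace illustration of that clear-then-set pattern; the shift and width here are illustrative only, not the real GEN8_CTX_ADDRESSING_MODE_SHIFT definition:

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_MODE_SHIFT	3u		/* assumed field position */
#define EXAMPLE_MODE_MASK	(0x3u << EXAMPLE_MODE_SHIFT)

/* Update only the mode field, preserving every other bit of the descriptor. */
static uint32_t set_mode(uint32_t desc, uint32_t mode)
{
	desc &= ~EXAMPLE_MODE_MASK;
	desc |= (mode << EXAMPLE_MODE_SHIFT) & EXAMPLE_MODE_MASK;
	return desc;
}

int main(void)
{
	uint32_t desc = 0xdeadbe00;	/* arbitrary starting descriptor */

	printf("before %08x after %08x\n", desc, set_mode(desc, 0x2));
	return 0;
}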
@@ -456,7 +457,7 @@ static int workload_thread(void *priv)
 	}

 complete:
-	gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+	gvt_dbg_sched("will complete workload %p, status: %d\n",
 		workload, workload->status);

 	if (workload->req)
@@ -385,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		intel_vgpu_reset_resource(vgpu);
 		intel_vgpu_reset_mmio(vgpu);
 		populate_pvinfo_page(vgpu);
+		intel_vgpu_reset_display(vgpu);

 		if (dmlr)
 			intel_vgpu_reset_cfg_space(vgpu);