Mirror of https://github.com/torvalds/linux.git (synced 2024-11-27 06:31:52 +00:00)
Merge tag 'drm-intel-fixes-2017-01-19' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes
More GVT-g stuff than I'd like at this stage, but then again that's pretty new and isolated, so I'm not too worried.

* tag 'drm-intel-fixes-2017-01-19' of git://anongit.freedesktop.org/git/drm-intel: (26 commits)
  drm/i915: Ignore bogus plane coordinates on SKL when the plane is not visible
  drm/i915: Remove WaDisableLSQCROPERFforOCL KBL workaround.
  drm/i915/gvt: rewrite gt reset handler using new function intel_gvt_reset_vgpu_locked
  drm/i915/gvt: fix vGPU instance reuse issues by vGPU reset function
  drm/i915/gvt: introduce intel_vgpu_reset_mmio() to reset mmio space
  drm/i915/gvt: move mmio init/clean function to mmio.c
  drm/i915/gvt: introduce intel_vgpu_reset_cfg_space to reset configuration space
  drm/i915/gvt: move cfg space inititation function to cfg_space.c
  drm/i915/gvt: introuduce intel_vgpu_reset_gtt() to reset gtt
  drm/i915/gvt: introudce intel_vgpu_reset_resource() to reset vgpu resource state
  drm/i915: Fix phys pwrite for struct_mutex-less operation
  drm/i915: Clear ret before unbinding in i915_gem_evict_something()
  drm/i915/gvt: cleanup GFP flags
  drm/i915/gvt/kvmgt: return meaningful error for vgpu creating failure
  drm/i915/gvt: cleanup opregion memory allocation code
  drm/i915/gvt: destroy the allocated idr on vgpu creating failures
  drm/i915/gvt: init/destroy vgpu_idr properly
  drm/i915/gvt: dec vgpu->running_workload_num after the workload is really done
  drm/i915/gvt: fix use after free for workload
  drm/i915/gvt: remove duplicated definition
  ...
This commit is contained in:
commit f1750e144a
@@ -37,13 +37,6 @@
 #include "i915_drv.h"
 #include "gvt.h"

-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 	POSTING_READ(fence_reg_lo);
 }

+static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
+{
+	int i;
+
+	for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+		intel_vgpu_write_fence(vgpu, i, 0);
+}
+
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
 	intel_runtime_pm_get(dev_priv);

 	mutex_lock(&dev_priv->drm.struct_mutex);
+	_clear_vgpu_fence(vgpu);
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
 		reg = vgpu->fence.regs[i];
-		intel_vgpu_write_fence(vgpu, i, 0);
 		list_add_tail(&reg->link,
 			      &dev_priv->mm.fence_list);
 	}
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 			continue;
 		list_del(pos);
 		vgpu->fence.regs[i] = reg;
-		intel_vgpu_write_fence(vgpu, i, 0);
 		if (++i == vgpu_fence_sz(vgpu))
 			break;
 	}
 	if (i != vgpu_fence_sz(vgpu))
 		goto out_free_fence;

+	_clear_vgpu_fence(vgpu);
+
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	intel_runtime_pm_put(dev_priv);
 	return 0;
@@ -313,6 +315,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
 	free_resource(vgpu);
 }

+/**
+ * intel_vgpu_reset_resource - reset resource state owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset resource state owned by a vGPU.
+ *
+ */
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	intel_runtime_pm_get(dev_priv);
+	_clear_vgpu_fence(vgpu);
+	intel_runtime_pm_put(dev_priv);
+}
+
 /**
  * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
  * @vgpu: vGPU
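The new _clear_vgpu_fence() helper above factors the "write zero to every fence slot" loop out of both the alloc and free paths, so a reused vGPU never inherits stale fence programming. A rough userspace sketch of the same idea; the regs[] array, mmio_write32(), the slot count, and the lo/hi write order are all illustrative stand-ins, not the driver's real fence MMIO layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit register file standing in for the fence MMIO range. */
static uint32_t regs[8 * 2];

static void mmio_write32(unsigned int off, uint32_t val)
{
	regs[off] = val;
}

/* Mirrors the shape of intel_vgpu_write_fence(): one 64-bit fence value
 * committed as two 32-bit MMIO writes (fence_reg_lo / fence_reg_hi). */
static void write_fence(unsigned int fence, uint64_t value)
{
	mmio_write32(fence * 2 + 0, (uint32_t)value);         /* lo dword */
	mmio_write32(fence * 2 + 1, (uint32_t)(value >> 32)); /* hi dword */
}

int main(void)
{
	/* _clear_vgpu_fence() is just this loop over every owned slot. */
	for (unsigned int i = 0; i < 8; i++)
		write_fence(i, 0);
	printf("fence[0] lo/hi = %u/%u\n", regs[0], regs[1]);
	return 0;
}
```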
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	}
 	return 0;
 }
+
+/**
+ * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
+ *
+ * @vgpu: a vGPU
+ * @primary: is the vGPU presented as primary
+ *
+ */
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+			       bool primary)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	const struct intel_gvt_device_info *info = &gvt->device_info;
+	u16 *gmch_ctl;
+	int i;
+
+	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+	       info->cfg_space_size);
+
+	if (!primary) {
+		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+				INTEL_GVT_PCI_CLASS_VGA_OTHER;
+		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+				INTEL_GVT_PCI_CLASS_VGA_OTHER;
+	}
+
+	/* Show guest that there isn't any stolen memory.*/
+	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+				 gvt_aperture_pa_base(gvt), true);
+
+	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+					     | PCI_COMMAND_MEMORY
+					     | PCI_COMMAND_MASTER);
+	/*
+	 * Clear the bar upper 32bit and let guest to assign the new value
+	 */
+	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+		vgpu->cfg_space.bar[i].size = pci_resource_len(
+					      gvt->dev_priv->drm.pdev, i * 2);
+		vgpu->cfg_space.bar[i].tracked = false;
+	}
+}
+
+/**
+ * intel_vgpu_reset_cfg_space - reset vGPU configuration space
+ *
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
+{
+	u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
+	bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
+				INTEL_GVT_PCI_CLASS_VGA_OTHER;
+
+	if (cmd & PCI_COMMAND_MEMORY) {
+		trap_gttmmio(vgpu, false);
+		map_aperture(vgpu, false);
+	}
+
+	/**
+	 * Currently we only do such reset when vGPU is not
+	 * owned by any VM, so we simply restore entire cfg
+	 * space to default value.
+	 */
+	intel_vgpu_init_cfg_space(vgpu, primary);
+}
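intel_vgpu_init_cfg_space() hides host stolen memory from the guest by clearing the graphics-mode-select (GMS) field in the virtual GMCH control word. A standalone sketch of just that masking step; the config-space offset and the mask/shift values here are illustrative placeholders, not the real BDW_GMCH_GMS_* definitions from i915_reg.h:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GMCH_GMS_SHIFT 8     /* illustrative */
#define GMCH_GMS_MASK  0xff  /* illustrative */

int main(void)
{
	uint8_t cfg_space[256];
	memset(cfg_space, 0, sizeof(cfg_space));

	/* Pretend firmware reported some stolen memory in GMCH control
	 * (offset 0x50 is a made-up example, kept 2-byte aligned). */
	uint16_t *gmch_ctl = (uint16_t *)(cfg_space + 0x50);
	*gmch_ctl = 0x20 << GMCH_GMS_SHIFT;

	/* The init path clears the GMS field so the guest sees no
	 * stolen memory at all. */
	*gmch_ctl &= ~(GMCH_GMS_MASK << GMCH_GMS_SHIFT);

	printf("GMS after init: %u\n",
	       (*gmch_ctl >> GMCH_GMS_SHIFT) & GMCH_GMS_MASK);
	return 0;
}
```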
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 {
 	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-	u64 pte;

-#ifdef readq
-	pte = readq(addr);
-#else
-	pte = ioread32(addr);
-	pte |= (u64)ioread32(addr + 4) << 32;
-#endif
-	return pte;
+	return readq(addr);
 }

 static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
 {
 	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

-#ifdef writeq
 	writeq(pte, addr);
-#else
-	iowrite32((u32)pte, addr);
-	iowrite32(pte >> 32, addr + 4);
-#endif

 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
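The removed #ifdef blocks are the classic fallback for platforms without a native readq/writeq: synthesize the 64-bit access from two 32-bit ones, which is not atomic with respect to the hardware. Dropping them is fine where a real readq/writeq is guaranteed to exist. A userspace approximation of the removed read path, assuming little-endian layout and a stand-in for ioread32():

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for ioread32(); real code targets __iomem pointers. */
static uint32_t ioread32_sim(const void *addr)
{
	return *(const volatile uint32_t *)addr;
}

/* The old fallback: build a 64-bit PTE from two 32-bit reads.
 * Assumes little-endian; not atomic versus concurrent HW updates,
 * which is one reason a native readq() is preferable. */
static uint64_t read_pte64_compat(const void *addr)
{
	uint64_t pte = ioread32_sim(addr);

	pte |= (uint64_t)ioread32_sim((const uint8_t *)addr + 4) << 32;
	return pte;
}

int main(void)
{
	uint64_t pte = 0x123456789abcdef0ULL;

	printf("0x%llx\n", (unsigned long long)read_pte64_compat(&pte));
	return 0;
}
```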
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
 			info->gtt_entry_size;
 	mem = kzalloc(mm->has_shadow_page_table ?
 		mm->page_table_entry_size * 2
-		: mm->page_table_entry_size,
-		GFP_ATOMIC);
+		: mm->page_table_entry_size, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 	mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 	struct intel_vgpu_mm *mm;
 	int ret;

-	mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
 	if (!mm) {
 		ret = -ENOMEM;
 		goto fail;
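The GFP cleanup above swaps GFP_ATOMIC for GFP_KERNEL: GFP_ATOMIC is for contexts that must not sleep (interrupt handlers, under spinlocks) and fails more readily, while GFP_KERNEL may block to reclaim memory. These GVT paths run in ordinary process context, so GFP_KERNEL is the right choice. A trivially runnable sketch with a userspace stub for kzalloc(); the flag constants are placeholders, since only the real kernel allocator interprets them:

```c
#include <stdio.h>
#include <stdlib.h>

#define GFP_KERNEL 0  /* may sleep: process context only */
#define GFP_ATOMIC 1  /* may not sleep: atomic context */

/* Userspace stub: allocate zeroed memory, ignore the flag. */
static void *kzalloc_sim(size_t size, int flags)
{
	(void)flags; /* honored only by the real kernel allocator */
	return calloc(1, size);
}

struct mm_sim { int id; };  /* hypothetical, stands in for intel_vgpu_mm */

int main(void)
{
	struct mm_sim *mm = kzalloc_sim(sizeof(*mm), GFP_KERNEL);

	if (!mm)
		return 1;
	printf("zeroed allocation: id=%d\n", mm->id);
	free(mm);
	return 0;
}
```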
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	int page_entry_num = GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
-	struct page *scratch_pt;
+	void *scratch_pt;
 	unsigned long mfn;
 	int i;
-	void *p;

 	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
 		return -EINVAL;

-	scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!scratch_pt) {
 		gvt_err("fail to allocate scratch page\n");
 		return -ENOMEM;
 	}

-	p = kmap_atomic(scratch_pt);
-	mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+	mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
 	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
-		kunmap_atomic(p);
-		__free_page(scratch_pt);
+		gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+		free_page((unsigned long)scratch_pt);
 		return -EFAULT;
 	}
 	gtt->scratch_pt[type].page_mfn = mfn;
-	gtt->scratch_pt[type].page = scratch_pt;
+	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
 			vgpu->id, type, mfn);

@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	 * scratch_pt[type] indicate the scratch pt/scratch page used by the
 	 * 'type' pt.
 	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
-	 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+	 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
	 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
 	 */
 	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		se.val64 |= PPAT_CACHED_INDEX;

 		for (i = 0; i < page_entry_num; i++)
-			ops->set_entry(p, &se, i, false, 0, vgpu);
+			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
 	}

-	kunmap_atomic(p);
-
 	return 0;
 }

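get_zeroed_page(GFP_KERNEL) returns a zeroed page as a kernel virtual address, so the code above can drop the whole alloc_page()/kmap_atomic()/kunmap_atomic() dance and pass the address straight to the hypervisor translation and to set_entry(); virt_to_page() recovers the struct page only where one is still stored. A userspace analogue of that allocation style, using C11 aligned_alloc:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Rough analogue of get_zeroed_page(GFP_KERNEL): one zeroed,
 * page-aligned page, usable directly through its address. */
static void *get_zeroed_page_sim(void)
{
	void *p = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (p)
		memset(p, 0, PAGE_SIZE);
	return p;
}

int main(void)
{
	uint64_t *scratch_pt = get_zeroed_page_sim();

	if (!scratch_pt)
		return 1;

	/* Entries are written in place, as ops->set_entry() now does with
	 * the page address instead of a temporary kmap_atomic() mapping. */
	scratch_pt[0] = 0x3; /* illustrative PTE bits */
	printf("entry[0]=0x%llx entry[1]=0x%llx\n",
	       (unsigned long long)scratch_pt[0],
	       (unsigned long long)scratch_pt[1]);

	free(scratch_pt);
	return 0;
}
```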
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
-	void *page_addr;
+	void *page;

 	gvt_dbg_core("init gtt\n");

@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		return -ENODEV;
 	}

-	gvt->gtt.scratch_ggtt_page =
-		alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
-	if (!gvt->gtt.scratch_ggtt_page) {
+	page = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!page) {
 		gvt_err("fail to allocate scratch ggtt page\n");
 		return -ENOMEM;
 	}
+	gvt->gtt.scratch_ggtt_page = virt_to_page(page);

-	page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
-	gvt->gtt.scratch_ggtt_mfn =
-		intel_gvt_hypervisor_virt_to_mfn(page_addr);
+	gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
 	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
 		gvt_err("fail to translate scratch ggtt page\n");
 		__free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 	for (offset = 0; offset < num_entries; offset++)
 		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 }

+/**
+ * intel_vgpu_reset_gtt - reset the all GTT related status
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from vfio core to reset reset all
+ * GTT related status, including GGTT, PPGTT, scratch page.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+	int i;
+
+	ppgtt_free_all_shadow_page(vgpu);
+	if (!dmlr)
+		return;
+
+	intel_vgpu_reset_ggtt(vgpu);
+
+	/* clear scratch page for security */
+	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+		if (vgpu->gtt.scratch_pt[i].page != NULL)
+			memset(page_address(vgpu->gtt.scratch_pt[i].page),
+				0, PAGE_SIZE);
+	}
+}
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);

 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);

 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
 	intel_gvt_clean_vgpu_types(gvt);

+	idr_destroy(&gvt->vgpu_idr);
+
 	kfree(dev_priv->gvt);
 	dev_priv->gvt = NULL;
 }
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)

 	gvt_dbg_core("init gvt device\n");

+	idr_init(&gvt->vgpu_idr);
+
 	mutex_init(&gvt->lock);
 	gvt->dev_priv = dev_priv;

@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)

 	ret = intel_gvt_setup_mmio_info(gvt);
 	if (ret)
-		return ret;
+		goto out_clean_idr;

 	ret = intel_gvt_load_firmware(gvt);
 	if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
 	intel_gvt_free_firmware(gvt);
 out_clean_mmio_info:
 	intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+	idr_destroy(&gvt->vgpu_idr);
 	kfree(gvt);
 	return ret;
 }
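These gvt.c hunks pair idr_init() at device-init time with idr_destroy() on both the new out_clean_idr failure label and the intel_gvt_clean_device() teardown path, so the vgpu_idr is neither leaked nor destroyed twice. A compilable toy version of that goto-unwind pattern, with dummy stand-ins for the idr and for a failing setup step:

```c
#include <stdio.h>

/* Dummy stand-ins: the real code uses struct idr and idr_init/idr_destroy. */
struct dummy_idr { int initialized; };

static void idr_init_sim(struct dummy_idr *idr)    { idr->initialized = 1; }
static void idr_destroy_sim(struct dummy_idr *idr) { idr->initialized = 0; }

static int setup_mmio_info_sim(void)
{
	return -1; /* force the error path for the demonstration */
}

static int init_device(void)
{
	struct dummy_idr vgpu_idr;
	int ret;

	idr_init_sim(&vgpu_idr);

	ret = setup_mmio_info_sim();
	if (ret)
		goto out_clean_idr; /* was "return ret;" -- leaked the idr */

	return 0;

out_clean_idr:
	idr_destroy_sim(&vgpu_idr);
	return ret;
}

int main(void)
{
	printf("init_device() = %d\n", init_device());
	return 0;
}
```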
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {

 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 	u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+				 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);


@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+		bool primary);
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
+
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes);

@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
-int setup_vgpu_mmio(struct intel_vgpu *vgpu);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);

 struct intel_gvt_ops {
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 static int new_mmio_info(struct intel_gvt *gvt,
		u32 offset, u32 flags, u32 size,
		u32 addr_mask, u32 ro_mask, u32 device,
-		void *read, void *write)
+		int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+		int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
 {
 	struct intel_gvt_mmio_info *info, *p;
 	u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
		default:
			/*should not hit here*/
			gvt_err("invalid forcewake offset 0x%x\n", offset);
-			return 1;
+			return -EINVAL;
		}
	} else {
		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
	return 0;
 }

-static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes, unsigned long bitmap)
-{
-	struct intel_gvt_workload_scheduler *scheduler =
-		&vgpu->gvt->scheduler;
-
-	vgpu->resetting = true;
-
-	intel_vgpu_stop_schedule(vgpu);
-	/*
-	 * The current_vgpu will set to NULL after stopping the
-	 * scheduler when the reset is triggered by current vgpu.
-	 */
-	if (scheduler->current_vgpu == NULL) {
-		mutex_unlock(&vgpu->gvt->lock);
-		intel_gvt_wait_vgpu_idle(vgpu);
-		mutex_lock(&vgpu->gvt->lock);
-	}
-
-	intel_vgpu_reset_execlist(vgpu, bitmap);
-
-	/* full GPU reset */
-	if (bitmap == 0xff) {
-		mutex_unlock(&vgpu->gvt->lock);
-		intel_vgpu_clean_gtt(vgpu);
-		mutex_lock(&vgpu->gvt->lock);
-		setup_vgpu_mmio(vgpu);
-		populate_pvinfo_page(vgpu);
-		intel_vgpu_init_gtt(vgpu);
-	}
-
-	vgpu->resetting = false;
-
-	return 0;
-}
-
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
-		void *p_data, unsigned int bytes)
+			    void *p_data, unsigned int bytes)
 {
+	unsigned int engine_mask = 0;
	u32 data;
-	u64 bitmap = 0;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
-		bitmap = 0xff;
-	}
-	if (data & GEN6_GRDOM_RENDER) {
-		gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
-		bitmap |= (1 << RCS);
-	}
-	if (data & GEN6_GRDOM_MEDIA) {
-		gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
-		bitmap |= (1 << VCS);
-	}
-	if (data & GEN6_GRDOM_BLT) {
-		gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
-		bitmap |= (1 << BCS);
-	}
-	if (data & GEN6_GRDOM_VECS) {
-		gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
-		bitmap |= (1 << VECS);
-	}
-	if (data & GEN8_GRDOM_MEDIA2) {
-		gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
-		if (HAS_BSD2(vgpu->gvt->dev_priv))
-			bitmap |= (1 << VCS2);
-	}
-	return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+		engine_mask = ALL_ENGINES;
+	} else {
+		if (data & GEN6_GRDOM_RENDER) {
+			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+			engine_mask |= (1 << RCS);
+		}
+		if (data & GEN6_GRDOM_MEDIA) {
+			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+			engine_mask |= (1 << VCS);
+		}
+		if (data & GEN6_GRDOM_BLT) {
+			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+			engine_mask |= (1 << BCS);
+		}
+		if (data & GEN6_GRDOM_VECS) {
+			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+			engine_mask |= (1 << VECS);
+		}
+		if (data & GEN8_GRDOM_MEDIA2) {
+			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+			if (HAS_BSD2(vgpu->gvt->dev_priv))
+				engine_mask |= (1 << VCS2);
+		}
+	}
+
+	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
+
+	return 0;
 }

 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
	return 0;
 }

-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
 {
	u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
 {
-	int rc = 0;
	unsigned int id = 0;

	write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
		id = VECS;
		break;
	default:
-		rc = -EINVAL;
-		break;
+		return -EINVAL;
	}
	set_bit(id, (void *)vgpu->tlb_handle_pending);

-	return rc;
+	return 0;
 }

 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
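Replacing `void *read, void *write` with fully typed function pointers in new_mmio_info() turns handler-signature mismatches (such as the old `static bool sbi_ctl_mmio_write`, fixed in the same series) into compile-time diagnostics instead of silent void-pointer conversions. A minimal sketch of the idea; all names here are invented for illustration:

```c
#include <stdio.h>

struct vgpu; /* opaque, as in the driver */

/* Typed handler signature: the compiler now checks every registration. */
typedef int (*mmio_handler_t)(struct vgpu *, unsigned int, void *, unsigned int);

static int default_read(struct vgpu *v, unsigned int off,
			void *data, unsigned int bytes)
{
	(void)v; (void)off; (void)data;
	return (int)bytes;
}

static int new_mmio_info_sim(unsigned int offset,
			     mmio_handler_t read, mmio_handler_t write)
{
	/* Passing a function with the wrong prototype here is a
	 * compile error, unlike with "void *read, void *write". */
	(void)write;
	return read(NULL, offset, NULL, 4);
}

int main(void)
{
	printf("%d\n", new_mmio_info_sim(0x2000, default_read, default_read));
	return 0;
}
```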
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	struct intel_vgpu_type *type;
 	struct device *pdev;
 	void *gvt;
+	int ret;

 	pdev = mdev_parent_dev(mdev);
 	gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	if (!type) {
 		gvt_err("failed to find type %s to create\n",
						kobject_name(kobj));
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}

 	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
 	if (IS_ERR_OR_NULL(vgpu)) {
-		gvt_err("create intel vgpu failed\n");
-		return -EINVAL;
+		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+		gvt_err("failed to create intel vgpu: %d\n", ret);
+		goto out;
 	}

 	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)

 	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
-	return 0;
+	ret = 0;
+
+out:
+	return ret;
 }

 static int intel_vgpu_remove(struct mdev_device *mdev)
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
 		goto err;

-	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-	if (!mmio && !vgpu->mmio.disable_warn_untrack) {
-		gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
-			vgpu->id, offset, bytes, *(u32 *)p_data);
-
-		if (offset == 0x206c) {
-			gvt_err("------------------------------------------\n");
-			gvt_err("vgpu%d: likely triggers a gfx reset\n",
-				vgpu->id);
-			gvt_err("------------------------------------------\n");
-			vgpu->mmio.disable_warn_untrack = true;
-		}
-	}
-
 	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
 		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
 			goto err;
 	}

+	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
 	if (mmio) {
 		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
 			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 				goto err;
 		}
 		ret = mmio->read(vgpu, offset, p_data, bytes);
-	} else
+	} else {
 		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+
+		if (!vgpu->mmio.disable_warn_untrack) {
+			gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+				vgpu->id, offset, bytes, *(u32 *)p_data);
+
+			if (offset == 0x206c) {
+				gvt_err("------------------------------------------\n");
+				gvt_err("vgpu%d: likely triggers a gfx reset\n",
+					vgpu->id);
+				gvt_err("------------------------------------------\n");
+				vgpu->mmio.disable_warn_untrack = true;
+			}
+		}
+	}

 	if (ret)
 		goto err;

@@ -302,3 +303,56 @@ err:
 	mutex_unlock(&gvt->lock);
 	return ret;
 }
+
+
+/**
+ * intel_vgpu_reset_mmio - reset virtual MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	const struct intel_gvt_device_info *info = &gvt->device_info;
+
+	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+	/* set the bit 0:2(Core C-State ) to C0 */
+	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+}
+
+/**
+ * intel_vgpu_init_mmio - init MMIO space
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
+{
+	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+
+	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+	if (!vgpu->mmio.vreg)
+		return -ENOMEM;
+
+	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+	intel_vgpu_reset_mmio(vgpu);
+
+	return 0;
+}
+
+/**
+ * intel_vgpu_clean_mmio - clean MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
+{
+	vfree(vgpu->mmio.vreg);
+	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
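intel_vgpu_init_mmio() makes a single vzalloc of twice mmio_size and carves it into the vreg half (guest-visible register values) and the sreg half (shadow copy) by simple pointer arithmetic; intel_vgpu_clean_mmio() then needs exactly one vfree. The same layout in portable C, with calloc standing in for vzalloc and an arbitrary example size:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t mmio_size = 2 * 1024 * 1024; /* illustrative; real size is per-device */

	/* One zeroed allocation of 2x mmio_size backs both register files:
	 * vreg first, sreg immediately after. */
	unsigned char *vreg = calloc(2, mmio_size);
	if (!vreg)
		return 1;
	unsigned char *sreg = vreg + mmio_size;

	/* intel_vgpu_reset_mmio() recopies the firmware snapshot into both
	 * halves; a memset/memcpy stands in for that here. */
	memset(vreg, 0xab, mmio_size); /* stands in for gvt->firmware.mmio */
	memcpy(sreg, vreg, mmio_size);

	printf("vreg=%p sreg=%p delta=%zu bytes\n",
	       (void *)vreg, (void *)sreg, (size_t)(sreg - vreg));

	free(vreg); /* one allocation, one free -- as in intel_vgpu_clean_mmio() */
	return 0;
}
```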
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
		*offset; \
 })

+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+
 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);

 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
			vgpu->id))
 		return -EINVAL;

-	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
-			GFP_DMA32 | __GFP_ZERO,
-			INTEL_GVT_OPREGION_PORDER);
+	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+			__GFP_ZERO,
+			get_order(INTEL_GVT_OPREGION_SIZE));

 	if (!vgpu_opregion(vgpu)->va)
 		return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
 		map_vgpu_opregion(vgpu, false);
 		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
-				INTEL_GVT_OPREGION_PORDER);
+				get_order(INTEL_GVT_OPREGION_SIZE));

 		vgpu_opregion(vgpu)->va = NULL;
 	}
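Replacing the hard-coded INTEL_GVT_OPREGION_PORDER with get_order(INTEL_GVT_OPREGION_SIZE) keeps the allocation order derived from the size, so alloc and free can never drift apart. A userspace re-implementation of get_order() (same math as the kernel's generic version) showing that two 4 KiB pages map to order 1:

```c
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

/* Smallest power-of-two page count, expressed as an order, that
 * covers `size` bytes -- mirrors the kernel's generic get_order(). */
static int get_order_sim(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long opregion_size = 2 * PAGE_SIZE; /* OPREGION_PAGES * PAGE_SIZE */

	/* get_order(2 * 4096) == 1, matching the old hard-coded PORDER of 1. */
	printf("order = %d\n", get_order_sim(opregion_size));
	return 0;
}
```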
@@ -50,8 +50,7 @@
 #define INTEL_GVT_OPREGION_PARM 0x204

 #define INTEL_GVT_OPREGION_PAGES 2
-#define INTEL_GVT_OPREGION_PORDER 1
-#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)

 #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)

@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload;
+	struct intel_vgpu *vgpu;
 	int event;

 	mutex_lock(&gvt->lock);

 	workload = scheduler->current_workload[ring_id];
+	vgpu = workload->vgpu;

-	if (!workload->status && !workload->vgpu->resetting) {
+	if (!workload->status && !vgpu->resetting) {
 		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

		for_each_set_bit(event, workload->pending_events,
				 INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(workload->vgpu,
-					event);
+			intel_vgpu_trigger_virtual_event(vgpu, event);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

	scheduler->current_workload[ring_id] = NULL;

-	atomic_dec(&workload->vgpu->running_workload_num);
-
	list_del_init(&workload->list);
	workload->complete(workload);

+	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);
	mutex_unlock(&gvt->lock);
 }
@@ -459,11 +459,11 @@ complete:
	gvt_dbg_sched("will complete workload %p\n, status: %d\n",
			workload, workload->status);

-	complete_current_workload(gvt, ring_id);
-
	if (workload->req)
		i915_gem_request_put(fetch_and_zero(&workload->req));

+	complete_current_workload(gvt, ring_id);
+
	if (need_force_wake)
		intel_uncore_forcewake_put(gvt->dev_priv,
				FORCEWAKE_ALL);
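These scheduler hunks fix a use-after-free: workload->complete() can free the workload, so the vgpu pointer is cached up front and running_workload_num is decremented through the cached pointer only after completion (which also means the count drops when the workload is really done). A self-contained demonstration of the corrected ordering, with toy types standing in for the driver's:

```c
#include <stdio.h>
#include <stdlib.h>

struct vgpu { int running_workload_num; };

struct workload {
	struct vgpu *vgpu;
	void (*complete)(struct workload *);
};

static void complete_and_free(struct workload *w)
{
	free(w); /* after this, any w-> access would be a use-after-free */
}

static void finish(struct workload *w)
{
	/* The fix: cache the vgpu pointer *before* complete() may free w. */
	struct vgpu *vgpu = w->vgpu;

	w->complete(w);

	vgpu->running_workload_num--; /* safe: does not dereference w */
}

int main(void)
{
	struct vgpu v = { .running_workload_num = 1 };
	struct workload *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->vgpu = &v;
	w->complete = complete_and_free;
	finish(w);
	printf("running workloads: %d\n", v.running_workload_num);
	return 0;
}
```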
@@ -35,79 +35,6 @@
 #include "gvt.h"
 #include "i915_pvinfo.h"

-static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-	vfree(vgpu->mmio.vreg);
-	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
-}
-
-int setup_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-	struct intel_gvt *gvt = vgpu->gvt;
-	const struct intel_gvt_device_info *info = &gvt->device_info;
-
-	if (vgpu->mmio.vreg)
-		memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
-	else {
-		vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
-		if (!vgpu->mmio.vreg)
-			return -ENOMEM;
-	}
-
-	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
-	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
-	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
-
-	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-
-	/* set the bit 0:2(Core C-State ) to C0 */
-	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
-	return 0;
-}
-
-static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
-	struct intel_vgpu_creation_params *param)
-{
-	struct intel_gvt *gvt = vgpu->gvt;
-	const struct intel_gvt_device_info *info = &gvt->device_info;
-	u16 *gmch_ctl;
-	int i;
-
-	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
-	       info->cfg_space_size);
-
-	if (!param->primary) {
-		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
-				INTEL_GVT_PCI_CLASS_VGA_OTHER;
-		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
-				INTEL_GVT_PCI_CLASS_VGA_OTHER;
-	}
-
-	/* Show guest that there isn't any stolen memory.*/
-	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
-	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
-
-	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
-				 gvt_aperture_pa_base(gvt), true);
-
-	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
-					     | PCI_COMMAND_MEMORY
-					     | PCI_COMMAND_MASTER);
-	/*
-	 * Clear the bar upper 32bit and let guest to assign the new value
-	 */
-	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
-	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
-	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
-
-	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
-		vgpu->cfg_space.bar[i].size = pci_resource_len(
-					      gvt->dev_priv->drm.pdev, i * 2);
-		vgpu->cfg_space.bar[i].tracked = false;
-	}
-}
-
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
 	/* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		if (low_avail / min_low == 0)
 			break;
 		gvt->types[i].low_gm_size = min_low;
-		gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
 		gvt->types[i].fence = 4;
 		gvt->types[i].max_instance = low_avail / min_low;
 		gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 	 */
 	low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
-	high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+	high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
 	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	intel_vgpu_clean_gtt(vgpu);
 	intel_gvt_hypervisor_detach_vgpu(vgpu);
 	intel_vgpu_free_resource(vgpu);
-	clean_vgpu_mmio(vgpu);
+	intel_vgpu_clean_mmio(vgpu);
 	vfree(vgpu);

 	intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->gvt = gvt;
 	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

-	setup_vgpu_cfg_space(vgpu, param);
+	intel_vgpu_init_cfg_space(vgpu, param->primary);

-	ret = setup_vgpu_mmio(vgpu);
+	ret = intel_vgpu_init_mmio(vgpu);
 	if (ret)
-		goto out_free_vgpu;
+		goto out_clean_idr;

 	ret = intel_vgpu_alloc_resource(vgpu, param);
 	if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
 out_clean_vgpu_resource:
 	intel_vgpu_free_resource(vgpu);
 out_clean_vgpu_mmio:
-	clean_vgpu_mmio(vgpu);
+	intel_vgpu_clean_mmio(vgpu);
+out_clean_idr:
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
 	vfree(vgpu);
 	mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 }

 /**
- * intel_gvt_reset_vgpu - reset a virtual GPU
+ * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
  * @vgpu: virtual GPU
+ * @dmlr: vGPU Device Model Level Reset or GT Reset
+ * @engine_mask: engines to reset for GT reset
+ *
+ * This function is called when user wants to reset a virtual GPU through
+ * device model reset or GT reset. The caller should hold the gvt lock.
+ *
+ * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
+ * the whole vGPU to default state as when it is created. This vGPU function
+ * is required both for functionary and security concerns.The ultimate goal
+ * of vGPU FLR is that reuse a vGPU instance by virtual machines. When we
+ * assign a vGPU to a virtual machine we must isse such reset first.
+ *
+ * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
+ * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
+ * Unlike the FLR, GT reset only reset particular resource of a vGPU per
+ * the reset request. Guest driver can issue a GT reset by programming the
+ * virtual GDRST register to reset specific virtual GPU engine or all
+ * engines.
+ *
+ * The parameter dev_level is to identify if we will do DMLR or GT reset.
+ * The parameter engine_mask is to specific the engines that need to be
+ * resetted. If value ALL_ENGINES is given for engine_mask, it means
+ * the caller requests a full GT reset that we will reset all virtual
+ * GPU engines. For FLR, engine_mask is ignored.
+ */
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+				 unsigned int engine_mask)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+	gvt_dbg_core("------------------------------------------\n");
+	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
+		     vgpu->id, dmlr, engine_mask);
+	vgpu->resetting = true;
+
+	intel_vgpu_stop_schedule(vgpu);
+	/*
+	 * The current_vgpu will set to NULL after stopping the
+	 * scheduler when the reset is triggered by current vgpu.
+	 */
+	if (scheduler->current_vgpu == NULL) {
+		mutex_unlock(&gvt->lock);
+		intel_gvt_wait_vgpu_idle(vgpu);
+		mutex_lock(&gvt->lock);
+	}
+
+	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+
+	/* full GPU reset or device model level reset */
+	if (engine_mask == ALL_ENGINES || dmlr) {
+		intel_vgpu_reset_gtt(vgpu, dmlr);
+		intel_vgpu_reset_resource(vgpu);
+		intel_vgpu_reset_mmio(vgpu);
+		populate_pvinfo_page(vgpu);
+
+		if (dmlr)
+			intel_vgpu_reset_cfg_space(vgpu);
+	}
+
+	vgpu->resetting = false;
+	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
+	gvt_dbg_core("------------------------------------------\n");
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
+ * @vgpu: virtual GPU
+ *
 * This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  */
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
 {
+	mutex_lock(&vgpu->gvt->lock);
+	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
+	mutex_unlock(&vgpu->gvt->lock);
 }
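intel_gvt_reset_vgpu() is now a thin wrapper that takes gvt->lock around intel_gvt_reset_vgpu_locked(), while callers that already hold the lock (like gdrst_mmio_write()) call the _locked worker directly. That `_locked` naming convention, sketched with a pthread mutex in place of the kernel mutex (build with -lpthread):

```c
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t gvt_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: assumes the caller already holds gvt_lock. */
static void reset_vgpu_locked(int id, int dmlr, unsigned int engine_mask)
{
	printf("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
	       id, dmlr, engine_mask);
}

/* Public wrapper: takes and drops the lock itself. */
static void reset_vgpu(int id)
{
	pthread_mutex_lock(&gvt_lock);
	reset_vgpu_locked(id, 1, 0); /* full device-model level reset */
	pthread_mutex_unlock(&gvt_lock);
}

int main(void)
{
	reset_vgpu(3);
	return 0;
}
```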
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
 {
-	struct drm_device *dev = obj->base.dev;
 	void *vaddr = obj->phys_handle->vaddr + args->offset;
 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
-	int ret;

 	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
-				   I915_WAIT_ALL,
-				   MAX_SCHEDULE_TIMEOUT,
-				   to_rps_client(file));
-	if (ret)
-		return ret;
-
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten) {
-			ret = -EFAULT;
-			goto out;
-		}
-	}
+	if (copy_from_user(vaddr, user_data, args->size))
+		return -EFAULT;

 	drm_clflush_virt_range(vaddr, args->size);
-	i915_gem_chipset_flush(to_i915(dev));
+	i915_gem_chipset_flush(to_i915(obj->base.dev));

-out:
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
-	return ret;
+	return 0;
 }

 void *i915_gem_object_alloc(struct drm_device *dev)
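With struct_mutex no longer held in i915_gem_phys_pwrite(), a plain copy_from_user() — which may fault and sleep — is safe, so the inatomic-nocache attempt plus the unlock/copy/relock fallback collapses into a single call. The shape of the resulting flow, with a trivial stand-in for copy_from_user():

```c
#include <stdio.h>
#include <string.h>

/* Stand-in: the real copy_from_user() returns the number of bytes it
 * could NOT copy, so nonzero means failure (-EFAULT). */
static size_t copy_from_user_sim(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0; /* 0 bytes left uncopied == success */
}

static int phys_pwrite_sim(void *vaddr, const void *user_data, size_t size)
{
	if (copy_from_user_sim(vaddr, user_data, size))
		return -14; /* -EFAULT */
	/* the driver then clflushes the range and flushes the chipset */
	return 0;
}

int main(void)
{
	char dst[16] = {0}, src[16] = "hello";
	int ret = phys_pwrite_sim(dst, src, sizeof(src));

	printf("ret=%d data=%s\n", ret, dst);
	return 0;
}
```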
@@ -199,6 +199,7 @@ found:
 	}

 	/* Unbinding will emit any required flushes */
+	ret = 0;
 	while (!list_empty(&eviction_list)) {
 		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
@@ -2967,6 +2967,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
 	unsigned int rotation = plane_state->base.rotation;
 	int ret;

+	if (!plane_state->base.visible)
+		return 0;
+
 	/* Rotate src coordinates to match rotated GTT view */
 	if (drm_rotation_90_or_270(rotation))
 		drm_rect_rotate(&plane_state->base.src,
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
					uint32_t *batch,
					uint32_t index)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

-	/*
-	 * WaDisableLSQCROPERFforOCL:kbl
-	 * This WA is implemented in skl_init_clock_gating() but since
-	 * this batch updates GEN8_L3SQCREG4 with default value we need to
-	 * set this bit here to retain the WA during flush.
-	 */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
 	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FENCE_DEST_SLM_DISABLE);

-	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
-	 * involving this register should also be added to WA batch as required.
-	 */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-		/* WaDisableLSQCROPERFforOCL:kbl */
-		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-			   GEN8_LQSC_RO_PERF_DIS);
-
 	/* WaToEnableHwFixForPushConstHWBug:kbl */
 	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,