Merge tag 'drm-intel-next-fixes-2022-01-13' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
- Hold runtime PM wakelock during PXP unbind (Juston Li)
- Three fixes for the TTM backend fault handling (Matthew Auld)
- Make sure to unmap when purging in the TTM backend (Matthew Auld)

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Yd/xzyCM87rfrwQT@tursulin-mobl2
commit 8f18a987ca
@@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
 	struct i915_mmap_offset *mmo, *mn;
 
+	if (obj->ops->unmap_virtual)
+		obj->ops->unmap_virtual(obj);
+
 	spin_lock(&obj->mmo.lock);
 	rbtree_postorder_for_each_entry_safe(mmo, mn,
 					     &obj->mmo.offsets, offset) {
@@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops {
 	int (*pwrite)(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *arg);
 	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
+	void (*unmap_virtual)(struct drm_i915_gem_object *obj);
 
 	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
 
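Note: the new unmap_virtual hook gives a backend that provides its own
mmap_ops (like TTM) a way to zap any live CPU PTEs when its mmap offset
is revoked or its pages are purged. As a rough sketch of the contract,
a backend built on the generic drm vma-manager could implement it as
below; the function name is illustrative and this is not part of the
series:

/* Illustrative sketch only -- not part of this series. Zap the CPU
 * page-table entries behind the object's mmap offset so the next
 * access faults back into the driver. */
static void example_unmap_virtual(struct drm_i915_gem_object *obj)
{
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
}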
@@ -161,7 +161,6 @@ retry:
 /* Immediately discard the backing storage */
 int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-	drm_gem_free_mmap_offset(&obj->base);
 	if (obj->ops->truncate)
 		return obj->ops->truncate(obj);
 
@@ -556,6 +556,20 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
 	return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
 }
 
+static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
+{
+	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+	int err;
+
+	WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
+
+	err = i915_ttm_move_notify(bo);
+	if (err)
+		return err;
+
+	return i915_ttm_purge(obj);
+}
+
 static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
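With i915_ttm_truncate in place, the truncate path tears mappings down
before dropping the backing store. A rough call-flow sketch (commentary,
not part of the patch; assumes the shrinker picks a DONTNEED object):

/*
 * i915_gem_object_truncate(obj)
 *   -> obj->ops->truncate(obj)        // now i915_ttm_truncate for TTM
 *        -> i915_ttm_move_notify(bo)  // unbind/unmap before the purge
 *        -> i915_ttm_purge(obj)       // discard the backing storage
 */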
@@ -883,6 +897,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 	if (ret)
 		return ret;
 
+	if (obj->mm.madv != I915_MADV_WILLNEED) {
+		dma_resv_unlock(bo->base.resv);
+		return VM_FAULT_SIGBUS;
+	}
+
 	if (drm_dev_enter(dev, &idx)) {
 		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 					       TTM_BO_VM_NUM_PREFAULT);
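For context, this aligns the TTM fault handler with the legacy one:
touching a mapping of a purged object now fails instead of quietly
re-populating it. A hypothetical userspace fragment showing the visible
effect (sketch only; fd, handle and ptr are assumed to exist, setup and
error handling omitted):

struct drm_i915_gem_madvise madv = {
	.handle = handle,		/* assumed GEM handle */
	.madv = I915_MADV_DONTNEED,
};
ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
/* ... the kernel purges the object under memory pressure ... */
((volatile char *)ptr)[0];		/* now raises SIGBUS */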
@@ -945,6 +964,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 	return drm_vma_node_offset_addr(&obj->base.vma_node);
 }
 
+static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
+{
+	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.name = "i915_gem_object_ttm",
 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
@@ -952,7 +976,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 
 	.get_pages = i915_ttm_get_pages,
 	.put_pages = i915_ttm_put_pages,
-	.truncate = i915_ttm_purge,
+	.truncate = i915_ttm_truncate,
 	.shrinker_release_pages = i915_ttm_shrinker_release_pages,
 
 	.adjust_lru = i915_ttm_adjust_lru,
@@ -960,6 +984,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.migrate = i915_ttm_migrate,
 
 	.mmap_offset = i915_ttm_mmap_offset,
+	.unmap_virtual = i915_ttm_unmap_virtual,
 	.mmap_ops = &vm_ops_ttm,
 };
 
@@ -1368,20 +1368,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
 		}
 	}
 
-	if (!obj->ops->mmap_ops) {
-		err = check_absent(addr, obj->base.size);
-		if (err) {
-			pr_err("%s: was not absent\n", obj->mm.region->name);
-			goto out_unmap;
-		}
-	} else {
-		/* ttm allows access to evicted regions by design */
-
-		err = check_present(addr, obj->base.size);
-		if (err) {
-			pr_err("%s: was not present\n", obj->mm.region->name);
-			goto out_unmap;
-		}
+	err = check_absent(addr, obj->base.size);
+	if (err) {
+		pr_err("%s: was not absent\n", obj->mm.region->name);
+		goto out_unmap;
 	}
 
 out_unmap:
@@ -107,9 +107,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
 static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
 					  struct device *tee_kdev, void *data)
 {
+	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
 	struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+	intel_wakeref_t wakeref;
 
-	intel_pxp_fini_hw(pxp);
+	with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
+		intel_pxp_fini_hw(pxp);
 
 	mutex_lock(&pxp->tee_mutex);
 	pxp->pxp_component = NULL;
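For reference, with_intel_runtime_pm_if_in_use() runs its body only when
a runtime-PM reference can be taken without waking the device, and drops
the reference when the body finishes. Roughly equivalent to (sketch of
the semantics, not the literal macro expansion):

wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
if (wakeref) {
	intel_pxp_fini_hw(pxp);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}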