drm/i915: Refactor unsetting obj->mm.pages
As i915_gem_object_attach_phys() wants to play dirty and mess around with obj->mm.pages itself (replacing the shmemfs backing with a DMA allocation), refactor the gubbins into __i915_gem_object_unset_pages() so that we don't have to duplicate all the secrets.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180611075532.26534-1-chris@chris-wilson.co.uk
Link: https://patchwork.freedesktop.org/patch/msgid/152871104647.1718.8796913290418060204@jlahtine-desk.ger.corp.intel.com
parent 51c18bf7fd
commit acd1c1e621
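The pattern the message describes, reduced to a minimal userspace C sketch (every toy_* name below is invented for illustration and none of this is kernel API): one helper detaches the backing pages and returns them, so one caller can release them while another can install a replacement and, on failure, hand the originals back, just as the diff below does with __i915_gem_object_unset_pages().

/*
 * Minimal userspace sketch of the refactor pattern (illustrative only; the
 * toy_* names are not kernel API). A single "unset" helper detaches the
 * backing pages and hands ownership to the caller.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_pages {
        size_t count;
};

struct toy_object {
        struct toy_pages *pages;        /* loosely analogous to obj->mm.pages */
};

/* Detach and return the pages, leaving the object empty (cf. fetch_and_zero()). */
static struct toy_pages *toy_unset_pages(struct toy_object *obj)
{
        struct toy_pages *pages = obj->pages;

        obj->pages = NULL;
        return pages;
}

/* Caller 1: ordinary teardown, release whatever was attached. */
static void toy_put_pages(struct toy_object *obj)
{
        free(toy_unset_pages(obj));
}

/* Caller 2: swap in a different backing store; restore the old one on failure. */
static int toy_attach_phys(struct toy_object *obj, int simulate_failure)
{
        struct toy_pages *old = toy_unset_pages(obj);
        struct toy_pages *dma = calloc(1, sizeof(*dma));

        if (!dma || simulate_failure) {
                free(dma);
                obj->pages = old;       /* error unwind: reattach the old pages */
                return -1;
        }

        dma->count = old ? old->count : 0;
        free(old);
        obj->pages = dma;
        return 0;
}

int main(void)
{
        struct toy_object obj = { .pages = calloc(1, sizeof(*obj.pages)) };

        if (toy_attach_phys(&obj, 1))
                printf("attach failed, original pages restored\n");
        if (!toy_attach_phys(&obj, 0))
                printf("attach succeeded, backing store replaced\n");

        toy_put_pages(&obj);
        return 0;
}

Returning the detached table instead of freeing it in place is what lets i915_gem_object_attach_phys() reuse the same teardown and still restore the original pages on its error path.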
drivers/gpu/drm/i915/i915_gem.c

@@ -2401,29 +2401,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 	rcu_read_unlock();
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;
 
-	if (i915_gem_object_has_pinned_pages(obj))
-		return;
-
-	GEM_BUG_ON(obj->bind_count);
-	if (!i915_gem_object_has_pages(obj))
-		return;
-
-	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock_nested(&obj->mm.lock, subclass);
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
-		goto unlock;
-
-	/* ->put_pages might need to allocate memory for the bit17 swizzle
-	 * array, hence protect them from being reaped by removing them from gtt
-	 * lists early. */
 	pages = fetch_and_zero(&obj->mm.pages);
-	GEM_BUG_ON(!pages);
+	if (!pages)
+		return NULL;
 
 	spin_lock(&i915->mm.obj_lock);
 	list_del(&obj->mm.link);
@@ -2442,12 +2428,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	}
 
 	__i915_gem_object_reset_page_iter(obj);
+	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+	return pages;
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				 enum i915_mm_subclass subclass)
+{
+	struct sg_table *pages;
+
+	if (i915_gem_object_has_pinned_pages(obj))
+		return;
+
+	GEM_BUG_ON(obj->bind_count);
+	if (!i915_gem_object_has_pages(obj))
+		return;
+
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	mutex_lock_nested(&obj->mm.lock, subclass);
+	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+		goto unlock;
 
+	/*
+	 * ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early.
+	 */
+	pages = __i915_gem_object_unset_pages(obj);
 	if (!IS_ERR(pages))
 		obj->ops->put_pages(obj, pages);
 
-	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
 unlock:
 	mutex_unlock(&obj->mm.lock);
 }
@@ -6089,16 +6100,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		goto err_unlock;
 	}
 
-	pages = fetch_and_zero(&obj->mm.pages);
-	if (pages) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
-		__i915_gem_object_reset_page_iter(obj);
-
-		spin_lock(&i915->mm.obj_lock);
-		list_del(&obj->mm.link);
-		spin_unlock(&i915->mm.obj_lock);
-	}
+	pages = __i915_gem_object_unset_pages(obj);
 
 	obj->ops = &i915_gem_phys_ops;
 
@@ -6116,7 +6118,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 
 err_xfer:
 	obj->ops = &i915_gem_object_ops;
-	obj->mm.pages = pages;
+	if (!IS_ERR_OR_NULL(pages)) {
+		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+	}
 err_unlock:
 	mutex_unlock(&obj->mm.lock);
 	return err;