drm/i915/shmem: ensure flush during swap-in on non-LLC
On non-LLC platforms, force the flush-on-acquire if this is ever
swapped-in. Our async flush path is not trustworthy enough yet (and
happens in the wrong order), and with some tricks it's conceivable for
userspace to change the cache-level to I915_CACHE_NONE after the pages
are swapped-in, and since execbuf binds the object before doing the
async flush, there is a potential race window.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211018174508.2137279-6-matthew.auld@intel.com
parent 6343034771
commit d70af57944
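To make the commit message above concrete, here is a minimal, purely illustrative
userspace sketch of the flush-on-acquire idea. The struct and function names below
are invented for illustration and are not the real i915 code paths: releasing the
pages on a non-LLC platform marks the object's CPU caches as dirty, so the next
acquire flushes before the GPU could observe stale data, instead of relying on an
async flush that may run after the object is already bound.

/*
 * Illustrative sketch only; toy_object, release_pages() and acquire_pages()
 * are hypothetical stand-ins, not i915 internals.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_object {
	bool has_llc;      /* platform shares the LLC between CPU and GPU */
	bool cache_dirty;  /* CPU caches may hold data the GPU cannot see */
	bool pages_present;
};

/* Called when the pages are released, e.g. before swap-out. */
static void release_pages(struct toy_object *obj)
{
	obj->pages_present = false;
	/*
	 * On non-LLC platforms we cannot trust that a later async flush
	 * runs before the GPU uses the pages, so force a flush on the
	 * next acquire instead.
	 */
	if (!obj->has_llc)
		obj->cache_dirty = true;
}

/* Called when the pages are (re)acquired, e.g. swapped back in. */
static void acquire_pages(struct toy_object *obj)
{
	obj->pages_present = true;
	if (obj->cache_dirty) {
		/* Stand-in for flushing CPU caches (drm_clflush_sg() in DRM). */
		printf("flushing CPU caches before GPU use\n");
		obj->cache_dirty = false;
	}
}

int main(void)
{
	struct toy_object obj = { .has_llc = false };

	release_pages(&obj);   /* swap-out path marks the object dirty */
	acquire_pages(&obj);   /* swap-in path now flushes unconditionally */
	return 0;
}

The design choice mirrored here is that forcing the flush at acquire time removes any
dependence on the ordering between the async flush and execbuf binding the object.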
@@ -286,6 +286,8 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 				struct sg_table *pages,
 				bool needs_clflush)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
 	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
 	if (obj->mm.madv == I915_MADV_DONTNEED)
@@ -297,6 +299,16 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 	drm_clflush_sg(pages);
 
 	__start_cpu_write(obj);
+	/*
+	 * On non-LLC platforms, force the flush-on-acquire if this is ever
+	 * swapped-in. Our async flush path is not trustworthy enough yet (and
+	 * happens in the wrong order), and with some tricks it's conceivable
+	 * for userspace to change the cache-level to I915_CACHE_NONE after the
+	 * pages are swapped-in, and since execbuf binds the object before doing
+	 * the async flush, we have a race window.
+	 */
+	if (!HAS_LLC(i915))
+		obj->cache_dirty = true;
 }
 
 void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)