/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};
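
/*
 * Write back the CPU cache lines covering the object's backing pages,
 * then flag a frontbuffer flush so CPU writes become visible on screen.
 */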
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}
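
/*
 * Work callback for the clflush fence: run once every fence we awaited
 * on has signaled. Pin the backing pages, flush them, then unpin.
 */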
static int clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);
	struct drm_i915_gem_object *obj = clflush->obj;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	__do_clflush(obj);
	i915_gem_object_unpin_pages(obj);

	return 0;
}
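
/* Drop the object reference taken in clflush_work_create(). */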
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};
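
/*
 * Allocate and initialise an asynchronous clflush for @obj, taking a
 * reference on the object that is released again in clflush_release().
 * On allocation failure returns NULL and the caller falls back to a
 * synchronous flush.
 */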
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}
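
/**
 * i915_gem_clflush_object - flush the object out of the CPU caches
 * @obj: the GEM object whose cache-dirty pages should be flushed
 * @flags: I915_CLFLUSH_SYNC to perform the flush before returning,
 *	   I915_CLFLUSH_FORCE to flush even a cache-coherent object
 *
 * Returns true if a flush was required (and queued or performed),
 * false if the object needed no flushing.
 */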
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	assert_object_held(obj);

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not to need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = clflush_work_create(obj);
	if (clflush) {
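		/*
		 * Queue the flush to run after all current work on the
		 * object completes, and publish it as the new exclusive
		 * fence so that subsequent users wait for the flush.
		 */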
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						i915_fence_timeout(to_i915(obj->base.dev)),
						I915_FENCE_GFP);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
	} else if (obj->mm.pages) {
		__do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}