drm/i915: Remove locking from i915_gem_object_prepare_read/write

Execbuffer submission will perform its own WW locking, and we cannot
rely on the implicit lock there. This also makes it clear that the GVT
code will get a lockdep splat when multiple batchbuffer shadows need to
be performed in the same instance, fix that up.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200819140904.1708856-7-maarten.lankhorst@linux.intel.com
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
commit 1af343cdc1
parent 80f0b679d6
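The patch inverts the locking rule: i915_gem_object_prepare_read/write() now assert that the caller already holds the object lock instead of taking it themselves, and i915_gem_object_finish_access() only unpins pages without unlocking. A minimal sketch of a caller under the new convention, modelled on the selftest hunks below — the function read_first_page() and its body are illustrative, not part of the patch:

/*
 * Illustrative caller (not from the patch): the caller owns the object
 * lock across the whole prepare/access/finish sequence.
 */
static int read_first_page(struct drm_i915_gem_object *obj, u32 *out)
{
	unsigned int needs_clflush;
	u32 *map;
	int err;

	i915_gem_object_lock(obj, NULL);	/* caller takes the lock */
	err = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (err)
		goto out_unlock;

	map = kmap_atomic(i915_gem_object_get_page(obj, 0));
	if (needs_clflush)
		drm_clflush_virt_range(map, PAGE_SIZE);
	*out = map[0];
	kunmap_atomic(map);

	i915_gem_object_finish_access(obj);	/* now only unpins pages */
out_unlock:
	i915_gem_object_unlock(obj);		/* caller drops the lock */
	return err;
}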
@@ -576,19 +576,17 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		return ret;
+	assert_object_held(obj);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE,
 				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -616,8 +614,6 @@ out:
 
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-err_unlock:
-	i915_gem_object_unlock(obj);
 	return ret;
 }
 
@@ -630,20 +626,18 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		return ret;
+	assert_object_held(obj);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
 				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -680,7 +674,5 @@ out:
 
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-err_unlock:
-	i915_gem_object_unlock(obj);
 	return ret;
 }
@@ -991,11 +991,14 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 
 		vaddr = unmask_page(cache->vaddr);
 		if (cache->vaddr & KMAP) {
+			struct drm_i915_gem_object *obj =
+				(struct drm_i915_gem_object *)cache->node.mm;
 			if (cache->vaddr & CLFLUSH_AFTER)
 				mb();
 
 			kunmap_atomic(vaddr);
-			i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
+			i915_gem_object_finish_access(obj);
+			i915_gem_object_unlock(obj);
 		} else {
 			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
@@ -1031,10 +1034,16 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 	unsigned int flushes;
 	int err;
 
-	err = i915_gem_object_prepare_write(obj, &flushes);
+	err = i915_gem_object_lock_interruptible(obj, NULL);
 	if (err)
 		return ERR_PTR(err);
 
+	err = i915_gem_object_prepare_write(obj, &flushes);
+	if (err) {
+		i915_gem_object_unlock(obj);
+		return ERR_PTR(err);
+	}
+
 	BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
 	BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
 
@@ -432,7 +432,6 @@ static inline void
 i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
 {
 	i915_gem_object_unpin_pages(obj);
-	i915_gem_object_unlock(obj);
 }
 
 static inline struct intel_engine_cs *
@@ -964,9 +964,10 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	unsigned long n;
 	int err;
 
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_prepare_read(obj, &needs_flush);
 	if (err)
-		return err;
+		goto err_unlock;
 
 	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
 		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
@@ -986,6 +987,8 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	}
 
 	i915_gem_object_finish_access(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 
 	return err;
 }
@@ -27,9 +27,10 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 	u32 *cpu;
 	int err;
 
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
 	if (err)
-		return err;
+		goto out;
 
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
@@ -46,7 +47,9 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 	kunmap_atomic(map);
 	i915_gem_object_finish_access(ctx->obj);
 
-	return 0;
+out:
+	i915_gem_object_unlock(ctx->obj);
+	return err;
 }
 
 static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
@@ -57,9 +60,10 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 	u32 *cpu;
 	int err;
 
+	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
 	if (err)
-		return err;
+		goto out;
 
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
@@ -73,7 +77,9 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 	kunmap_atomic(map);
 	i915_gem_object_finish_access(ctx->obj);
 
-	return 0;
+out:
+	i915_gem_object_unlock(ctx->obj);
+	return err;
 }
 
 static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
@@ -461,9 +461,10 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	unsigned int n, m, need_flush;
 	int err;
 
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_prepare_write(obj, &need_flush);
 	if (err)
-		return err;
+		goto out;
 
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map;
@@ -479,7 +480,9 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	i915_gem_object_finish_access(obj);
 	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
 	obj->write_domain = 0;
-	return 0;
+out:
+	i915_gem_object_unlock(obj);
+	return err;
 }
 
 static noinline int cpu_check(struct drm_i915_gem_object *obj,
@@ -488,9 +491,10 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 	unsigned int n, m, needs_flush;
 	int err;
 
+	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_prepare_read(obj, &needs_flush);
 	if (err)
-		return err;
+		goto out_unlock;
 
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map;
@@ -527,6 +531,8 @@ out_unmap:
 	}
 
 	i915_gem_object_finish_access(obj);
+out_unlock:
+	i915_gem_object_unlock(obj);
 	return err;
 }
 
@@ -1923,6 +1923,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	if (ret)
 		goto err_unmap;
 
+	i915_gem_object_unlock(bb->obj);
 	INIT_LIST_HEAD(&bb->list);
 	list_add(&bb->list, &s->workload->shadow_bb);
 
@@ -335,12 +335,20 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 	u64 remain;
 	int ret;
 
-	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
+	ret = i915_gem_object_lock_interruptible(obj, NULL);
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
+	if (ret) {
+		i915_gem_object_unlock(obj);
+		return ret;
+	}
+
 	fence = i915_gem_object_lock_fence(obj);
 	i915_gem_object_finish_access(obj);
+	i915_gem_object_unlock(obj);
+
 	if (!fence)
 		return -ENOMEM;
 
@@ -734,12 +742,20 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	u64 remain;
 	int ret;
 
-	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
+	ret = i915_gem_object_lock_interruptible(obj, NULL);
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
+	if (ret) {
+		i915_gem_object_unlock(obj);
+		return ret;
+	}
+
 	fence = i915_gem_object_lock_fence(obj);
 	i915_gem_object_finish_access(obj);
+	i915_gem_object_unlock(obj);
+
 	if (!fence)
 		return -ENOMEM;
 