linux/drivers/gpu/drm/i915/gem/i915_gem_fence.c
Chris Wilson 6951e5893b drm/i915: Move GEM object domain management from struct_mutex to local
Use the per-object local lock to control the cache domain of the
individual GEM objects, not struct_mutex. This is a huge leap forward
for us in terms of object-level synchronisation; execbuffers are
coordinated using the ww_mutex and pread/pwrite is finally fully
serialised again.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-10-chris@chris-wilson.co.uk
2019-05-28 12:45:29 +01:00

97 lines
2.0 KiB
C

/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_gem_object.h"
/*
 * A stub fence installed as the exclusive fence on an object's reservation
 * while the object is locked for CPU access; it chains behind all fences
 * already on the reservation and is signalled once they (and the lock
 * holder, via i915_gem_object_unlock_fence()) have completed.
 */
struct stub_fence {
	/* Exported dma_fence; must remain the first member — stub_release()
	 * asserts offsetof(dma) == 0 so &stub->dma aliases the allocation. */
	struct dma_fence dma;
	/* Software fence collecting waits on the reservation's fences;
	 * its notify callback drives (and ultimately releases) @dma. */
	struct i915_sw_fence chain;
};
/*
 * Notify callback for the stub's i915_sw_fence. Invoked twice over the
 * sw-fence's lifetime: once when all awaited fences have completed
 * (FENCE_COMPLETE) and once when the sw-fence itself is being torn down
 * (FENCE_FREE).
 */
static int __i915_sw_fence_call
stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
	switch (state) {
	case FENCE_COMPLETE:
		/* Everything we chained behind has fired; signal our dma_fence. */
		dma_fence_signal(&stub->dma);
		break;
	case FENCE_FREE:
		/* Drop the dma_fence reference held on behalf of the chain;
		 * the final put ends up in stub_release(). */
		dma_fence_put(&stub->dma);
		break;
	}
	return NOTIFY_DONE;
}
/* dma_fence_ops.get_driver_name: identify the driver owning this fence. */
static const char *stub_driver_name(struct dma_fence *fence)
{
	/* All stub fences belong to i915; the fence argument is unused. */
	return DRIVER_NAME;
}
/* dma_fence_ops.get_timeline_name: label shown in debug/trace output. */
static const char *stub_timeline_name(struct dma_fence *fence)
{
	/* Every stub fence reports the same per-object timeline label. */
	return "object";
}
/*
 * Final-release callback for the stub's dma_fence: tear down the embedded
 * sw-fence and free the combined allocation. Also reached directly from
 * the error path of i915_gem_object_lock_fence().
 */
static void stub_release(struct dma_fence *fence)
{
	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
	/* The chain must be quiescent before we free the allocation. */
	i915_sw_fence_fini(&stub->chain);
	/* dma_fence_free() frees via &stub->dma, so it only frees the whole
	 * stub if dma sits at offset 0 — enforce that at compile time. */
	BUILD_BUG_ON(offsetof(typeof(*stub), dma));
	dma_fence_free(&stub->dma);
}
/*
 * Minimal dma_fence_ops for the stub fence. Only naming and release hooks
 * are provided; the remaining ops (e.g. enable_signaling/wait) are left
 * unset — presumably relying on the dma-fence core defaults, confirm
 * against the dma_fence_ops documentation for this kernel version.
 */
static const struct dma_fence_ops stub_fence_ops = {
	.get_driver_name = stub_driver_name,
	.get_timeline_name = stub_timeline_name,
	.release = stub_release,
};
/*
 * i915_gem_object_lock_fence - publish a stub fence on a locked object
 * @obj: the object, which the caller must already hold locked
 *
 * Allocates a stub fence that waits on every fence currently on the
 * object's reservation, then installs it as the new exclusive fence.
 * The returned fence will not signal until both those fences have
 * completed and the caller releases it with
 * i915_gem_object_unlock_fence(), which commits the chain.
 *
 * Returns the new exclusive fence, or NULL on allocation/await failure.
 * The caller must hand the fence back to i915_gem_object_unlock_fence().
 */
struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
{
	struct stub_fence *stub;
	assert_object_held(obj);
	stub = kmalloc(sizeof(*stub), GFP_KERNEL);
	if (!stub)
		return NULL;
	i915_sw_fence_init(&stub->chain, stub_notify);
	/* Reuse the sw-fence's wait lock as the dma_fence lock; the fence
	 * lives on the driver's unordered timeline with seqno 0. */
	dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
		       to_i915(obj->base.dev)->mm.unordered_timeline,
		       0);
	/* Chain behind the reservation's fences — NOTE(review): 'true'
	 * presumably means await all fences (shared and exclusive); confirm
	 * against i915_sw_fence_await_reservation(). */
	if (i915_sw_fence_await_reservation(&stub->chain,
					    obj->resv, NULL,
					    true, I915_FENCE_TIMEOUT,
					    I915_FENCE_GFP) < 0)
		goto err;
	/* Publish: subsequent users of the reservation wait on us. */
	reservation_object_add_excl_fence(obj->resv, &stub->dma);
	return &stub->dma;
err:
	/* Nothing was published and the chain was never committed, so tear
	 * the stub down directly rather than via dma_fence_put(). */
	stub_release(&stub->dma);
	return NULL;
}
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
struct dma_fence *fence)
{
struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
i915_sw_fence_commit(&stub->chain);
}