linux/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
Chris Wilson d19d71fc2b drm/i915: Mark i915_request.timeline as a volatile, rcu pointer
The request->timeline is only valid until the request is retired (i.e.
it can only be relied upon before the request has completed, since
retirement may follow completion at any time). Upon retiring the
request, the context may be
unpinned and freed, and along with it the timeline may be freed. We
therefore need to be very careful when chasing rq->timeline that the
pointer does not disappear beneath us. The vast majority of users are in
a protected context, either during request construction or retirement,
where the timeline->mutex is held and the timeline cannot disappear. It
is those few off the beaten path (where we access a second timeline) that
need extra scrutiny -- to be added in the next patch after first adding
the warnings about dangerous access.

One complication, where we cannot use the timeline->mutex itself, is
during request submission onto hardware (under spinlocks). Here, we want
to check on the timeline to finalize the breadcrumb, and so we need to
impose a second rule to ensure that the request->timeline is indeed
valid. As we are submitting the request, its context and timeline must
be pinned, as it will be used by the hardware. Since it is pinned, we
know the request->timeline must still be valid, and we cannot submit the
idle barrier until after we release the engine->active.lock, ergo while
submitting and holding that spinlock, a second thread cannot release the
timeline.
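
As an illustration of the two rules, the guarded accessors might look
roughly like the sketch below (the helper names and exact lockdep
annotations are assumptions for illustration, not necessarily what the
follow-up patches add):

    /* rq->timeline becomes an RCU-protected pointer */
    struct intel_timeline __rcu *timeline;

    /* Only legal while the timeline->mutex is held, e.g. during
     * construction or retirement of the request.
     */
    static inline struct intel_timeline *
    i915_request_timeline(struct i915_request *rq)
    {
    	return rcu_dereference_protected(rq->timeline,
    		lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
    }

    /* Only legal under the engine->active.lock, while the request is
     * pinned for submission and the timeline cannot be released.
     */
    static inline struct intel_timeline *
    i915_request_active_timeline(struct i915_request *rq)
    {
    	return rcu_dereference_protected(rq->timeline,
    		lockdep_is_held(&rq->engine->active.lock));
    }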

v2: Don't be lazy inside selftests; hold the timeline->mutex for as long
as we need it, and tidy up acquiring the timeline with a bit of
refactoring (i915_active_add_request)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-1-chris@chris-wilson.co.uk
2019-09-20 10:24:09 +01:00

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
struct i915_sleeve {
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
};

static int vma_set_pages(struct i915_vma *vma)
{
	struct i915_sleeve *sleeve = vma->private;

	vma->pages = sleeve->pages;
	vma->page_sizes = sleeve->page_sizes;

	return 0;
}

static void vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
	vma->pages = NULL;
}

static int vma_bind(struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags)
{
	return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
}

static void vma_unbind(struct i915_vma *vma)
{
	vma->vm->vma_ops.unbind_vma(vma);
}

static const struct i915_vma_ops proxy_vma_ops = {
	.set_pages = vma_set_pages,
	.clear_pages = vma_clear_pages,
	.bind_vma = vma_bind,
	.unbind_vma = vma_unbind,
};

static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
					 struct drm_i915_gem_object *obj,
					 struct sg_table *pages,
					 struct i915_page_sizes *page_sizes)
{
	struct i915_sleeve *sleeve;
	struct i915_vma *vma;
	int err;

	sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
	if (!sleeve)
		return ERR_PTR(-ENOMEM);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_free;
	}

	vma->private = sleeve;
	vma->ops = &proxy_vma_ops;

	sleeve->vma = vma;
	sleeve->pages = pages;
	sleeve->page_sizes = *page_sizes;

	return sleeve;

err_free:
	kfree(sleeve);
	return ERR_PTR(err);
}

static void destroy_sleeve(struct i915_sleeve *sleeve)
{
	kfree(sleeve);
}
struct clear_pages_work {
	struct dma_fence dma;
	struct dma_fence_cb cb;
	struct i915_sw_fence wait;
	struct work_struct work;
	struct irq_work irq_work;
	struct i915_sleeve *sleeve;
	struct intel_context *ce;
	u32 value;
};

static const char *clear_pages_work_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
{
	return "clear";
}

static void clear_pages_work_release(struct dma_fence *fence)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), dma);

	destroy_sleeve(w->sleeve);

	i915_sw_fence_fini(&w->wait);

	BUILD_BUG_ON(offsetof(typeof(*w), dma));
	dma_fence_free(&w->dma);
}

static const struct dma_fence_ops clear_pages_work_ops = {
	.get_driver_name = clear_pages_work_driver_name,
	.get_timeline_name = clear_pages_work_timeline_name,
	.release = clear_pages_work_release,
};

static void clear_pages_signal_irq_worker(struct irq_work *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);

	dma_fence_signal(&w->dma);
	dma_fence_put(&w->dma);
}

static void clear_pages_dma_fence_cb(struct dma_fence *fence,
				     struct dma_fence_cb *cb)
{
	struct clear_pages_work *w = container_of(cb, typeof(*w), cb);

	if (fence->error)
		dma_fence_set_error(&w->dma, fence->error);

	/*
	 * Push the signalling of the fence into yet another worker to avoid
	 * the nightmare locking around the fence spinlock.
	 */
	irq_work_queue(&w->irq_work);
}
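
/*
 * Runs from the ordinary workqueue once the scheduling fence has fired:
 * flush the CPU cache if needed, pin the proxy vma, build a fill-blt
 * batch and a request on w->ce, hook w->dma up to the request's fence,
 * and emit the batch. Any error is reported through w->dma instead.
 */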
static void clear_pages_worker(struct work_struct *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), work);
	struct drm_i915_private *i915 = w->ce->engine->i915;
	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_request *rq;
	struct i915_vma *batch;
	int err = w->dma.error;

	if (unlikely(err))
		goto out_signal;

	if (obj->cache_dirty) {
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(w->sleeve->pages);
		obj->cache_dirty = false;
	}
	obj->read_domains = I915_GEM_GPU_DOMAINS;
	obj->write_domain = 0;

	/* XXX: we need to kill this */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (unlikely(err))
		goto out_unlock;

	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unpin;
	}

	rq = intel_context_create_request(w->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_batch;
	}

	/* There's no way the fence has signalled */
	if (dma_fence_add_callback(&rq->fence, &w->cb,
				   clear_pages_dma_fence_cb))
		GEM_BUG_ON(1);

	err = intel_emit_vma_mark_active(batch, rq);
	if (unlikely(err))
		goto out_request;

	if (w->ce->engine->emit_init_breadcrumb) {
		err = w->ce->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))
			goto out_request;
	}

	/*
	 * w->dma is already exported via (vma|obj)->resv, so we need only
	 * keep track of the GPU activity within this vma/request, and
	 * propagate the signal from the request to w->dma.
	 */
	err = i915_active_add_request(&vma->active, rq);
	if (err)
		goto out_request;

	err = w->ce->engine->emit_bb_start(rq,
					   batch->node.start, batch->node.size,
					   0);
out_request:
	if (unlikely(err)) {
		i915_request_skip(rq, err);
		err = 0;
	}

	i915_request_add(rq);
out_batch:
	intel_emit_vma_release(w->ce, batch);
out_unpin:
	i915_vma_unpin(vma);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
out_signal:
	if (unlikely(err)) {
		dma_fence_set_error(&w->dma, err);
		dma_fence_signal(&w->dma);
		dma_fence_put(&w->dma);
	}
}

static int __i915_sw_fence_call
clear_pages_work_notify(struct i915_sw_fence *fence,
			enum i915_sw_fence_notify state)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&w->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&w->dma);
		break;
	}

	return NOTIFY_DONE;
}

static DEFINE_SPINLOCK(fence_lock);
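
/*
 * Schedule an asynchronous fill of the supplied pages with 'value' using
 * the blitter on 'ce'. The work is gated behind an i915_sw_fence that
 * waits on the object's reservation and, on success, its dma fence is
 * installed as the object's new exclusive fence so that later users of
 * the object wait for the clear to complete.
 */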
/* XXX: better name please */
int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
				     struct intel_context *ce,
				     struct sg_table *pages,
				     struct i915_page_sizes *page_sizes,
				     u32 value)
{
	struct clear_pages_work *work;
	struct i915_sleeve *sleeve;
	int err;

	sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
	if (IS_ERR(sleeve))
		return PTR_ERR(sleeve);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		destroy_sleeve(sleeve);
		return -ENOMEM;
	}

	work->value = value;
	work->sleeve = sleeve;
	work->ce = ce;

	INIT_WORK(&work->work, clear_pages_worker);

	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);

	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
	i915_sw_fence_init(&work->wait, clear_pages_work_notify);

	i915_gem_object_lock(obj);
	err = i915_sw_fence_await_reservation(&work->wait,
					      obj->base.resv, NULL,
					      true, I915_FENCE_TIMEOUT,
					      I915_FENCE_GFP);
	if (err < 0) {
		dma_fence_set_error(&work->dma, err);
	} else {
		dma_resv_add_excl_fence(obj->base.resv, &work->dma);
		err = 0;
	}
	i915_gem_object_unlock(obj);

	dma_fence_get(&work->dma);
	i915_sw_fence_commit(&work->wait);

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_client_blt.c"
#endif