drm/i915: Convert i915_perf to ww locking as well

We have the ordering of timeline->mutex vs resv_lock wrong; convert the
i915_vma_pin() and intel_context_pin() calls to their ww variants as well
to future-proof this.

We may need further changes to make this more transaction-like,
eventually getting down to a single i915_gem_ww_ctx, but for now this
should work.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200819140904.1708856-18-maarten.lankhorst@linux.intel.com
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
commit f00ecc2ef5 (parent c8d225946a)
Author:    Maarten Lankhorst <maarten.lankhorst@linux.intel.com>, 2020-08-19 16:08:57 +02:00
Committed: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
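
Both hunks below follow the same dma-resv ww acquire pattern: init a ww context, attempt the *_ww pin/lock calls, back off and retry on -EDEADLK, then fini. A minimal sketch of that flow is shown here for reference; pin_step() is a hypothetical placeholder for whichever *_ww acquire (intel_context_pin_ww(), i915_vma_pin_ww(), i915_gem_object_lock()) sits inside the transaction, not a real helper in the driver.

/* Sketch only: pin_step() is a stand-in for a *_ww acquire call. */
static int pin_with_ww_sketch(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* true: interruptible waits */
retry:
	err = pin_step(ce, &ww);		/* hypothetical acquire step */
	if (err == -EDEADLK) {
		/* Lost a lock race: drop everything held by @ww ... */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;		/* ... and run the sequence again */
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}

Passing true to i915_gem_ww_ctx_init() requests interruptible waits (the intr argument), which is what both call sites in the patch use.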

@@ -1195,24 +1195,39 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
 	struct i915_gem_engines_iter it;
 	struct i915_gem_context *ctx = stream->ctx;
 	struct intel_context *ce;
-	int err;
+	struct i915_gem_ww_ctx ww;
+	int err = -ENODEV;
 
 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 		if (ce->engine != stream->engine) /* first match! */
 			continue;
 
-		/*
-		 * As the ID is the gtt offset of the context's vma we
-		 * pin the vma to ensure the ID remains fixed.
-		 */
-		err = intel_context_pin(ce);
-		if (err == 0) {
-			stream->pinned_ctx = ce;
-			break;
-		}
+		err = 0;
+		break;
 	}
 	i915_gem_context_unlock_engines(ctx);
 
+	if (err)
+		return ERR_PTR(err);
+
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	/*
+	 * As the ID is the gtt offset of the context's vma we
+	 * pin the vma to ensure the ID remains fixed.
+	 */
+	err = intel_context_pin_ww(ce, &ww);
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
+	if (err)
+		return ERR_PTR(err);
+
+	stream->pinned_ctx = ce;
+
 	return stream->pinned_ctx;
 }
@@ -1923,15 +1938,22 @@ emit_oa_config(struct i915_perf_stream *stream,
 {
 	struct i915_request *rq;
 	struct i915_vma *vma;
+	struct i915_gem_ww_ctx ww;
 	int err;
 
 	vma = get_oa_vma(stream, oa_config);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
+	err = i915_gem_object_lock(vma->obj, &ww);
 	if (err)
-		goto err_vma_put;
+		goto err;
+
+	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (err)
+		goto err;
 
 	intel_engine_pm_get(ce->engine);
 	rq = i915_request_create(ce);
@@ -1953,11 +1975,9 @@ emit_oa_config(struct i915_perf_stream *stream,
 		goto err_add_request;
 	}
 
-	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, 0);
 	if (!err)
 		err = i915_vma_move_to_active(vma, rq, 0);
-	i915_vma_unlock(vma);
 	if (err)
 		goto err_add_request;
@@ -1971,7 +1991,14 @@ err_add_request:
 	i915_request_add(rq);
 err_vma_unpin:
 	i915_vma_unpin(vma);
-err_vma_put:
+err:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+
+	i915_gem_ww_ctx_fini(&ww);
 	i915_vma_put(vma);
 	return err;
 }