drm/i915: Fixup preempt-to-busy vs reset of a virtual request
Due to the nature of preempt-to-busy the execlists active tracking and the schedule queue may become temporarily desync'ed (between resubmission to HW and its ack from HW). This means that we may have unwound a request and passed it back to the virtual engine, but it is still inflight on the HW and may even result in a GPU hang. If we detect that GPU hang and try to reset, the hanging request->engine will no longer match the current engine, which means that the request is not on the execlists active list and we should not try to find an older incomplete request. Given that we have deduced this must be a request on a virtual engine, it is the single active request in the context and so must be guilty (as the context is still inflight, it is prevented from being executed on another engine as we process the reset).

Fixes: 22b7a426bb ("drm/i915/execlists: Preempt-to-busy")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190923152844.8914-2-chris@chris-wilson.co.uk
(cherry picked from commit cb2377a919)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
commit 68184eb7b0 (parent a8385f0c3f)
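The commit message reasons about a request that preempt-to-busy has already unwound from the engine's active list while the HW is still executing (or hanging on) it. As a reading aid only, the state in question can be written out with the i915_request helpers that also appear in the hunks below; the wrapper function here is hypothetical and not part of the patch:

	/* Hypothetical illustration only -- not part of the patch. */
	#include "i915_request.h"

	static bool unwound_but_incomplete(struct i915_request *rq)
	{
		/*
		 * Unwound by preempt-to-busy, so no longer on the engine's
		 * active list, yet not completed: the HW may still be
		 * executing (or hanging on) this request.
		 */
		return !i915_request_is_active(rq) && !i915_request_completed(rq);
	}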
@@ -2414,10 +2414,14 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
 
 static struct i915_request *active_request(struct i915_request *rq)
 {
-	const struct list_head * const list = &rq->timeline->requests;
 	const struct intel_context * const ce = rq->hw_context;
 	struct i915_request *active = NULL;
+	struct list_head *list;
+
+	if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
+		return rq;
 
+	list = &rq->timeline->requests;
 	list_for_each_entry_from_reverse(rq, list, link) {
 		if (i915_request_completed(rq))
 			break;
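For readability, this is roughly how active_request() in the execlists code reads once the hunk above is applied. Only the lines shown in the hunk are taken from the patch; the tail of the loop and the final return are reconstructed from the surrounding function and should be read as an approximation:

	static struct i915_request *active_request(struct i915_request *rq)
	{
		const struct intel_context * const ce = rq->hw_context;
		struct i915_request *active = NULL;
		struct list_head *list;

		/*
		 * Unwound by preempt-to-busy but still incomplete on the HW:
		 * return it directly, it is the single active request in the
		 * context and so the one to blame.
		 */
		if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
			return rq;

		list = &rq->timeline->requests;
		list_for_each_entry_from_reverse(rq, list, link) {
			if (i915_request_completed(rq))
				break;

			/* Reconstructed (not in the hunk): stay within this context. */
			if (rq->hw_context != ce)
				break;

			active = rq;
		}

		return active;
	}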
@@ -42,11 +42,10 @@ static void engine_skip_context(struct i915_request *rq)
 	struct intel_engine_cs *engine = rq->engine;
 	struct i915_gem_context *hung_ctx = rq->gem_context;
 
-	lockdep_assert_held(&engine->active.lock);
-
 	if (!i915_request_is_active(rq))
 		return;
 
+	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
 		if (rq->gem_context == hung_ctx)
 			i915_request_skip(rq, -EIO);
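Putting the hunk above back into its function, engine_skip_context() in the reset code ends up as sketched below (the signature and braces sit outside the hunk and are filled in from context). The design point is that the engine->active.lock assertion now sits after the early return, so it is only demanded once we know the request really is on this engine's active list:

	static void engine_skip_context(struct i915_request *rq)
	{
		struct intel_engine_cs *engine = rq->engine;
		struct i915_gem_context *hung_ctx = rq->gem_context;

		/* An unwound virtual request is not on engine->active. */
		if (!i915_request_is_active(rq))
			return;

		lockdep_assert_held(&engine->active.lock);
		list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
			if (rq->gem_context == hung_ctx)
				i915_request_skip(rq, -EIO);
	}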
@@ -123,7 +122,6 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
 		  rq->fence.seqno,
 		  yesno(guilty));
 
-	lockdep_assert_held(&rq->engine->active.lock);
 	GEM_BUG_ON(i915_request_completed(rq));
 
 	if (guilty) {