drm/i915: Protect request peeking with RCU

Since execlists_active() is no longer protected by the
engine->active.lock, we need to protect the request pointer with RCU to
prevent it from being freed as we evaluate whether or not we need to preempt.

Fixes: df40306902 ("drm/i915/execlists: Lift process_csb() out of the irq-off spinlock")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191104090158.2959-2-chris@chris-wilson.co.uk
(cherry picked from commit 7d14863525)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
(cherry picked from commit 8eb4704b12)
(cherry picked from commit 7e27238e149ce4f00d9cd801fe3aa0ea55e986a2)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
@@ -177,9 +177,37 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority | __NO_PREEMPTION;
 }
 
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
 {
-	const struct i915_request *inflight = *engine->execlists.active;
+	/*
+	 * Allow preemption of low -> normal -> high, but we do
+	 * not allow low priority tasks to preempt other low priority
+	 * tasks under the impression that latency for low priority
+	 * tasks does not matter (as much as background throughput),
+	 * so kiss.
+	 */
+	return prio >= max(I915_PRIORITY_NORMAL, active);
+}
+
+static void kick_submission(struct intel_engine_cs *engine,
+			    const struct i915_request *rq,
+			    int prio)
+{
+	const struct i915_request *inflight;
+
+	/*
+	 * We only need to kick the tasklet once for the high priority
+	 * new context we add into the queue.
+	 */
+	if (prio <= engine->execlists.queue_priority_hint)
+		return;
+
+	rcu_read_lock();
+
+	/* Nothing currently active? We're overdue for a submission! */
+	inflight = execlists_active(&engine->execlists);
+	if (!inflight)
+		goto unlock;
 
 	/*
 	 * If we are already the currently executing context, don't
@@ -188,10 +216,15 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
 	 * tasklet, i.e. we have not change the priority queue
 	 * sufficiently to oust the running context.
 	 */
-	if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
-		return;
+	if (inflight->hw_context == rq->hw_context)
+		goto unlock;
 
-	tasklet_hi_schedule(&engine->execlists.tasklet);
+	engine->execlists.queue_priority_hint = prio;
+	if (need_preempt(prio, rq_prio(inflight)))
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+	rcu_read_unlock();
 }
 
 static void __i915_schedule(struct i915_sched_node *node,
@@ -317,13 +350,8 @@ static void __i915_schedule(struct i915_sched_node *node,
 			list_move_tail(&node->link, cache.priolist);
 		}
 
-		if (prio <= engine->execlists.queue_priority_hint)
-			continue;
-
-		engine->execlists.queue_priority_hint = prio;
-
 		/* Defer (tasklet) submission until after all of our updates. */
-		kick_submission(engine, prio);
+		kick_submission(engine, node_to_request(node), prio);
 	}
 
 	spin_unlock(&engine->active.lock);
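
For reference, below is a minimal standalone sketch of the RCU peeking idiom that the new kick_submission() relies on. It is not the i915 code: the types and helpers (toy_request, toy_engine, toy_retire(), toy_need_kick()) are illustrative stand-ins, and the real driver's request lifetime management differs in detail. The point is only the shape of the pattern: the reader holds rcu_read_lock() across both the pointer load and the priority check, so a concurrent retire that frees the request after a grace period cannot pull it out from under the comparison.

#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for an in-flight i915_request. */
struct toy_request {
	int prio;
	struct rcu_head rcu;
};

/* Hypothetical stand-in for the per-engine execlists state. */
struct toy_engine {
	struct toy_request __rcu *active;
	spinlock_t lock;	/* serialises the writer side only */
};

/* Writer: retire the in-flight request, freeing it after a grace period. */
static void toy_retire(struct toy_engine *engine)
{
	struct toy_request *old;

	spin_lock(&engine->lock);
	old = rcu_dereference_protected(engine->active,
					lockdep_is_held(&engine->lock));
	RCU_INIT_POINTER(engine->active, NULL);
	spin_unlock(&engine->lock);

	if (old)
		kfree_rcu(old, rcu);	/* deferred until readers are done */
}

/* Reader: peek at the in-flight request without taking the writer's lock. */
static bool toy_need_kick(struct toy_engine *engine, int prio)
{
	const struct toy_request *inflight;
	bool kick = false;

	rcu_read_lock();
	inflight = rcu_dereference(engine->active);
	if (inflight)
		kick = prio > inflight->prio;	/* inflight cannot be freed here */
	rcu_read_unlock();

	return kick;
}

This is the same shape as the hunks above: the peek via execlists_active() and the rq_prio() comparison now sit between rcu_read_lock() and rcu_read_unlock(), because engine->active.lock no longer guarantees the in-flight request stays alive while it is being inspected.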