drm/i915: Add kick_backend function to i915_sched_engine
Not all back-ends require a kick after a scheduling update, so make the kick a call-back function that the back-end can opt in to. Also move the current kick function from the scheduler to the execlists file, as it is specific to that back-end.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210618010638.98941-7-matthew.brost@intel.com
commit 71ed60112d
parent 3f623e06cd
committed by Matt Roper
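The idea, in brief: a back-end opts in by installing a kick_backend hook on its i915_sched_engine, and the scheduler calls the hook only when it is set, so a back-end that needs no kick after a priority update (presumably the GuC back-end this series is building towards) can simply leave it NULL. Below is a minimal, self-contained sketch of that opt-in pattern; the mock_* types and functions are stand-ins for illustration only, not the real i915 structures or locking.

/*
 * Minimal model of the opt-in kick_backend hook (stand-in types only).
 */
#include <stdio.h>
#include <stddef.h>

struct mock_request {
	int prio;
};

struct mock_sched_engine {
	/* NULL means this back-end needs no kick after a priority update. */
	void (*kick_backend)(const struct mock_request *rq, int prio);
};

/* What an execlists-style back-end would register (cf. kick_execlists below). */
static void mock_kick_execlists(const struct mock_request *rq, int prio)
{
	(void)rq;
	printf("kicking submission tasklet, new priority hint %d\n", prio);
}

/* What the scheduler now does: call the hook only if the back-end opted in. */
static void mock_schedule(struct mock_sched_engine *se,
			  struct mock_request *rq, int prio)
{
	rq->prio = prio;
	if (se->kick_backend)
		se->kick_backend(rq, prio);
}

int main(void)
{
	struct mock_sched_engine execlists = { .kick_backend = mock_kick_execlists };
	struct mock_sched_engine no_kick = { .kick_backend = NULL }; /* opts out */
	struct mock_request rq = { .prio = 0 };

	mock_schedule(&execlists, &rq, 2);	/* prints a kick message */
	mock_schedule(&no_kick, &rq, 2);	/* silently skips the kick */
	return 0;
}

In the actual patch, the hook is registered in execlists_set_default_submission() and intel_execlists_create_virtual(), and guarded in __i915_schedule(), as the diff below shows.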
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3116,10 +3116,61 @@ static bool can_preempt(struct intel_engine_cs *engine)
 	return engine->class != RENDER_CLASS;
 }
 
+static void kick_execlists(const struct i915_request *rq, int prio)
+{
+	struct intel_engine_cs *engine = rq->engine;
+	struct i915_sched_engine *sched_engine = engine->sched_engine;
+	const struct i915_request *inflight;
+
+	/*
+	 * We only need to kick the tasklet once for the high priority
+	 * new context we add into the queue.
+	 */
+	if (prio <= sched_engine->queue_priority_hint)
+		return;
+
+	rcu_read_lock();
+
+	/* Nothing currently active? We're overdue for a submission! */
+	inflight = execlists_active(&engine->execlists);
+	if (!inflight)
+		goto unlock;
+
+	/*
+	 * If we are already the currently executing context, don't
+	 * bother evaluating if we should preempt ourselves.
+	 */
+	if (inflight->context == rq->context)
+		goto unlock;
+
+	ENGINE_TRACE(engine,
+		     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+		     prio,
+		     rq->fence.context, rq->fence.seqno,
+		     inflight->fence.context, inflight->fence.seqno,
+		     inflight->sched.attr.priority);
+
+	sched_engine->queue_priority_hint = prio;
+
+	/*
+	 * Allow preemption of low -> normal -> high, but we do
+	 * not allow low priority tasks to preempt other low priority
+	 * tasks under the impression that latency for low priority
+	 * tasks does not matter (as much as background throughput),
+	 * so kiss.
+	 */
+	if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+	rcu_read_unlock();
+}
+
 static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = execlists_submit_request;
 	engine->sched_engine->schedule = i915_schedule;
+	engine->sched_engine->kick_backend = kick_execlists;
 	engine->execlists.tasklet.callback = execlists_submission_tasklet;
 }
 
@@ -3702,6 +3753,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 	ve->base.request_alloc = execlists_request_alloc;
 
 	ve->base.sched_engine->schedule = i915_schedule;
+	ve->base.sched_engine->kick_backend = kick_execlists;
 	ve->base.submit_request = virtual_submit_request;
 	ve->base.bond_execute = virtual_bond_execute;
 
drivers/gpu/drm/i915/i915_scheduler.c
@@ -157,65 +157,6 @@ sched_lock_engine(const struct i915_sched_node *node,
 	return locked;
 }
 
-static inline int rq_prio(const struct i915_request *rq)
-{
-	return rq->sched.attr.priority;
-}
-
-static inline bool need_preempt(int prio, int active)
-{
-	/*
-	 * Allow preemption of low -> normal -> high, but we do
-	 * not allow low priority tasks to preempt other low priority
-	 * tasks under the impression that latency for low priority
-	 * tasks does not matter (as much as background throughput),
-	 * so kiss.
-	 */
-	return prio >= max(I915_PRIORITY_NORMAL, active);
-}
-
-static void kick_submission(struct intel_engine_cs *engine,
-			    const struct i915_request *rq,
-			    int prio)
-{
-	const struct i915_request *inflight;
-
-	/*
-	 * We only need to kick the tasklet once for the high priority
-	 * new context we add into the queue.
-	 */
-	if (prio <= engine->sched_engine->queue_priority_hint)
-		return;
-
-	rcu_read_lock();
-
-	/* Nothing currently active? We're overdue for a submission! */
-	inflight = execlists_active(&engine->execlists);
-	if (!inflight)
-		goto unlock;
-
-	/*
-	 * If we are already the currently executing context, don't
-	 * bother evaluating if we should preempt ourselves.
-	 */
-	if (inflight->context == rq->context)
-		goto unlock;
-
-	ENGINE_TRACE(engine,
-		     "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
-		     prio,
-		     rq->fence.context, rq->fence.seqno,
-		     inflight->fence.context, inflight->fence.seqno,
-		     inflight->sched.attr.priority);
-
-	engine->sched_engine->queue_priority_hint = prio;
-	if (need_preempt(prio, rq_prio(inflight)))
-		tasklet_hi_schedule(&engine->execlists.tasklet);
-
-unlock:
-	rcu_read_unlock();
-}
-
 static void __i915_schedule(struct i915_sched_node *node,
 			    const struct i915_sched_attr *attr)
 {
@@ -335,7 +276,8 @@ static void __i915_schedule(struct i915_sched_node *node,
 	}
 
 	/* Defer (tasklet) submission until after all of our updates. */
-	kick_submission(engine, node_to_request(node), prio);
+	if (engine->sched_engine->kick_backend)
+		engine->sched_engine->kick_backend(node_to_request(node), prio);
 	}
 
 	spin_unlock(&engine->sched_engine->lock);
drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -153,6 +153,12 @@ struct i915_sched_engine {
 	 */
 	bool no_priolist;
 
+	/**
+	 * @kick_backend: kick backend after a request's priority has changed
+	 */
+	void	(*kick_backend)(const struct i915_request *rq,
+				int prio);
+
 	/**
 	 * @schedule: adjust priority of request
 	 *