drm/i915: Mark intel_wakeref_get() as a sleeper
Assume that intel_wakeref_get() may take the mutex, and perform other
sleeping actions in the course of its callbacks, and so use might_sleep()
to ensure that all callers abide. Anything that cannot sleep has to use
e.g. intel_wakeref_get_if_active() to guarantee its avoidance of the
non-atomic paths.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191121130528.309474-1-chris@chris-wilson.co.uk
commit 93b0e8fe47
parent c95d31c3df
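The contract being enforced: intel_wakeref_get() may now sleep on every call, even when the underlying count is already non-zero, so atomic contexts must use a non-sleeping variant and handle failure. A minimal sketch of the two calling conventions using the GT-level wrappers touched by this patch (the example_* functions are hypothetical, not part of the tree):

#include "intel_gt_pm.h"	/* intel_gt_pm_get(), intel_gt_pm_get_if_awake() */

/* Process context: the full get is allowed to take mutexes and resume hw. */
static void example_process_context(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);		/* may sleep; might_sleep() asserts this */
	/* ... work that needs the GT awake ... */
	intel_gt_pm_put(gt);		/* release */
}

/* Atomic context: only the try-get is legal; the GT may simply be asleep. */
static bool example_atomic_context(struct intel_gt *gt)
{
	if (!intel_gt_pm_get_if_awake(gt))	/* never sleeps */
		return false;			/* asleep: caller must defer or skip */
	/* ... brief, non-sleeping work ... */
	intel_gt_pm_put(gt);			/* release */
	return true;
}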
drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,11 @@ static inline void intel_gt_pm_get(struct intel_gt *gt)
 	intel_wakeref_get(&gt->wakeref);
 }
 
+static inline void __intel_gt_pm_get(struct intel_gt *gt)
+{
+	__intel_wakeref_get(&gt->wakeref);
+}
+
 static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
 {
 	return intel_wakeref_get_if_active(&gt->wakeref);
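A note on the new helper above: __intel_gt_pm_get() is a plain atomic increment, so it never sleeps, but it is only valid while some other wakeref already holds the count above zero (INTEL_WAKEREF_BUG_ON trips otherwise). A sketch of the intended pattern, with a hypothetical example_* name:

/* Sketch: taking an extra, nested reference from a non-sleeping path. */
static void example_nested_get(struct intel_gt *gt)
{
	/*
	 * Precondition: the caller already owns a wakeref on this gt,
	 * e.g. via an earlier intel_gt_pm_get(), so gt->wakeref.count > 0.
	 */
	__intel_gt_pm_get(gt);	/* atomic_inc(); cannot sleep, cannot fail */

	/* The extra reference is handed off and put when its owner is done. */
}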
drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1121,7 +1121,7 @@ __execlists_schedule_in(struct i915_request *rq)
 		BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
 	}
 
-	intel_gt_pm_get(engine->gt);
+	__intel_gt_pm_get(engine->gt);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
 	intel_engine_context_in(engine);
 
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -529,7 +529,7 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
 	 * required if we generalise the inflight tracking.
 	 */
 
-	intel_gt_pm_get(rq->engine->gt);
+	__intel_gt_pm_get(rq->engine->gt);
 	return i915_request_get(rq);
 }
 
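Why the two schedule_in hunks above needed the atomic variant: both execlists and GuC submission run from a tasklet, where sleeping is forbidden, and at that point the engine's own pm reference already pins the GT wakeref, so the nested get can safely be the atomic one. A sketch of that invariant (example_schedule_in is hypothetical):

#include "intel_engine_pm.h"	/* intel_engine_pm_is_awake() */

/* Softirq/tasklet context: must not sleep, but an outer wakeref is held. */
static void example_schedule_in(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));	/* outer ref held */
	__intel_gt_pm_get(engine->gt);			/* safe: count > 0 */
}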
drivers/gpu/drm/i915/intel_wakeref.h
@@ -59,9 +59,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
 
 /**
  * intel_wakeref_get: Acquire the wakeref
- * @i915: the drm_i915_private device
  * @wf: the wakeref
- * @fn: callback for acquired the wakeref, called only on first acquire.
  *
  * Acquire a hold on the wakeref. The first user to do so, will acquire
  * the runtime pm wakeref and then call the @fn underneath the wakeref
@@ -76,12 +74,29 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
 static inline int
 intel_wakeref_get(struct intel_wakeref *wf)
 {
+	might_sleep();
 	if (unlikely(!atomic_inc_not_zero(&wf->count)))
 		return __intel_wakeref_get_first(wf);
 
 	return 0;
 }
 
+/**
+ * __intel_wakeref_get: Acquire the wakeref, again
+ * @wf: the wakeref
+ *
+ * Increment the wakeref counter, only valid if it is already held by
+ * the caller.
+ *
+ * See intel_wakeref_get().
+ */
+static inline void
+__intel_wakeref_get(struct intel_wakeref *wf)
+{
+	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+	atomic_inc(&wf->count);
+}
+
 /**
  * intel_wakeref_get_if_in_use: Acquire the wakeref
  * @wf: the wakeref
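The effect of the unconditional might_sleep() above is that misuse is flagged even when the atomic_inc_not_zero() fast path would have succeeded, turning a latent, timing-dependent bug into an immediate warning under CONFIG_DEBUG_ATOMIC_SLEEP. An illustrative misuse (example_lock and example_misuse are hypothetical):

static DEFINE_SPINLOCK(example_lock);

/*
 * Before this patch, this only went wrong when the wakeref count hit
 * zero and the slow path took the mutex; now might_sleep() warns on
 * every call made under the spinlock.
 */
static void example_misuse(struct intel_wakeref *wf)
{
	spin_lock(&example_lock);
	intel_wakeref_get(wf);	/* BUG: sleeping function called from atomic context */
	spin_unlock(&example_lock);
}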