drm/i915/execlists: Use a locked clear_bit() for synchronisation with interrupt
We were relying on the uncached reads when processing the CSB to provide
ourselves with the serialisation with the interrupt handler (so we could
detect new interrupts in the middle of processing the old one). However, in
commit 767a983ab2 ("drm/i915/execlists: Read the context-status HEAD from
the HWSP") those uncached reads were eliminated (on one path at least) and
along with them our serialisation. The result is that we would very rarely
miss notification of a new interrupt and leave a context-switch
unprocessed, hanging the GPU.

Fixes: 767a983ab2 ("drm/i915/execlists: Read the context-status HEAD from the HWSP")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180321091027.21034-1-chris@chris-wilson.co.uk
commit 9153e6b7c8
parent d871bfd008
@@ -831,7 +831,8 @@ static void execlists_submission_tasklet(unsigned long data)
         struct drm_i915_private *dev_priv = engine->i915;
         bool fw = false;
 
-        /* We can skip acquiring intel_runtime_pm_get() here as it was taken
+        /*
+         * We can skip acquiring intel_runtime_pm_get() here as it was taken
          * on our behalf by the request (see i915_gem_mark_busy()) and it will
          * not be relinquished until the device is idle (see
          * i915_gem_idle_work_handler()). As a precaution, we make sure
@@ -840,7 +841,8 @@ static void execlists_submission_tasklet(unsigned long data)
          */
         GEM_BUG_ON(!dev_priv->gt.awake);
 
-        /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+        /*
+         * Prefer doing test_and_clear_bit() as a two stage operation to avoid
          * imposing the cost of a locked atomic transaction when submitting a
          * new request (outside of the context-switch interrupt).
          */
@@ -856,17 +858,10 @@ static void execlists_submission_tasklet(unsigned long data)
                         execlists->csb_head = -1; /* force mmio read of CSB ptrs */
                 }
 
-                /* The write will be ordered by the uncached read (itself
-                 * a memory barrier), so we do not need another in the form
-                 * of a locked instruction. The race between the interrupt
-                 * handler and the split test/clear is harmless as we order
-                 * our clear before the CSB read. If the interrupt arrived
-                 * first between the test and the clear, we read the updated
-                 * CSB and clear the bit. If the interrupt arrives as we read
-                 * the CSB or later (i.e. after we had cleared the bit) the bit
-                 * is set and we do a new loop.
-                 */
-                __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+                /* Clear before reading to catch new interrupts */
+                clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+                smp_mb__after_atomic();
+
                 if (unlikely(execlists->csb_head == -1)) { /* following a reset */
                         if (!fw) {
                                 intel_uncore_forcewake_get(dev_priv,
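The same pairing can also be exercised with two actual threads. The sketch
below is again a hand-rolled userspace illustration (the thread names, entry
count and final tally are invented for the example, not taken from the
kernel): one thread plays the CS interrupt handler, the other the submission
tasklet, and the locked clear plus full fence before each re-read of the
write pointer is what keeps the consumer from ever stranding a published
entry, which is exactly the property the GPU hang in this bug violated.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ENTRIES 100000U

static atomic_ulong irq_posted;   /* stands in for engine->irq_posted */
static atomic_uint  csb_write;    /* stands in for the CSB write pointer */

/* the "interrupt handler": publish entries, posting the bit after each */
static void *irq_thread(void *arg)
{
        (void)arg;
        for (unsigned int i = 0; i < ENTRIES; i++) {
                atomic_fetch_add_explicit(&csb_write, 1, memory_order_release);
                atomic_fetch_or(&irq_posted, 1UL);          /* set_bit() */
        }
        return NULL;
}

/* the "tasklet": clear the bit and order that clear before re-reading */
static void *tasklet_thread(void *arg)
{
        unsigned int head = 0, processed = 0;

        (void)arg;
        while (processed < ENTRIES) {
                if (!(atomic_load(&irq_posted) & 1UL))
                        continue;                           /* wait for a kick */

                /* clear_bit() + smp_mb__after_atomic() in the patch */
                atomic_fetch_and(&irq_posted, ~1UL);
                atomic_thread_fence(memory_order_seq_cst);

                unsigned int tail = atomic_load_explicit(&csb_write,
                                                         memory_order_acquire);
                processed += tail - head;
                head = tail;
        }
        printf("processed %u of %u entries\n", processed, ENTRIES);
        return NULL;
}

int main(void)
{
        pthread_t irq, tasklet;

        pthread_create(&tasklet, NULL, tasklet_thread, NULL);
        pthread_create(&irq, NULL, irq_thread, NULL);
        pthread_join(irq, NULL);
        pthread_join(tasklet, NULL);
        return 0;
}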