// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_trace.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct kmem_cache *slab_ce;
static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
}

static void rcu_context_free(struct rcu_head *rcu)
{
	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

	trace_intel_context_free(ce);
	kmem_cache_free(slab_ce, ce);
}

void intel_context_free(struct intel_context *ce)
{
	call_rcu(&ce->rcu, rcu_context_free);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	trace_intel_context_create(ce);
	return ce;
}
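/*
 * Allocate the backing state for a context on first use: ce->ops->alloc()
 * is called at most once under ce->pin_mutex and completion is recorded
 * with CONTEXT_ALLOC_BIT, so concurrent pinners do not race the
 * allocation. A banned context fails with -EIO.
 */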
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}
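/*
 * Acquire the context's active tracker. Except for barrier contexts and
 * engines using GuC submission, also preallocate the idle-barrier nodes
 * here so that the eventual deactivation does not need to allocate memory
 * (it may run from inside the shrinker, i.e. under FS_RECLAIM).
 */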
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}
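/*
 * Pin the context state vma into the GGTT, keep its i915_active busy and
 * mark the object unshrinkable for as long as the context image is pinned.
 */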
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;
	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}
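/* Pin the ring and keep its vma tracked as active for the pin duration. */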
static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}
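/*
 * Take the pins (ring, timeline and, if present, the context state) that
 * must be held before ce->ops->pre_pin()/pin() run; undone again by
 * intel_context_post_unpin().
 */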
static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}
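/*
 * Pin a context for use by the GPU under the caller's ww acquire context.
 * The backing objects are locked and pinned before taking ce->pin_mutex to
 * avoid inverting ce->pin_mutex against dma_resv_lock(); only the first pin
 * (pin_count 0 -> 1) pays for intel_context_active_acquire() and
 * ce->ops->pin(), later pins simply bump the counter.
 */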
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_ctx_unpin;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_release;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_post_unpin;

	intel_engine_pm_might_get(ce->engine);

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	trace_intel_context_do_pin(ce);

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_release:
	i915_active_release(&ce->active);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}
int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}
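/*
 * Drop 'sub' pins; the final unpin releases the HW state via ce->ops and
 * the context's active tracker. A temporary reference is held across the
 * release because dropping the active reference may otherwise free the
 * context while it is still being dereferenced here.
 */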
void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
	if (!atomic_sub_and_test(sub, &ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	trace_intel_context_do_unpin(ce);
	intel_context_put(ce);
}
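/*
 * ce->active callbacks: __intel_context_active() takes an extra context
 * reference and the low-level ring/timeline/state pins when the context
 * first becomes active; __intel_context_retire() drops them again once the
 * last request using the context has been retired.
 */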
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}
	return 0;
}

static int __i915_sw_fence_call
sw_fence_dummy_notify(struct i915_sw_fence *sf, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}
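/*
 * One-time initialisation of an intel_context: bind it to its engine, take
 * a reference on the engine's VM and set up the locks, lists and trackers
 * (breadcrumb signals, GuC state, parallel submission, ce->active).
 */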
void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = NULL;
	ce->ring_size = SZ_4K;

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);
	/* NB ce->signal_link/lock is used under RCU */
	spin_lock_init(&ce->signal_lock);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	spin_lock_init(&ce->guc_state.lock);
	INIT_LIST_HEAD(&ce->guc_state.fences);
	INIT_LIST_HEAD(&ce->guc_state.requests);

	ce->guc_id.id = GUC_INVALID_LRC_ID;
	INIT_LIST_HEAD(&ce->guc_id.link);

	INIT_LIST_HEAD(&ce->destroyed_link);

	INIT_LIST_HEAD(&ce->parallel.child_list);

	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a pending schedule disable outstanding.
	 */
	i915_sw_fence_init(&ce->guc_state.blocked,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&ce->guc_state.blocked);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire, 0);
}
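/*
 * Release the resources taken in intel_context_init(), plus the creation
 * references on any child contexts bound to this one.
 */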
void intel_context_fini(struct intel_context *ce)
{
	struct intel_context *child, *next;

	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	/* Need to put the creation ref for the children */
	if (intel_context_is_parent(ce))
		for_each_child_safe(ce, child, next)
			intel_context_put(child);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
	i915_sw_fence_fini(&ce->guc_state.blocked);
}

void i915_context_module_exit(void)
{
	kmem_cache_destroy(slab_ce);
}

int __init i915_context_module_init(void)
{
	slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!slab_ce)
		return -ENOMEM;

	return 0;
}
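/*
 * Engine enter/exit hooks: hold an engine-pm wakeref and keep the timeline
 * marked as active for as long as the context is busy on the engine.
 */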
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);
	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting the ce activity
	 * tracker.
	 *
	 * But we only need to take one pin on account of it; in other words,
	 * transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}
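/*
 * Convenience helper: pin the context just long enough to create a new
 * request on it, backing off and retrying on ww deadlock, then adjust the
 * lockdep annotations on timeline->mutex as described below.
 */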
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as outer lock.
	 * Hack around this to shut up lockdep in selftests.
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}
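/*
 * For GuC submission only: walk ce->guc_state.requests backwards under
 * guc_state.lock and return the oldest request on the context that has not
 * yet completed, or NULL if everything has completed.
 */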
struct i915_request *intel_context_find_active_request(struct intel_context *ce)
{
	struct i915_request *rq, *active = NULL;
	unsigned long flags;

	GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));

	spin_lock_irqsave(&ce->guc_state.lock, flags);
	list_for_each_entry_reverse(rq, &ce->guc_state.requests,
				    sched.link) {
		if (i915_request_completed(rq))
			break;

		active = rq;
	}
	spin_unlock_irqrestore(&ce->guc_state.lock, flags);

	return active;
}
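/*
 * Link a child context to its parent for parallel submission, taking over
 * the child's creation reference (dropped again in intel_context_fini() of
 * the parent). The GEM_BUG_ONs spell out the preconditions.
 */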
void intel_context_bind_parent_child(struct intel_context *parent,
				     struct intel_context *child)
{
	/*
	 * It is the caller's responsibility to validate that this function is
	 * used correctly, but we use GEM_BUG_ONs here to ensure that they do.
	 */
	GEM_BUG_ON(!intel_engine_uses_guc(parent->engine));
	GEM_BUG_ON(intel_context_is_pinned(parent));
	GEM_BUG_ON(intel_context_is_child(parent));
	GEM_BUG_ON(intel_context_is_pinned(child));
	GEM_BUG_ON(intel_context_is_child(child));
	GEM_BUG_ON(intel_context_is_parent(child));

	parent->parallel.number_children++;
	list_add_tail(&child->parallel.child_link,
		      &parent->parallel.child_list);
	child->parallel.parent = parent;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif