linux/drivers/gpu/drm/i915/gt/intel_context.c
Chris Wilson e5429340bf drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
Similar to commit ac0e331a62 ("drm/i915: Tighten atomicity of
i915_active_acquire vs i915_active_release"), we have the same race of
trying to pin the context underneath a mutex while allowing the
decrement to be atomic outside of that mutex. This leads to the problem
where two threads may simultaneously try to pin the context, and the
second does not notice that it needs to repin the context, as sketched
below.
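
A simplified sketch of the window (the pre-fix ordering is
reconstructed here for illustration; helper names are those used in
intel_context.c):

  Thread A (final unpin)                Thread B (pin)
  ----------------------                --------------
  atomic_dec_and_test(&ce->pin_count)
  ce->ops->unpin(ce)
                                        mutex_lock(&ce->pin_mutex)
                                        atomic_add_unless(&ce->pin_count, 1, 0)
                                          -> sees 0, takes the slow path
  i915_active_release(&ce->active)
    -> the retire may now run
       asynchronously
                                        i915_active_acquire(&ce->active)
                                          -> can overlap the release above,
                                             so the repin is missed

The fix is to take the activity tracker before the mutex and the
pin_count check:

  err = i915_active_acquire(&ce->active); /* before ce->pin_mutex */
  if (err)
          return err;

  if (mutex_lock_interruptible(&ce->pin_mutex)) {
  ...

so that a concurrent release/retire can no longer slip in between the
pin_count check and the acquire.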

<2> [198.669621] kernel BUG at drivers/gpu/drm/i915/gt/intel_timeline.c:387!
<4> [198.669703] invalid opcode: 0000 [#1] PREEMPT SMP PTI
<4> [198.669712] CPU: 0 PID: 1246 Comm: gem_exec_create Tainted: G     U  W         5.5.0-rc6-CI-CI_DRM_7755+ #1
<4> [198.669723] Hardware name:  /NUC7i5BNB, BIOS BNKBL357.86A.0054.2017.1025.1822 10/25/2017
<4> [198.669776] RIP: 0010:timeline_advance+0x7b/0xe0 [i915]
<4> [198.669785] Code: 00 48 c7 c2 10 f1 46 a0 48 c7 c7 70 1b 32 a0 e8 bb dd e7 e0 bf 01 00 00 00 e8 d1 af e7 e0 31 f6 bf 09 00 00 00 e8 35 ef d8 e0 <0f> 0b 48 c7 c1 48 fa 49 a0 ba 84 01 00 00 48 c7 c6 10 f1 46 a0 48
<4> [198.669803] RSP: 0018:ffffc900004c3a38 EFLAGS: 00010296
<4> [198.669810] RAX: ffff888270b35140 RBX: ffff88826f32ee00 RCX: 0000000000000006
<4> [198.669818] RDX: 00000000000017c5 RSI: 0000000000000000 RDI: 0000000000000009
<4> [198.669826] RBP: ffffc900004c3a64 R08: 0000000000000000 R09: 0000000000000000
<4> [198.669834] R10: 0000000000000000 R11: 0000000000000000 R12: ffff88826f9b5980
<4> [198.669841] R13: 0000000000000cc0 R14: ffffc900004c3dc0 R15: ffff888253610068
<4> [198.669849] FS:  00007f63e663fe40(0000) GS:ffff888276c00000(0000) knlGS:0000000000000000
<4> [198.669857] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
<4> [198.669864] CR2: 00007f171f8e39a8 CR3: 000000026b1f6005 CR4: 00000000003606f0
<4> [198.669872] Call Trace:
<4> [198.669924]  intel_timeline_get_seqno+0x12/0x40 [i915]
<4> [198.669977]  __i915_request_create+0x76/0x5a0 [i915]
<4> [198.670024]  i915_request_create+0x86/0x1c0 [i915]
<4> [198.670068]  i915_gem_do_execbuffer+0xbf2/0x2500 [i915]
<4> [198.670082]  ? __lock_acquire+0x460/0x15d0
<4> [198.670128]  i915_gem_execbuffer2_ioctl+0x11f/0x470 [i915]
<4> [198.670171]  ? i915_gem_execbuffer_ioctl+0x300/0x300 [i915]
<4> [198.670181]  drm_ioctl_kernel+0xa7/0xf0
<4> [198.670188]  drm_ioctl+0x2e1/0x390
<4> [198.670233]  ? i915_gem_execbuffer_ioctl+0x300/0x300 [i915]

Fixes: 8413502238 ("drm/i915/gt: Drop mutex serialisation between context pin/unpin")
References: ac0e331a62 ("drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200127152829.2842149-1-chris@chris-wilson.co.uk
2020-01-27 21:11:59 +00:00

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"
static struct i915_global_context {
struct i915_global base;
struct kmem_cache *slab_ce;
} global;
static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

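/*
 * Lazily allocate the backing state for this context (for example, the
 * logical ring context image). Serialised by ce->pin_mutex so that only
 * one thread performs the allocation; CONTEXT_ALLOC_BIT records completion
 * so later callers can skip ce->ops->alloc() altogether.
 */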
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

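/*
 * Note the lock ordering below: ce->active is acquired before ce->pin_mutex
 * and the ce->pin_count check. Holding the activity tracker open first
 * ensures that a concurrent final unpin, whose i915_active_release() may
 * complete asynchronously, cannot retire the context state between our
 * pin_count check and our own acquire.
 */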
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	if (mutex_lock_interruptible(&ce->pin_mutex)) {
		err = -EINTR;
		goto out_release;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto out_unlock;

		err = ce->ops->pin(ce);
		if (unlikely(err))
			goto err_active;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	goto out_unlock;

err_active:
	intel_context_active_release(ce);
out_unlock:
	mutex_unlock(&ce->pin_mutex);
out_release:
	i915_active_release(&ce->active);
	return err;
}

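/*
 * The unpin side is deliberately lockless: the decrement below is atomic
 * and can race with a concurrent pinner. A pinner that then observes
 * pin_count == 0 must re-acquire the activity tracker and repin, which the
 * acquire-before-mutex ordering in __intel_context_do_pin() makes safe.
 */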
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

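/*
 * Pin the context state into the GGTT, above the engine's minimum bias and
 * preferring a high address, and keep the backing object resident
 * (unshrinkable) for as long as it remains pinned.
 */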
static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring)
{
	int err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		return err;

	err = intel_ring_pin(ring);
	if (err)
		goto err_active;

	return 0;

err_active:
	i915_active_release(&ring->vma->active);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	intel_ring_unpin(ring);
	i915_active_release(&ring->vma->active);
}

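/*
 * __intel_context_active() and __intel_context_retire() are the callbacks
 * registered with i915_active_init() in intel_context_init(): the former
 * pins the ring, the timeline and (if present) the context state when the
 * context first becomes active; the latter unwinds those pins in reverse
 * order once all activity is retired.
 */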
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);

	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	CE_TRACE(ce, "active\n");

	intel_context_get(ce);

	err = __ring_active(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

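/*
 * intel_context_enter_engine() and intel_context_exit_engine() bracket the
 * context's busy period on an engine: take an engine-pm wakeref and mark
 * the timeline as active on entry, and drop both again on exit.
 */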
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by tracking the request
	 * in the ce activity tracker. We only need to take one pin on
	 * their account; in other words, transfer our pinned ce to the
	 * tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

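/*
 * i915_request_create() requires a pinned context, so take a temporary pin
 * around the request construction. The returned request holds its own
 * references to the context, allowing us to drop this pin before handing
 * the request back to the caller.
 */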
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif