42fb60de31
If we have a set of active engines marked as non-persistent, we lose
track of them if the user replaces those engines with
I915_CONTEXT_PARAM_ENGINES. Since part of our uABI contract is that
non-persistent requests are terminated if they are no longer being
tracked by the user's context (in order to prevent a lost request from
causing an untracked, and so unstoppable, GPU hang), we need to apply
the same context cancellation upon changing engines.
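
For illustration, here is a rough userspace-side sketch of the sequence
this change guards against, assuming the standard i915 uAPI from
i915_drm.h; the helper name is made up, and error handling plus the DRM
fd/context setup are omitted:

/*
 * Illustrative only: a non-persistent context whose engine map is
 * replaced. Before this fix, requests still running on the old
 * engines[] could be left untracked instead of being cancelled.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void replace_engines_on_nonpersistent_ctx(int drm_fd, __u32 ctx_id)
{
        struct drm_i915_gem_context_param arg = { .ctx_id = ctx_id };
        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {};

        /* Opt out of persistence: requests must not outlive user tracking. */
        arg.param = I915_CONTEXT_PARAM_PERSISTENCE;
        arg.value = 0;
        ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);

        /* ... submit work to the context's current engines ... */

        /*
         * Replace the engine map. The previous engines[] (and any requests
         * still in flight on them) are no longer reachable from userspace,
         * so the kernel has to terminate those requests itself.
         */
        engines.engines[0].engine_class = I915_ENGINE_CLASS_RENDER;
        engines.engines[0].engine_instance = 0;
        arg.param = I915_CONTEXT_PARAM_ENGINES;
        arg.size = sizeof(engines);
        arg.value = (__u64)(uintptr_t)&engines;
        ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
}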
v2: Track stale engines[] so we only reap at context closure.
v3: Tvrtko spotted races with closing contexts and set-engines, so add a
veneer of kill-everything paranoia to clean up after losing a race.
Fixes: a0e047156c ("drm/i915/gem: Make context persistence optional")
Testcase: igt/gem_ctx_persistence/replace
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200211144831.1011498-1-chris@chris-wilson.co.uk
/*
 * SPDX-License-Identifier: MIT
 *
 * i915_sw_fence.h - library routines for N:M synchronisation points
 *
 * Copyright (C) 2016 Intel Corporation
 */

#ifndef _I915_SW_FENCE_H_
#define _I915_SW_FENCE_H_

#include <linux/dma-fence.h>
#include <linux/gfp.h>
#include <linux/kref.h>
#include <linux/notifier.h> /* for NOTIFY_DONE */
#include <linux/wait.h>

struct completion;
struct dma_resv;

struct i915_sw_fence {
        wait_queue_head_t wait;
        unsigned long flags;
        atomic_t pending;
        int error;
};

#define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */
#define I915_SW_FENCE_PRIVATE_BIT 1 /* available for use by owner */
#define I915_SW_FENCE_MASK (~3)

enum i915_sw_fence_notify {
        FENCE_COMPLETE,
        FENCE_FREE
};

typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
                                      enum i915_sw_fence_notify state);
#define __i915_sw_fence_call __aligned(4)

void __i915_sw_fence_init(struct i915_sw_fence *fence,
                          i915_sw_fence_notify_t fn,
                          const char *name,
                          struct lock_class_key *key);
#ifdef CONFIG_LOCKDEP
#define i915_sw_fence_init(fence, fn)                           \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __i915_sw_fence_init((fence), (fn), #fence, &__key);    \
} while (0)
#else
#define i915_sw_fence_init(fence, fn)                           \
        __i915_sw_fence_init((fence), (fn), NULL, NULL)
#endif

void i915_sw_fence_reinit(struct i915_sw_fence *fence);

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence);
#else
static inline void i915_sw_fence_fini(struct i915_sw_fence *fence) {}
#endif

void i915_sw_fence_commit(struct i915_sw_fence *fence);

int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
                                 struct i915_sw_fence *after,
                                 wait_queue_entry_t *wq);
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
                                     struct i915_sw_fence *after,
                                     gfp_t gfp);

struct i915_sw_dma_fence_cb {
        struct dma_fence_cb base;
        struct i915_sw_fence *fence;
};

int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
                                    struct dma_fence *dma,
                                    struct i915_sw_dma_fence_cb *cb);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
                                  struct dma_fence *dma,
                                  unsigned long timeout,
                                  gfp_t gfp);

int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
                                    struct dma_resv *resv,
                                    const struct dma_fence_ops *exclude,
                                    bool write,
                                    unsigned long timeout,
                                    gfp_t gfp);

bool i915_sw_fence_await(struct i915_sw_fence *fence);
void i915_sw_fence_complete(struct i915_sw_fence *fence);

static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
{
        return atomic_read(&fence->pending) <= 0;
}

static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
{
        return atomic_read(&fence->pending) < 0;
}

static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
{
        wait_event(fence->wait, i915_sw_fence_done(fence));
}

static inline void
i915_sw_fence_set_error_once(struct i915_sw_fence *fence, int error)
{
        if (unlikely(error))
                cmpxchg(&fence->error, 0, error);
}

#endif /* _I915_SW_FENCE_H_ */
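
For readers unfamiliar with this interface, below is a minimal in-kernel
usage sketch built only from the declarations above. The callback and
function names are made up for illustration, and the semantics are taken
at face value: pending counts outstanding signalers, FENCE_COMPLETE fires
once every awaited fence has signalled, and FENCE_FREE once the fence is
done with.

/* Hypothetical example: wait on a dma_fence through an i915_sw_fence. */
#include "i915_sw_fence.h"

static int __i915_sw_fence_call
example_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        switch (state) {
        case FENCE_COMPLETE:
                /* every awaited fence has signalled */
                break;
        case FENCE_FREE:
                /* fence is no longer in use; release resources tied to it */
                break;
        }

        return NOTIFY_DONE;
}

static int example_await_dma_fence(struct dma_fence *in)
{
        struct i915_sw_fence fence;
        int err;

        i915_sw_fence_init(&fence, example_notify);

        /* Chain after @in; a timeout of 0 is assumed to mean "no timeout". */
        err = i915_sw_fence_await_dma_fence(&fence, in, 0, GFP_KERNEL);

        /* No further awaits will be added: allow the fence to complete. */
        i915_sw_fence_commit(&fence);

        if (err >= 0) {
                i915_sw_fence_wait(&fence);
                err = 0;
        }

        i915_sw_fence_fini(&fence);
        return err;
}

Note that the i915_sw_fence_await()/i915_sw_fence_complete() pair declared
above reads as a get/put on the pending count, which is presumably what
the commit above leans on to pin a not-yet-signalled fence while racing
against context closure.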