/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif
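
/*
 * __active_retire(): called once the last reference is dropped. Under
 * ref->tree_lock it detaches the rbtree of per-timeline nodes and marks the
 * i915_active as idle, then (outside the lock) invokes the optional
 * retire() callback, wakes any waiters and returns the nodes to the
 * slab cache.
 */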

static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	root = ref->tree;
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}
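
/*
 * active_retire(): drop a reference; the final release is either performed
 * inline or, if the retire callback may sleep (I915_ACTIVE_RETIRE_SLEEPS),
 * deferred to the unbound workqueue via active_work().
 */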

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}
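
/*
 * active_instance(): look up (or lazily allocate) the i915_active_fence slot
 * tracking the given timeline. The most recently used node is cached in
 * ref->cache so that the rbtree walk is skipped in the common case.
 */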

static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	spin_unlock_irq(&ref->tree_lock);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}
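
/*
 * __i915_active_init(): set up an empty tracker. The low bits of the retire
 * pointer encode I915_ACTIVE_MAY_SLEEP, which selects deferred retirement
 * via the worker instead of retiring inline.
 */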

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}
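
/*
 * ____active_del_barrier(): try to steal our proto-barrier back from the
 * engine->barrier_tasks llist by rebuilding the list without it. Returns
 * true if the node was successfully claimed.
 */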

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}
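
/*
 * i915_active_ref(): track a fence on the timeline's slot, replacing any
 * idle barrier that previously occupied it. Callers must hold the timeline
 * mutex.
 */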

int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}
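
/*
 * i915_active_set_exclusive(): install a fence in the single exclusive
 * slot, returning the previous fence (if any) with a reference held.
 */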

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	struct dma_fence *prev;

	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	rcu_read_lock();
	prev = __i915_active_fence_set(&ref->excl, f);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		atomic_inc(&ref->count);
	rcu_read_unlock();

	return prev;
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}
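
/*
 * i915_active_acquire(): take a preemptive reference, calling the optional
 * ref->active() hook on first use to bring the tracker to life.
 */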

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		if (ref->active)
			err = ref->active(ref);
		if (!err) {
			spin_lock_irq(&ref->tree_lock); /* __active_retire() */
			debug_active_activate(ref);
			atomic_inc(&ref->count);
			spin_unlock_irq(&ref->tree_lock);
		}
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}
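
/*
 * __i915_active_wait(): flush lazy signaling on all tracked fences and then
 * sleep (in the given task state) until the i915_active becomes idle.
 */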

int __i915_active_wait(struct i915_active *ref, int state)
{
	int err;

	might_sleep();

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Any fence added after the wait begins will not be auto-signaled */
	err = flush_lazy_signals(ref);
	i915_active_release(ref);
	if (err)
		return err;

	if (!i915_active_is_idle(ref) &&
	    ___wait_var_event(ref, i915_active_is_idle(ref),
			      state, 0, 0, schedule()))
		return -EINTR;

	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}
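
/*
 * await_active(): apply fn() to the exclusive fence, the per-timeline
 * fences and/or a barrier, as selected by the I915_ACTIVE_AWAIT_* flags.
 */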

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}
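
/*
 * reuse_idle_barrier(): look for an existing barrier node on the given
 * kernel_context timeline that can be recycled instead of allocating a
 * fresh one.
 */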

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	spin_unlock_irq(&ref->tree_lock);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}
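
/*
 * i915_active_acquire_preallocate_barrier(): preallocate one barrier node
 * per physical engine backing @engine, so that later unpinning never needs
 * to allocate (it may be called from inside FS_RECLAIM, i.e. the shrinker).
 */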
|
|
|
|
|
drm/i915: Keep contexts pinned until after the next kernel context switch
We need to keep the context image pinned in memory until after the GPU
has finished writing into it. Since it continues to write as we signal
the final breadcrumb, we need to keep it pinned until the request after
it is complete. Currently we know the order in which requests execute on
each engine, and so to remove that presumption we need to identify a
request/context-switch we know must occur after our completion. Any
request queued after the signal must imply a context switch, for
simplicity we use a fresh request from the kernel context.
The sequence of operations for keeping the context pinned until saved is:
- On context activation, we preallocate a node for each physical engine
the context may operate on. This is to avoid allocations during
unpinning, which may be from inside FS_RECLAIM context (aka the
shrinker)
- On context deactivation on retirement of the last active request (which
is before we know the context has been saved), we add the
preallocated node onto a barrier list on each engine
- On engine idling, we emit a switch to kernel context. When this
switch completes, we know that all previous contexts must have been
saved, and so on retiring this request we can finally unpin all the
contexts that were marked as deactivated prior to the switch.
We can enhance this in future by flushing all the idle contexts on a
regular heartbeat pulse of a switch to kernel context, which will also
be used to check for hung engines.
v2: intel_context_active_acquire/_release
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-1-chris@chris-wilson.co.uk
2019-06-14 16:46:04 +00:00
|
|
|
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
|
|
|
|
struct intel_engine_cs *engine)
|
|
|
|
{
|
2019-07-25 12:50:56 +00:00
|
|
|
intel_engine_mask_t tmp, mask = engine->mask;
|
2020-01-29 23:23:45 +00:00
|
|
|
struct llist_node *first = NULL, *last = NULL;
|
2019-10-17 16:18:52 +00:00
|
|
|
struct intel_gt *gt = engine->gt;
|
2019-06-18 07:41:28 +00:00
|
|
|
int err;
|
2019-06-14 16:46:04 +00:00
|
|
|
|
2019-10-21 17:43:39 +00:00
|
|
|
GEM_BUG_ON(i915_active_is_idle(ref));
|
2020-01-06 11:42:33 +00:00
|
|
|
|
|
|
|
/* Wait until the previous preallocation is completed */
|
|
|
|
while (!llist_empty(&ref->preallocated_barriers))
|
|
|
|
cond_resched();
|
2019-08-02 10:00:15 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Preallocate a node for each physical engine supporting the target
|
|
|
|
* engine (remember virtual engines have more than one sibling).
|
|
|
|
* We can then use the preallocated nodes in
|
|
|
|
* i915_active_acquire_barrier()
|
|
|
|
*/
|
2020-01-17 11:06:02 +00:00
|
|
|
GEM_BUG_ON(!mask);
|
2019-10-17 16:18:52 +00:00
|
|
|
for_each_engine_masked(engine, gt, mask, tmp) {
|
2019-08-09 18:25:18 +00:00
|
|
|
u64 idx = engine->kernel_context->timeline->fence_context;
|
2020-01-29 23:23:45 +00:00
|
|
|
struct llist_node *prev = first;
|
2019-06-14 16:46:04 +00:00
|
|
|
struct active_node *node;
|
|
|
|
|
2019-08-02 10:00:15 +00:00
|
|
|
node = reuse_idle_barrier(ref, idx);
|
|
|
|
if (!node) {
|
|
|
|
node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
|
|
|
|
if (!node) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto unwind;
|
|
|
|
}
|
|
|
|
|
2019-10-04 13:40:00 +00:00
|
|
|
RCU_INIT_POINTER(node->base.fence, NULL);
|
|
|
|
node->base.cb.func = node_retire;
|
2019-08-02 10:00:15 +00:00
|
|
|
node->timeline = idx;
|
|
|
|
node->ref = ref;
|
2019-06-14 16:46:04 +00:00
|
|
|
}
|
|
|
|
|
2019-10-04 13:40:00 +00:00
|
|
|
if (!i915_active_fence_isset(&node->base)) {
|
2019-08-02 10:00:15 +00:00
|
|
|
/*
|
|
|
|
* Mark this as being *our* unconnected proto-node.
|
|
|
|
*
|
|
|
|
* Since this node is not in any list, and we have
|
|
|
|
* decoupled it from the rbtree, we can reuse the
|
|
|
|
* request to indicate this is an idle-barrier node
|
|
|
|
* and then we can use the rb_node and list pointers
|
|
|
|
* for our tracking of the pending barrier.
|
|
|
|
*/
|
2019-10-04 13:40:00 +00:00
|
|
|
RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
|
|
|
|
node->base.cb.node.prev = (void *)engine;
|
2019-08-02 10:00:15 +00:00
|
|
|
atomic_inc(&ref->count);
|
|
|
|
}
|
2019-11-27 13:45:27 +00:00
|
|
|
GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
|
2019-06-14 16:46:04 +00:00
|
|
|
|
2019-08-02 10:00:15 +00:00
|
|
|
GEM_BUG_ON(barrier_to_engine(node) != engine);
|
2020-01-29 23:23:45 +00:00
|
|
|
first = barrier_to_ll(node);
|
|
|
|
first->next = prev;
|
|
|
|
if (!last)
|
|
|
|
last = first;
|
2019-06-18 07:41:28 +00:00
|
|
|
intel_engine_pm_get(engine);
|
2019-06-14 16:46:04 +00:00
|
|
|
}
|
|
|
|
|
2020-01-06 11:42:33 +00:00
|
|
|
GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
|
2020-01-29 23:23:45 +00:00
|
|
|
llist_add_batch(first, last, &ref->preallocated_barriers);
|
2020-01-06 11:42:33 +00:00
|
|
|
|
2019-06-18 07:41:28 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
unwind:
|
2020-01-29 23:23:45 +00:00
|
|
|
while (first) {
|
|
|
|
struct active_node *node = barrier_from_ll(first);
|
2019-06-18 07:41:28 +00:00
|
|
|
|
2020-01-29 23:23:45 +00:00
|
|
|
first = first->next;
|
2020-01-06 11:42:33 +00:00
|
|
|
|
2019-08-02 10:00:15 +00:00
|
|
|
atomic_dec(&ref->count);
|
|
|
|
intel_engine_pm_put(barrier_to_engine(node));
|
2019-06-18 07:41:28 +00:00
|
|
|
|
|
|
|
kmem_cache_free(global.slab_cache, node);
|
|
|
|
}
|
2019-06-14 16:46:04 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
void i915_active_acquire_barrier(struct i915_active *ref)
|
|
|
|
{
|
|
|
|
struct llist_node *pos, *next;
|
2019-11-14 17:25:35 +00:00
|
|
|
unsigned long flags;
|
2019-06-14 16:46:04 +00:00
|
|
|
|
2019-06-21 18:38:00 +00:00
|
|
|
GEM_BUG_ON(i915_active_is_idle(ref));
|
2019-06-14 16:46:04 +00:00
|
|
|
|
2019-08-02 10:00:15 +00:00
|
|
|
/*
|
|
|
|
* Transfer the list of preallocated barriers into the
|
|
|
|
* i915_active rbtree, but only as proto-nodes. They will be
|
|
|
|
* populated by i915_request_add_active_barriers() to point to the
|
|
|
|
* request that will eventually release them.
|
|
|
|
*/
|
|
|
|
llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
|
|
|
|
struct active_node *node = barrier_from_ll(pos);
|
|
|
|
struct intel_engine_cs *engine = barrier_to_engine(node);
|
2019-06-14 16:46:04 +00:00
|
|
|
struct rb_node **p, *parent;
|
|
|
|
|
2019-11-20 12:54:33 +00:00
|
|
|
spin_lock_irqsave_nested(&ref->tree_lock, flags,
|
|
|
|
SINGLE_DEPTH_NESTING);
|
2019-06-14 16:46:04 +00:00
|
|
|
parent = NULL;
|
|
|
|
p = &ref->tree.rb_node;
|
|
|
|
while (*p) {
|
2019-08-02 10:00:15 +00:00
|
|
|
struct active_node *it;
|
|
|
|
|
2019-06-14 16:46:04 +00:00
|
|
|
parent = *p;
|
2019-08-02 10:00:15 +00:00
|
|
|
|
|
|
|
it = rb_entry(parent, struct active_node, node);
|
|
|
|
if (it->timeline < node->timeline)
|
2019-06-14 16:46:04 +00:00
|
|
|
p = &parent->rb_right;
|
|
|
|
else
|
|
|
|
p = &parent->rb_left;
|
|
|
|
}
|
|
|
|
rb_link_node(&node->node, parent, p);
|
|
|
|
rb_insert_color(&node->node, &ref->tree);
|
2019-11-20 12:54:33 +00:00
|
|
|
spin_unlock_irqrestore(&ref->tree_lock, flags);
|
2019-06-14 16:46:04 +00:00
|
|
|
|
2019-10-04 13:40:01 +00:00
|
|
|
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
|
2019-08-02 10:00:15 +00:00
|
|
|
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
|
2020-03-23 09:28:37 +00:00
|
|
|
intel_engine_pm_put_delay(engine, 1);
|
2019-06-14 16:46:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-27 13:45:27 +00:00
|
|
|
static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
|
|
|
|
{
|
|
|
|
return __active_fence_slot(&barrier_from_ll(node)->base);
|
|
|
|
}
|
|
|
|
|
2019-08-02 10:00:15 +00:00
|
|
|
void i915_request_add_active_barriers(struct i915_request *rq)
|
2019-06-14 16:46:04 +00:00
|
|
|
{
|
|
|
|
struct intel_engine_cs *engine = rq->engine;
|
|
|
|
struct llist_node *node, *next;
|
2019-10-04 13:40:00 +00:00
|
|
|
unsigned long flags;
|
2019-06-14 16:46:04 +00:00
|
|
|
|
2019-12-21 16:03:24 +00:00
|
|
|
GEM_BUG_ON(!intel_context_is_barrier(rq->context));
|
2019-08-02 10:00:15 +00:00
|
|
|
GEM_BUG_ON(intel_engine_is_virtual(engine));
|
drm/i915: Mark i915_request.timeline as a volatile, rcu pointer
The request->timeline is only valid until the request is retired (i.e.
before it is completed). Upon retiring the request, the context may be
unpinned and freed, and along with it the timeline may be freed. We
therefore need to be very careful when chasing rq->timeline that the
pointer does not disappear beneath us. The vast majority of users are in
a protected context, either during request construction or retirement,
where the timeline->mutex is held and the timeline cannot disappear. It
is those few off the beaten path (where we access a second timeline) that
need extra scrutiny -- to be added in the next patch after first adding
the warnings about dangerous access.
One complication, where we cannot use the timeline->mutex itself, is
during request submission onto hardware (under spinlocks). Here, we want
to check on the timeline to finalize the breadcrumb, and so we need to
impose a second rule to ensure that the request->timeline is indeed
valid. As we are submitting the request, its context and timeline must
be pinned, as it will be used by the hardware. Since it is pinned, we
know the request->timeline must still be valid, and we cannot submit the
idle barrier until after we release the engine->active.lock, ergo while
submitting and holding that spinlock, a second thread cannot release the
timeline.
v2: Don't be lazy inside selftests; hold the timeline->mutex for as long
as we need it, and tidy up acquiring the timeline with a bit of
refactoring (i915_active_add_request)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-1-chris@chris-wilson.co.uk
2019-09-19 11:19:10 +00:00
|
|
|
GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
|
2019-08-02 10:00:15 +00:00
|
|
|
|
2019-10-04 13:40:00 +00:00
|
|
|
node = llist_del_all(&engine->barrier_tasks);
|
|
|
|
if (!node)
|
|
|
|
return;
|
2019-08-02 10:00:15 +00:00
|
|
|
/*
|
|
|
|
* Attach the list of proto-fences to the in-flight request such
|
|
|
|
* that the parent i915_active will be released when this request
|
|
|
|
* is retired.
|
|
|
|
*/
|
2019-10-04 13:40:00 +00:00
|
|
|
spin_lock_irqsave(&rq->lock, flags);
|
|
|
|
llist_for_each_safe(node, next, node) {
|
2019-11-27 13:45:27 +00:00
|
|
|
/* serialise with reuse_idle_barrier */
|
|
|
|
smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
|
2019-10-04 13:40:00 +00:00
|
|
|
list_add_tail((struct list_head *)node, &rq->fence.cb_list);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&rq->lock, flags);
|
|
|
|
}
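For orientation, a simplified sketch of the engine-idling step described earlier: a fresh request on the engine's kernel context collects the pending barrier nodes before submission, so retiring that request drops the tracked references. The helper name is invented and the real engine-parking path does more than this; i915_request_create() and i915_request_add() are assumed from i915_request.h.

/*
 * Illustrative sketch only: emit a kernel-context request that carries
 * the engine's pending barriers. Error handling and the surrounding
 * power-management flow are elided.
 */
static int example_switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = i915_request_create(engine->kernel_context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Attach engine->barrier_tasks so they ride on this request. */
	i915_request_add_active_barriers(rq);

	i915_request_add(rq);
	return 0;
}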
|
|
|
|
|
|
|
|
/*
|
|
|
|
* __i915_active_fence_set: Update the last active fence along its timeline
|
|
|
|
* @active: the active tracker
|
|
|
|
* @fence: the new fence (under construction)
|
|
|
|
*
|
|
|
|
* Records the new @fence as the last active fence along its timeline in
|
|
|
|
* this active tracker, moving the tracking callbacks from the previous
|
|
|
|
* fence onto this one. Returns the previous fence (if not already completed),
|
|
|
|
* which the caller must ensure is executed before the new fence. To ensure
|
|
|
|
* that the order of fences within the timeline of the i915_active_fence is
|
2019-11-27 13:45:27 +00:00
|
|
|
* understood, it should be locked by the caller.
|
2019-10-04 13:40:00 +00:00
|
|
|
*/
|
|
|
|
struct dma_fence *
|
|
|
|
__i915_active_fence_set(struct i915_active_fence *active,
|
|
|
|
struct dma_fence *fence)
|
|
|
|
{
|
|
|
|
struct dma_fence *prev;
|
|
|
|
unsigned long flags;
|
|
|
|
|
2019-11-27 13:45:27 +00:00
|
|
|
if (fence == rcu_access_pointer(active->fence))
|
|
|
|
return fence;
|
|
|
|
|
2019-10-04 13:40:00 +00:00
|
|
|
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
|
|
|
|
|
2019-11-27 13:45:27 +00:00
|
|
|
/*
|
|
|
|
* Consider that we have two threads arriving (A and B), with
|
|
|
|
* C already resident as the active->fence.
|
|
|
|
*
|
|
|
|
* A does the xchg first, and so it sees C or NULL depending
|
|
|
|
* on the timing of the interrupt handler. If it is NULL, the
|
|
|
|
* previous fence must have been signaled and we know that
|
|
|
|
* we are first on the timeline. If it is still present,
|
|
|
|
* we acquire the lock on that fence and serialise with the interrupt
|
|
|
|
* handler, in the process removing it from any future interrupt
|
|
|
|
* callback. A will then wait on C before executing (if present).
|
|
|
|
*
|
|
|
|
* As B is second, it sees A as the previous fence and so waits for
|
|
|
|
* it to complete its transition and takes over the occupancy for
|
|
|
|
* itself -- remembering that it needs to wait on A before executing.
|
|
|
|
*
|
|
|
|
* Note the strong ordering of the timeline also provides consistent
|
|
|
|
* nesting rules for the fence->lock; the inner lock is always the
|
|
|
|
* older lock.
|
|
|
|
*/
|
|
|
|
spin_lock_irqsave(fence->lock, flags);
|
|
|
|
prev = xchg(__active_fence_slot(active), fence);
|
2019-10-04 13:40:00 +00:00
|
|
|
if (prev) {
|
|
|
|
GEM_BUG_ON(prev == fence);
|
|
|
|
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
|
|
|
|
__list_del_entry(&active->cb.node);
|
|
|
|
spin_unlock(prev->lock); /* serialise with prev->cb_list */
|
2019-08-02 10:00:15 +00:00
|
|
|
}
|
2019-10-04 13:40:00 +00:00
|
|
|
list_add_tail(&active->cb.node, &fence->cb_list);
|
|
|
|
spin_unlock_irqrestore(fence->lock, flags);
|
|
|
|
|
|
|
|
return prev;
|
2019-06-14 16:46:04 +00:00
|
|
|
}
|
|
|
|
|
2019-10-04 13:40:00 +00:00
|
|
|
int i915_active_fence_set(struct i915_active_fence *active,
|
|
|
|
struct i915_request *rq)
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 13:00:05 +00:00
|
|
|
{
|
2019-10-04 13:40:00 +00:00
|
|
|
struct dma_fence *fence;
|
|
|
|
int err = 0;
|
2019-02-05 13:00:05 +00:00
|
|
|
|
2019-10-04 13:40:00 +00:00
|
|
|
/* Must maintain timeline ordering wrt previous active requests */
|
|
|
|
rcu_read_lock();
|
|
|
|
fence = __i915_active_fence_set(active, &rq->fence);
|
|
|
|
if (fence) /* but the previous fence may not belong to that timeline! */
|
|
|
|
fence = dma_fence_get_rcu(fence);
|
|
|
|
rcu_read_unlock();
|
|
|
|
if (fence) {
|
|
|
|
err = i915_request_await_dma_fence(rq, fence);
|
|
|
|
dma_fence_put(fence);
|
|
|
|
}
|
2019-02-05 13:00:05 +00:00
|
|
|
|
2019-10-04 13:40:00 +00:00
|
|
|
return err;
|
2019-02-05 13:00:05 +00:00
|
|
|
}
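A hedged sketch of how a caller outside this file might use an i915_active_fence to track the most recent request writing to some object. The structure, field, and helper are invented, and the fence slot is assumed to have been initialised (fence pointer NULL) elsewhere.

/*
 * Illustrative sketch only: record the last write to a hypothetical
 * object. i915_active_fence_set() above already queues an await on the
 * previously tracked request, preserving ordering along the object's
 * timeline.
 */
struct example_object {
	struct i915_active_fence last_write; /* assumed initialised */
};

static int example_mark_write(struct example_object *obj,
			      struct i915_request *rq)
{
	/* Caller is assumed to hold whatever lock orders writes to obj. */
	return i915_active_fence_set(&obj->last_write, rq);
}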
|
|
|
|
|
2019-10-04 13:40:00 +00:00
|
|
|
void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
|
2019-02-05 13:00:05 +00:00
|
|
|
{
|
2019-11-27 13:45:27 +00:00
|
|
|
active_fence_cb(fence, cb);
|
2019-02-05 13:00:05 +00:00
|
|
|
}
|
|
|
|
|
2020-03-27 11:22:11 +00:00
|
|
|
struct auto_active {
|
|
|
|
struct i915_active base;
|
|
|
|
struct kref ref;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct i915_active *i915_active_get(struct i915_active *ref)
|
|
|
|
{
|
|
|
|
struct auto_active *aa = container_of(ref, typeof(*aa), base);
|
|
|
|
|
|
|
|
kref_get(&aa->ref);
|
|
|
|
return &aa->base;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void auto_release(struct kref *ref)
|
|
|
|
{
|
|
|
|
struct auto_active *aa = container_of(ref, typeof(*aa), ref);
|
|
|
|
|
|
|
|
i915_active_fini(&aa->base);
|
|
|
|
kfree(aa);
|
|
|
|
}
|
|
|
|
|
|
|
|
void i915_active_put(struct i915_active *ref)
|
|
|
|
{
|
|
|
|
struct auto_active *aa = container_of(ref, typeof(*aa), base);
|
|
|
|
|
|
|
|
kref_put(&aa->ref, auto_release);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int auto_active(struct i915_active *ref)
|
|
|
|
{
|
|
|
|
i915_active_get(ref);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void auto_retire(struct i915_active *ref)
|
|
|
|
{
|
|
|
|
i915_active_put(ref);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct i915_active *i915_active_create(void)
|
|
|
|
{
|
|
|
|
struct auto_active *aa;
|
|
|
|
|
|
|
|
aa = kmalloc(sizeof(*aa), GFP_KERNEL);
|
|
|
|
if (!aa)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
kref_init(&aa->ref);
|
|
|
|
i915_active_init(&aa->base, auto_active, auto_retire);
|
|
|
|
|
|
|
|
return &aa->base;
|
|
|
|
}
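The reference counting above gives the tracker its own lifetime; a short sketch of the intended create/get/put pattern follows. The caller is hypothetical and only the i915_active_create(), i915_active_get() and i915_active_put() calls defined above are real.

/*
 * Illustrative sketch only: lifecycle of a self-managed tracker. The
 * creation reference comes from kref_init() in i915_active_create().
 */
static int example_auto_active_lifecycle(void)
{
	struct i915_active *ref;

	ref = i915_active_create();
	if (!ref)
		return -ENOMEM;

	/* A second user takes its own reference... */
	i915_active_get(ref);
	/* ...and later drops it. */
	i915_active_put(ref);

	/* Dropping the creation reference frees the tracker. */
	i915_active_put(ref);
	return 0;
}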
|
|
|
|
|
2019-02-05 13:00:02 +00:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
|
|
|
#include "selftests/i915_active.c"
|
|
|
|
#endif
|
2019-02-05 13:00:04 +00:00
|
|
|
|
2019-03-05 21:38:30 +00:00
|
|
|
static void i915_global_active_shrink(void)
|
2019-02-05 13:00:04 +00:00
|
|
|
{
|
2019-03-05 21:38:30 +00:00
|
|
|
kmem_cache_shrink(global.slab_cache);
|
2019-02-05 13:00:04 +00:00
|
|
|
}
|
|
|
|
|
2019-03-05 21:38:30 +00:00
|
|
|
static void i915_global_active_exit(void)
|
2019-02-28 10:20:33 +00:00
|
|
|
{
|
2019-03-05 21:38:30 +00:00
|
|
|
kmem_cache_destroy(global.slab_cache);
|
2019-02-28 10:20:33 +00:00
|
|
|
}
|
|
|
|
|
2019-03-05 21:38:30 +00:00
|
|
|
static struct i915_global_active global = { {
|
|
|
|
.shrink = i915_global_active_shrink,
|
|
|
|
.exit = i915_global_active_exit,
|
|
|
|
} };
|
|
|
|
|
|
|
|
int __init i915_global_active_init(void)
|
2019-02-05 13:00:04 +00:00
|
|
|
{
|
2019-03-05 21:38:30 +00:00
|
|
|
global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
|
|
|
|
if (!global.slab_cache)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
i915_global_register(&global.base);
|
|
|
|
return 0;
|
2019-02-05 13:00:04 +00:00
|
|
|
}
|