drm/i915/gt: Support multiple pinned timelines
We may need to allocate more than one pinned context/timeline for each
engine which can utilise the per-engine HWSP, so we need to give each a
different offset within it.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200730183906.25422-1-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
commit d1bf5dd8f6
parent eb4dedae92
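A note on the mechanism, for readers skimming the diff below: until a context's timeline is actually allocated, ce->timeline is NULL, so create_pinned_context() temporarily stashes the requested HWSP offset in the pointer's spare low bits via page_pack_bits(), and __execlists_context_alloc() later recovers it with page_mask_bits()/page_unmask_bits() before building the real timeline. What follows is a minimal, userspace-only sketch of that pointer-packing idea; pack_bits(), mask_bits(), unmask_bits() and the 0x40 offset are simplified stand-ins made up for this illustration, not the driver's helpers.

/* Self-contained illustration (userspace, not driver code). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HWS_PAGE_MASK	(~(uintptr_t)0xfff)	/* assume a 4 KiB status page */

/* Stash a sub-page offset in the low bits of a page-aligned pointer. */
static void *pack_bits(void *ptr, unsigned int offset)
{
	return (void *)((uintptr_t)ptr | offset);
}

/* Recover just the pointer part (high bits)... */
static void *mask_bits(void *packed)
{
	return (void *)((uintptr_t)packed & HWS_PAGE_MASK);
}

/* ...or just the stashed offset (low bits). */
static unsigned int unmask_bits(void *packed)
{
	return (unsigned int)((uintptr_t)packed & ~HWS_PAGE_MASK);
}

int main(void)
{
	void *timeline = NULL;		 /* ce->timeline before allocation */
	unsigned int hwsp_offset = 0x40; /* example offset into the HWSP page */

	/* create_pinned_context(): no timeline yet, remember the offset. */
	timeline = pack_bits(timeline, hwsp_offset);

	/* __execlists_context_alloc(): still no real timeline pointer... */
	assert(mask_bits(timeline) == NULL);

	/* ...but the requested HWSP offset survives until allocation. */
	printf("pinned timeline wants HWSP offset 0x%x\n", unmask_bits(timeline));
	return 0;
}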
@@ -886,7 +886,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
 		struct intel_timeline *timeline;
 
-		timeline = intel_timeline_create(&i915->gt, NULL);
+		timeline = intel_timeline_create(&i915->gt);
 		if (IS_ERR(timeline)) {
 			context_close(ctx);
 			return ERR_CAST(timeline);
@@ -785,9 +785,11 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
 }
 
 static struct intel_context *
-create_kernel_context(struct intel_engine_cs *engine)
+create_pinned_context(struct intel_engine_cs *engine,
+		      unsigned int hwsp,
+		      struct lock_class_key *key,
+		      const char *name)
 {
-	static struct lock_class_key kernel;
 	struct intel_context *ce;
 	int err;
 
@@ -796,6 +798,7 @@ create_kernel_context(struct intel_engine_cs *engine)
 		return ce;
 
 	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+	ce->timeline = page_pack_bits(NULL, hwsp);
 
 	err = intel_context_pin(ce); /* perma-pin so it is always available */
 	if (err) {
@@ -809,11 +812,20 @@ create_kernel_context(struct intel_engine_cs *engine)
 	 * should we need to inject GPU operations during their request
 	 * construction.
 	 */
-	lockdep_set_class(&ce->timeline->mutex, &kernel);
+	lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
 
 	return ce;
 }
 
+static struct intel_context *
+create_kernel_context(struct intel_engine_cs *engine)
+{
+	static struct lock_class_key kernel;
+
+	return create_pinned_context(engine, I915_GEM_HWS_SEQNO_ADDR,
+				     &kernel, "kernel_context");
+}
+
 /**
  * intel_engines_init_common - initialize cengine state which might require hw access
  * @engine: Engine to initialize.
@@ -5306,6 +5306,14 @@ populate_lr_context(struct intel_context *ce,
 	return 0;
 }
 
+static struct intel_timeline *pinned_timeline(struct intel_context *ce)
+{
+	struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
+
+	return intel_timeline_create_from_engine(ce->engine,
+						 page_unmask_bits(tl));
+}
+
 static int __execlists_context_alloc(struct intel_context *ce,
 				     struct intel_engine_cs *engine)
 {
@@ -5336,19 +5344,17 @@ static int __execlists_context_alloc(struct intel_context *ce,
 		goto error_deref_obj;
 	}
 
-	if (!ce->timeline) {
+	if (!page_mask_bits(ce->timeline)) {
 		struct intel_timeline *tl;
-		struct i915_vma *hwsp;
 
 		/*
 		 * Use the static global HWSP for the kernel context, and
 		 * a dynamically allocated cacheline for everyone else.
 		 */
-		hwsp = NULL;
-		if (unlikely(intel_context_is_barrier(ce)))
-			hwsp = engine->status_page.vma;
-
-		tl = intel_timeline_create(engine->gt, hwsp);
+		if (unlikely(ce->timeline))
+			tl = pinned_timeline(ce);
+		else
+			tl = intel_timeline_create(engine->gt);
 		if (IS_ERR(tl)) {
 			ret = PTR_ERR(tl);
 			goto error_deref_obj;
@@ -1250,7 +1250,8 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 		return -ENODEV;
 	}
 
-	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
+	timeline = intel_timeline_create_from_engine(engine,
+						     I915_GEM_HWS_SEQNO_ADDR);
 	if (IS_ERR(timeline)) {
 		err = PTR_ERR(timeline);
 		goto err;
@@ -215,7 +215,8 @@ static void cacheline_free(struct intel_timeline_cacheline *cl)
 
 static int intel_timeline_init(struct intel_timeline *timeline,
 			       struct intel_gt *gt,
-			       struct i915_vma *hwsp)
+			       struct i915_vma *hwsp,
+			       unsigned int offset)
 {
 	void *vaddr;
 
@@ -246,8 +247,7 @@ static int intel_timeline_init(struct intel_timeline *timeline,
 
 		vaddr = page_mask_bits(cl->vaddr);
 	} else {
-		timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
-
+		timeline->hwsp_offset = offset;
 		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
 		if (IS_ERR(vaddr))
 			return PTR_ERR(vaddr);
@@ -297,7 +297,9 @@ static void intel_timeline_fini(struct intel_timeline *timeline)
 }
 
 struct intel_timeline *
-intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
+__intel_timeline_create(struct intel_gt *gt,
+			struct i915_vma *global_hwsp,
+			unsigned int offset)
 {
 	struct intel_timeline *timeline;
 	int err;
@@ -306,7 +308,7 @@ intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
 	if (!timeline)
 		return ERR_PTR(-ENOMEM);
 
-	err = intel_timeline_init(timeline, gt, global_hwsp);
+	err = intel_timeline_init(timeline, gt, global_hwsp, offset);
 	if (err) {
 		kfree(timeline);
 		return ERR_PTR(err);
@@ -29,10 +29,27 @@
 
 #include "i915_active.h"
 #include "i915_syncmap.h"
-#include "gt/intel_timeline_types.h"
+#include "intel_timeline_types.h"
 
 struct intel_timeline *
-intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
+__intel_timeline_create(struct intel_gt *gt,
+			struct i915_vma *global_hwsp,
+			unsigned int offset);
+
+static inline struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt)
+{
+	return __intel_timeline_create(gt, NULL, 0);
+}
+
+static inline struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+				  unsigned int offset)
+{
+	return __intel_timeline_create(engine->gt,
+				       engine->status_page.vma,
+				       offset);
+}
 
 static inline struct intel_timeline *
 intel_timeline_get(struct intel_timeline *timeline)
@@ -153,7 +153,7 @@ static int mock_context_alloc(struct intel_context *ce)
 		return -ENOMEM;
 
 	GEM_BUG_ON(ce->timeline);
-	ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+	ce->timeline = intel_timeline_create(ce->engine->gt);
 	if (IS_ERR(ce->timeline)) {
 		kfree(ce->engine);
 		return PTR_ERR(ce->timeline);
@@ -72,7 +72,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
 		unsigned long cacheline;
 		int err;
 
-		tl = intel_timeline_create(state->gt, NULL);
+		tl = intel_timeline_create(state->gt);
 		if (IS_ERR(tl))
 			return PTR_ERR(tl);
 
@@ -487,7 +487,7 @@ checked_intel_timeline_create(struct intel_gt *gt)
 {
 	struct intel_timeline *tl;
 
-	tl = intel_timeline_create(gt, NULL);
+	tl = intel_timeline_create(gt);
 	if (IS_ERR(tl))
 		return tl;
 
@@ -660,7 +660,7 @@ static int live_hwsp_wrap(void *arg)
 	 * foreign GPU references.
 	 */
 
-	tl = intel_timeline_create(gt, NULL);
+	tl = intel_timeline_create(gt);
 	if (IS_ERR(tl))
 		return PTR_ERR(tl);
 