drm/i915: Make for_each_engine_masked work on intel_gt
The medium term goal is to eliminate the i915->engine[] array, and to get there we have recently introduced an equivalent array in intel_gt. Now we need to migrate the code further towards this state. The next step is to eliminate usage of i915->engine[] from the for_each_engine_masked iterator.

For this to work we also need to use engine->id as the index when populating the gt->engine[] array, and to adjust the default engine set indexing to use engine->legacy_idx instead of assuming gt->engine[] indexing.

v2:
 * Populate gt->engine[] earlier.
 * Check that we don't duplicate engine->legacy_idx.

v3:
 * Work around the initialization order issue between default_engines() and intel_engines_driver_register(), which sets engine->legacy_idx, for now. It will be fixed properly later.

v4:
 * Merge with forgotten v2.5.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191017161852.8836-1-tvrtko.ursulin@linux.intel.com
commit a50134b198
parent 1dfffa0051
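The heart of the change is the for_each_engine_masked() signature: its second argument becomes the struct intel_gt rather than the struct drm_i915_private, and the iterator walks gt->engine[] instead of i915->engine[]. A minimal before/after sketch of a caller, modelled on the intel_gt_clear_error_registers() hunk below (the surrounding declarations are assumed):

struct intel_engine_cs *engine;
intel_engine_mask_t tmp;

/* Before: keyed on the top-level device, walking i915->engine[]. */
for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
        gen8_clear_engine_error_register(engine);

/* After: keyed on the GT, walking gt->engine[]. */
for_each_engine_masked(engine, gt, engine_mask, tmp)
        gen8_clear_engine_error_register(engine);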
drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -203,15 +203,22 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce;
 
+		if (engine->legacy_idx == INVALID_ENGINE)
+			continue;
+
+		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+		GEM_BUG_ON(e->engines[engine->legacy_idx]);
+
 		ce = intel_context_create(ctx, engine);
 		if (IS_ERR(ce)) {
-			__free_engines(e, id);
+			__free_engines(e, e->num_engines + 1);
 			return ERR_CAST(ce);
 		}
 
-		e->engines[id] = ce;
-		e->num_engines = id + 1;
+		e->engines[engine->legacy_idx] = ce;
+		e->num_engines = max(e->num_engines, engine->legacy_idx);
 	}
+	e->num_engines++;
 
 	return e;
 }
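One subtlety in the hunk above: contexts are now stored at engine->legacy_idx rather than at the loop index, so e->engines[] can be populated out of loop order. The loop therefore tracks the highest legacy slot seen, and the final e->num_engines++ converts that index into a count. A minimal sketch of just the accounting, in isolation (variable names illustrative, and assuming the same skip of INVALID_ENGINE entries):

unsigned int num_engines = 0;

for_each_engine(engine, gt, id) {
        if (engine->legacy_idx == INVALID_ENGINE)
                continue;       /* engine has no legacy slot */
        /* Remember the highest legacy slot that was filled... */
        num_engines = max(num_engines, (unsigned int)engine->legacy_idx);
}
/* ...and turn that highest index into an element count. */
num_engines++;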
drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -277,6 +277,9 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
 	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
 
+	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
+		return -EINVAL;
+
 	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
 		return -EINVAL;
 
@@ -293,6 +296,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
 
 	engine->id = id;
+	engine->legacy_idx = INVALID_ENGINE;
 	engine->mask = BIT(id);
 	engine->i915 = gt->i915;
 	engine->gt = gt;
@@ -328,6 +332,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	intel_engine_sanitize_mmio(engine);
 
 	gt->engine_class[info->class][info->instance] = engine;
+	gt->engine[id] = engine;
 
 	intel_engine_add_user(engine);
 	gt->i915->engine[id] = engine;
drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -148,6 +148,7 @@ enum intel_engine_id {
 	VECS1,
 #define _VECS(n) (VECS0 + (n))
 	I915_NUM_ENGINES
+#define INVALID_ENGINE ((enum intel_engine_id)-1)
 };
 
 struct st_preempt_hang {
drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -160,10 +160,10 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
 	};
 
 	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
-		return -1;
+		return INVALID_ENGINE;
 
 	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
-		return -1;
+		return INVALID_ENGINE;
 
 	return map[ring->class].base + ring->instance;
 }
@@ -171,23 +171,15 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
 static void add_legacy_ring(struct legacy_ring *ring,
 			    struct intel_engine_cs *engine)
 {
-	int idx;
-
 	if (engine->gt != ring->gt || engine->class != ring->class) {
 		ring->gt = engine->gt;
 		ring->class = engine->class;
 		ring->instance = 0;
 	}
 
-	idx = legacy_ring_idx(ring);
-	if (unlikely(idx == -1))
-		return;
-
-	GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
-	ring->gt->engine[idx] = engine;
-	ring->instance++;
-
-	engine->legacy_idx = idx;
+	engine->legacy_idx = legacy_ring_idx(ring);
+	if (engine->legacy_idx != INVALID_ENGINE)
+		ring->instance++;
 }
 
 void intel_engines_driver_register(struct drm_i915_private *i915)
drivers/gpu/drm/i915/gt/intel_gt.c
@@ -186,7 +186,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 		struct intel_engine_cs *engine;
 		enum intel_engine_id id;
 
-		for_each_engine_masked(engine, i915, engine_mask, id)
+		for_each_engine_masked(engine, gt, engine_mask, id)
 			gen8_clear_engine_error_register(engine);
 	}
 }
drivers/gpu/drm/i915/gt/intel_hangcheck.c
@@ -237,7 +237,7 @@ static void hangcheck_declare_hang(struct intel_gt *gt,
 	hung &= ~stuck;
 	len = scnprintf(msg, sizeof(msg),
 			"%s on ", stuck == hung ? "no progress" : "hang");
-	for_each_engine_masked(engine, gt->i915, hung, tmp)
+	for_each_engine_masked(engine, gt, hung, tmp)
 		len += scnprintf(msg + len, sizeof(msg) - len,
 				 "%s, ", engine->name);
 	msg[len-2] = '\0';
drivers/gpu/drm/i915/gt/intel_reset.c
@@ -298,7 +298,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
 		intel_engine_mask_t tmp;
 
 		hw_mask = 0;
-		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+		for_each_engine_masked(engine, gt, engine_mask, tmp) {
 			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
 			hw_mask |= hw_engine_mask[engine->id];
 		}
@@ -432,7 +432,7 @@ static int gen11_reset_engines(struct intel_gt *gt,
 		hw_mask = GEN11_GRDOM_FULL;
 	} else {
 		hw_mask = 0;
-		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+		for_each_engine_masked(engine, gt, engine_mask, tmp) {
 			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
 			hw_mask |= hw_engine_mask[engine->id];
 			ret = gen11_lock_sfc(engine, &hw_mask);
@@ -451,7 +451,7 @@ sfc_unlock:
 	 * expiration).
 	 */
 	if (engine_mask != ALL_ENGINES)
-		for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+		for_each_engine_masked(engine, gt, engine_mask, tmp)
 			gen11_unlock_sfc(engine);
 
 	return ret;
@@ -510,7 +510,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
 	intel_engine_mask_t tmp;
 	int ret;
 
-	for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+	for_each_engine_masked(engine, gt, engine_mask, tmp) {
 		ret = gen8_engine_reset_prepare(engine);
 		if (ret && !reset_non_ready)
 			goto skip_reset;
@@ -536,7 +536,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
 		ret = gen6_reset_engines(gt, engine_mask, retry);
 
 skip_reset:
-	for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+	for_each_engine_masked(engine, gt, engine_mask, tmp)
 		gen8_engine_reset_cancel(engine);
 
 	return ret;
@@ -1206,7 +1206,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
 	 * single reset fails.
 	 */
 	if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
-		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+		for_each_engine_masked(engine, gt, engine_mask, tmp) {
 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
 					     &gt->reset.flags))
drivers/gpu/drm/i915/gvt/execlist.c
@@ -534,7 +534,7 @@ static void clean_execlist(struct intel_vgpu *vgpu,
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
 		kfree(s->ring_scan_buffer[engine->id]);
 		s->ring_scan_buffer[engine->id] = NULL;
 		s->ring_scan_buffer_size[engine->id] = 0;
@@ -548,7 +548,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 	struct intel_engine_cs *engine;
 	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
 }
 
drivers/gpu/drm/i915/gvt/scheduler.c
@@ -887,7 +887,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
 	intel_engine_mask_t tmp;
 
 	/* free the unsubmited workloads in the queues. */
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
 		list_for_each_entry_safe(pos, n,
 					 &s->workload_q_head[engine->id], list) {
 			list_del_init(&pos->list);
drivers/gpu/drm/i915/i915_active.c
@@ -590,8 +590,8 @@ match:
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = engine->i915;
 	intel_engine_mask_t tmp, mask = engine->mask;
+	struct intel_gt *gt = engine->gt;
 	struct llist_node *pos, *next;
 	int err;
 
@@ -603,7 +603,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 	 * We can then use the preallocated nodes in
 	 * i915_active_acquire_barrier()
 	 */
-	for_each_engine_masked(engine, i915, mask, tmp) {
+	for_each_engine_masked(engine, gt, mask, tmp) {
 		u64 idx = engine->kernel_context->timeline->fence_context;
 		struct active_node *node;
 
drivers/gpu/drm/i915/i915_drv.h
@@ -1415,10 +1415,10 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
 		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
 
 /* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
-	for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+	for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
 	(tmp__) ? \
-	((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
+	((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
 	0;)
 
 #define rb_to_uabi_engine(rb) \
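A hedged usage sketch of the reworked macro (the mask and the log message are illustrative; RCS0, VCS0, BIT() and intel_engine_mask_t come from the existing kernel headers):

struct intel_engine_cs *engine;
intel_engine_mask_t tmp;

/* Visit only the render and first video engine present on this GT. */
for_each_engine_masked(engine, gt, BIT(RCS0) | BIT(VCS0), tmp)
        pr_info("i915: %s selected\n", engine->name);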