forked from Minki/linux
drm/i915/gt: Widen CSB pointer to u64 for the parsers
A CSB entry is 64b, and it is simpler for us to treat it as an array of
64b entries than as an array of pairs of 32b entries.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200915134923.30088-1-chris@chris-wilson.co.uk
(cherry picked from commit f24a44e52f)
(cherry picked from commit 3d4dbe0e0f0d04ebcea917b7279586817da8cf46)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
This commit is contained in:
parent: db9bc2d35f
commit: ca05277e40
@@ -278,7 +278,7 @@ struct intel_engine_execlists {
	 *
	 * Note these register may be either mmio or HWSP shadow.
	 */
-	u32 *csb_status;
+	u64 *csb_status;
 
	/**
	 * @csb_size: context status buffer FIFO size
@@ -2463,7 +2463,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
 }
 
 static inline void
-invalidate_csb_entries(const u32 *first, const u32 *last)
+invalidate_csb_entries(const u64 *first, const u64 *last)
 {
	clflush((void *)first);
	clflush((void *)last);
@@ -2495,14 +2495,12 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
 * bits 47-57: sw context id of the lrc the GT switched away from
 * bits 58-63: sw counter of the lrc the GT switched away from
 */
-static inline bool
-gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+static inline bool gen12_csb_parse(const u64 *csb)
 {
-	u32 lower_dw = csb[0];
-	u32 upper_dw = csb[1];
-	bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw);
-	bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
-	bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+	u64 entry = READ_ONCE(*csb);
+	bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(entry));
+	bool new_queue =
+		lower_32_bits(entry) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
 
	/*
	 * The context switch detail is not guaranteed to be 5 when a preemption
@@ -2512,7 +2510,7 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
	 * would require some extra handling, but we don't support that.
	 */
	if (!ctx_away_valid || new_queue) {
-		GEM_BUG_ON(!ctx_to_valid);
+		GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(entry)));
		return true;
	}
 
@@ -2521,12 +2519,11 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
	 * context switch on an unsuccessful wait instruction since we always
	 * use polling mode.
	 */
-	GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
+	GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(entry)));
	return false;
 }
 
-static inline bool
-gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+static inline bool gen8_csb_parse(const u64 *csb)
 {
	return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
 }
@@ -2534,7 +2531,7 @@ gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
 static void process_csb(struct intel_engine_cs *engine)
 {
	struct intel_engine_execlists * const execlists = &engine->execlists;
-	const u32 * const buf = execlists->csb_status;
+	const u64 * const buf = execlists->csb_status;
	const u8 num_entries = execlists->csb_size;
	u8 head, tail;
 
@@ -2615,12 +2612,14 @@ static void process_csb(struct intel_engine_cs *engine)
	 */
 
	ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
-		     head, buf[2 * head + 0], buf[2 * head + 1]);
+		     head,
+		     upper_32_bits(buf[head]),
+		     lower_32_bits(buf[head]));
 
	if (INTEL_GEN(engine->i915) >= 12)
-		promote = gen12_csb_parse(execlists, buf + 2 * head);
+		promote = gen12_csb_parse(buf + head);
	else
-		promote = gen8_csb_parse(execlists, buf + 2 * head);
+		promote = gen8_csb_parse(buf + head);
	if (promote) {
		struct i915_request * const *old = execlists->active;
 
@@ -5159,7 +5158,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
	}
 
	execlists->csb_status =
-		&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+		(u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
 
	execlists->csb_write =
		&engine->status_page.addr[intel_hws_csb_write_index(i915)];
 
Loading…
Reference in New Issue
Block a user