forked from Minki/linux
drm/i915: More use of the cached LRC state
Since: commit 82352e908a
Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Date: Fri Jan 15 17:12:45 2016 +0000 drm/i915: Cache LRC state page in the context and: commit 0eb973d31d
Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Date: Fri Jan 15 15:10:28 2016 +0000 drm/i915: Cache ringbuffer GTT VMA We can also remove the ring buffer start updates on every context update since the address will not change for the duration of the LRC pin. For GuC we can remove the update altogether because it only cares about the ring buffer start. Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Alex Dai <yu.dai@intel.com> Cc: Dave Gordon <david.s.gordon@intel.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Link: http://patchwork.freedesktop.org/patch/msgid/1453466567-33369-1-git-send-email-tvrtko.ursulin@linux.intel.com
This commit is contained in:
parent
947eaebc31
commit
77b04a0428
@ -560,29 +560,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define CTX_RING_BUFFER_START 0x08
|
|
||||||
|
|
||||||
/* Update the ringbuffer pointer in a saved context image */
|
|
||||||
static void lr_context_update(struct drm_i915_gem_request *rq)
|
|
||||||
{
|
|
||||||
enum intel_ring_id ring_id = rq->ring->id;
|
|
||||||
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
|
|
||||||
struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
|
|
||||||
struct page *page;
|
|
||||||
uint32_t *reg_state;
|
|
||||||
|
|
||||||
BUG_ON(!ctx_obj);
|
|
||||||
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
|
|
||||||
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
|
|
||||||
|
|
||||||
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
|
|
||||||
reg_state = kmap_atomic(page);
|
|
||||||
|
|
||||||
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
|
|
||||||
|
|
||||||
kunmap_atomic(reg_state);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* i915_guc_submit() - Submit commands through GuC
|
* i915_guc_submit() - Submit commands through GuC
|
||||||
* @client: the guc client where commands will go through
|
* @client: the guc client where commands will go through
|
||||||
@ -597,10 +574,6 @@ int i915_guc_submit(struct i915_guc_client *client,
|
|||||||
enum intel_ring_id ring_id = rq->ring->id;
|
enum intel_ring_id ring_id = rq->ring->id;
|
||||||
int q_ret, b_ret;
|
int q_ret, b_ret;
|
||||||
|
|
||||||
/* Need this because of the deferred pin ctx and ring */
|
|
||||||
/* Shall we move this right after ring is pinned? */
|
|
||||||
lr_context_update(rq);
|
|
||||||
|
|
||||||
q_ret = guc_add_workqueue_item(client, rq);
|
q_ret = guc_add_workqueue_item(client, rq);
|
||||||
if (q_ret == 0)
|
if (q_ret == 0)
|
||||||
b_ret = guc_ring_doorbell(client);
|
b_ret = guc_ring_doorbell(client);
|
||||||
|
@ -393,7 +393,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
|
|||||||
uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
|
uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
|
||||||
|
|
||||||
reg_state[CTX_RING_TAIL+1] = rq->tail;
|
reg_state[CTX_RING_TAIL+1] = rq->tail;
|
||||||
reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start;
|
|
||||||
|
|
||||||
if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
|
if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
|
||||||
/* True 32b PPGTT with dynamic page allocation: update PDP
|
/* True 32b PPGTT with dynamic page allocation: update PDP
|
||||||
@ -1067,6 +1066,7 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
|
|||||||
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
|
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
|
||||||
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
|
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
|
||||||
struct page *lrc_state_page;
|
struct page *lrc_state_page;
|
||||||
|
uint32_t *lrc_reg_state;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
|
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
|
||||||
@ -1088,7 +1088,9 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
|
|||||||
|
|
||||||
ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
|
ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
|
||||||
intel_lr_context_descriptor_update(ctx, ring);
|
intel_lr_context_descriptor_update(ctx, ring);
|
||||||
ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page);
|
lrc_reg_state = kmap(lrc_state_page);
|
||||||
|
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
|
||||||
|
ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
|
||||||
ctx_obj->dirty = true;
|
ctx_obj->dirty = true;
|
||||||
|
|
||||||
/* Invalidate GuC TLB. */
|
/* Invalidate GuC TLB. */
|
||||||
|
Loading…
Reference in New Issue
Block a user