drm/i915/gt: Replace opencoded i915_gem_object_pin_map()

As we have a pin_map interface that knows how to flush the data to the
device, use it. The only downside is that we keep the kmap around, as
once acquired we keep the mapping cached until the object's backing
store is released.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200708173748.32734-2-chris@chris-wilson.co.uk
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: 2020-07-08 18:37:46 +01:00
Parent: 09137e9454
Commit: 59c94b9d26
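
For context, the mapping pattern the patch moves to looks roughly like the
sketch below. This is a minimal illustration, not the patch itself:
write_batch() and emit_commands() are hypothetical names standing in for
intel_init_workaround_bb() and the wa_bb_fn[] emitters, and the IS_ERR()
check is included only for completeness.

        /* Sketch: fill a GEM object from the CPU, then flush the written
         * range so the device sees it. The mapping returned by pin_map is
         * cached and kept until the object's backing store is released.
         */
        static int write_batch(struct drm_i915_gem_object *obj)
        {
                void *batch, *batch_ptr;

                batch = i915_gem_object_pin_map(obj, I915_MAP_WB);
                if (IS_ERR(batch))
                        return PTR_ERR(batch);

                batch_ptr = emit_commands(batch); /* hypothetical emitter */

                /* Flush only the bytes actually written, then drop the pin. */
                __i915_gem_object_flush_map(obj, 0, batch_ptr - batch);
                i915_gem_object_unpin_map(obj);

                return 0;
        }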

@@ -3880,7 +3880,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
         struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
                                             &wa_ctx->per_ctx };
         wa_bb_func_t wa_bb_fn[2];
-        struct page *page;
         void *batch, *batch_ptr;
         unsigned int i;
         int ret;
@@ -3916,14 +3915,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
                 return ret;
         }
 
-        page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
-        batch = batch_ptr = kmap_atomic(page);
+        batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
 
         /*
          * Emit the two workaround batch buffers, recording the offset from the
          * start of the workaround batch buffer object for each and their
          * respective sizes.
          */
+        batch_ptr = batch;
         for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
                 wa_bb[i]->offset = batch_ptr - batch;
                 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
@@ -3935,10 +3934,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
                         batch_ptr = wa_bb_fn[i](engine, batch_ptr);
                 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
         }
-        BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
+        GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
 
-        kunmap_atomic(batch);
+        __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
+        i915_gem_object_unpin_map(wa_ctx->vma->obj);
 
         if (ret)
                 lrc_destroy_wa_ctx(engine);