diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9e28b2f9df72..890ad041ea5c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3880,7 +3880,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
 					    &wa_ctx->per_ctx };
 	wa_bb_func_t wa_bb_fn[2];
-	struct page *page;
 	void *batch, *batch_ptr;
 	unsigned int i;
 	int ret;
@@ -3916,14 +3915,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 		return ret;
 	}
 
-	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
-	batch = batch_ptr = kmap_atomic(page);
+	batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
 
 	/*
 	 * Emit the two workaround batch buffers, recording the offset from the
 	 * start of the workaround batch buffer object for each and their
 	 * respective sizes.
 	 */
+	batch_ptr = batch;
 	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
 		wa_bb[i]->offset = batch_ptr - batch;
 		if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
@@ -3935,10 +3934,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 		batch_ptr = wa_bb_fn[i](engine, batch_ptr);
 		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
 	}
+	GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
 
-	BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
-
-	kunmap_atomic(batch);
+	__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
+	i915_gem_object_unpin_map(wa_ctx->vma->obj);
 
 	if (ret)
 		lrc_destroy_wa_ctx(engine);
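
For context (not part of the patch): a minimal sketch of the pin_map/flush/unpin pattern the second and third hunks switch to, assuming the usual i915 GEM helpers. The helper name fill_wa_batch and its parameters are hypothetical; the i915 calls are the ones used in the hunks, and the IS_ERR() check on the mapping is the common idiom for i915_gem_object_pin_map(), added here for completeness rather than taken from the hunks above.

/*
 * Hypothetical helper, illustrative only: CPU-fill a GEM object the way
 * the patched intel_init_workaround_bb() now does it.
 */
#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>

#include "gem/i915_gem_object.h"

static int fill_wa_batch(struct drm_i915_gem_object *obj,
			 const u32 *payload, unsigned long len)
{
	void *vaddr;

	/* Map the whole object write-back instead of kmapping page 0. */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, payload, len);

	/*
	 * A WB CPU mapping is not coherent with the GPU on non-LLC parts,
	 * so flush exactly the range that was written before unpinning.
	 */
	__i915_gem_object_flush_map(obj, 0, len);
	i915_gem_object_unpin_map(obj);

	return 0;
}

Compared with kmapping only page 0, pinning a write-back map of the whole object lets both workaround batch buffers be emitted without worrying about page boundaries, while the explicit __i915_gem_object_flush_map() takes over the coherency duty; switching BUG_ON() to GEM_BUG_ON() also keeps the overflow assert out of production builds.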