drm/i915: Release short-lived maps of long-lived objects

Some objects we map once during construction and then never access
their mappings again, even though the objects themselves are kept
around for the lifetime of the driver. Keeping those pages mapped,
often vmapped, is therefore wasteful, and we should release the maps
as soon as we no longer need them.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200708173748.32734-3-chris@chris-wilson.co.uk
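
To illustrate the pattern this patch targets, here is a minimal sketch of a
construction-time, write-once batch; the creation helper, size, and payload
below are assumptions for illustration, not taken from the patch itself:

static int build_longlived_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	u32 *cs;

	/* hypothetical object; any long-lived, write-once buffer fits */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		i915_gem_object_put(obj);
		return PTR_ERR(cs);
	}

	/* one-time construction: the mapping is never accessed again */
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj); /* was i915_gem_object_unpin_map(obj) */

	/* obj lives on for the driver's lifetime, but its vmap is gone */
	return 0;
}

The only change at each call site is the final call:
i915_gem_object_unpin_map() merely drops the pin and leaves the vmapping
cached for reuse, while __i915_gem_object_release_map() also tears the
mapping down.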
Commit: 89d19b2b45 (parent 59c94b9d26)
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2020-07-08 18:37:47 +01:00

7 changed files with 23 additions and 6 deletions

--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h

@@ -394,6 +394,8 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }
 
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
+
 void
 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 				   unsigned int flush_domains);

--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c

@@ -408,6 +408,21 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
 	}
 }
 
+void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
+{
+	GEM_BUG_ON(!obj->mm.mapping);
+
+	/*
+	 * We allow removing the mapping from underneath pinned pages!
+	 *
+	 * Furthermore, since this is an unsafe operation reserved only
+	 * for construction time manipulation, we ignore locking prudence.
+	 */
+	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
+
+	i915_gem_object_unpin_map(obj);
+}
+
 struct scatterlist *
 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 		       unsigned int n,
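
Two details in the new helper are worth unpacking. obj->mm.mapping packs
the map type into the low bits of the pointer, which page_mask_bits()
strips before unmapping. fetch_and_zero() (from i915_utils.h) reads the
old value and clears the slot in a single expression; roughly this, as a
sketch from memory rather than part of this patch:

#define fetch_and_zero(ptr) ({			\
	typeof(*ptr) __T = *(ptr);		\
	*(ptr) = (typeof(*ptr))0;		\
	__T;					\
})

Note that this is not atomic; as the comment in the helper says, locking
prudence is deliberately ignored because the operation is reserved for
single-threaded, construction-time use.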

--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c

@@ -396,7 +396,7 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
 	emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
 
 	i915_gem_object_flush_map(vma->obj);
-	i915_gem_object_unpin_map(vma->obj);
+	__i915_gem_object_release_map(vma->obj);
 
 	return 0;
 }

--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c

@@ -3937,7 +3937,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
 
 	__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
-	i915_gem_object_unpin_map(wa_ctx->vma->obj);
+	__i915_gem_object_release_map(wa_ctx->vma->obj);
 
 	if (ret)
 		lrc_destroy_wa_ctx(engine);

--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c

@@ -150,7 +150,7 @@ static int render_state_setup(struct intel_renderstate *so,
 	ret = 0;
 out:
 	__i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
-	i915_gem_object_unpin_map(so->vma->obj);
+	__i915_gem_object_release_map(so->vma->obj);
 
 	return ret;
 }

--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c

@@ -543,7 +543,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
 			   vaddr, engine->context_size);
 
 		i915_gem_object_flush_map(obj);
-		i915_gem_object_unpin_map(obj);
+		__i915_gem_object_release_map(obj);
 	}
 
 	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);

--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c

@@ -1772,7 +1772,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
 
 	i915_gem_object_flush_map(bo);
-	i915_gem_object_unpin_map(bo);
+	__i915_gem_object_release_map(bo);
 
 	stream->noa_wait = vma;
 	return 0;
@@ -1867,7 +1867,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream,
 	*cs++ = 0;
 
 	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
+	__i915_gem_object_release_map(obj);
 
 	oa_bo->vma = i915_vma_instance(obj,
 				       &stream->engine->gt->ggtt->vm,