drm/i915: Pull unpin map into vma release

A reasonably common operation is to pin the map of the vma alongside the
vma itself for the lifetime of the vma, and so release both pins at the
same time as destroying the vma. It is common enough to pull into the
release function, making that central function more attractive to a
couple of other callsites.

The continual ulterior motive is to sweep over errors on module load
aborting...

Testcase: igt/drv_module_reload/basic-reload-inject
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180721125037.20127-1-chris@chris-wilson.co.uk

commit 6a2f59e45a
parent 102506d529
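
The change below converts i915_vma_unpin_and_release() to take a flags
argument so that callers which also pinned the object's kernel map can drop
both pins in one call. As a rough caller-side sketch (not part of the patch;
the example_teardown_* wrappers are invented purely for illustration, while
i915_gem_object_unpin_map(), i915_vma_unpin_and_release() and
I915_VMA_RELEASE_MAP are the interfaces touched by the diff):

/* Before this patch: the map pin and the vma pin are dropped separately. */
static void example_teardown_old(struct i915_vma **p_vma)
{
	i915_gem_object_unpin_map((*p_vma)->obj);
	i915_vma_unpin_and_release(p_vma);
}

/*
 * After this patch: one call does both; I915_VMA_RELEASE_MAP asks the
 * release helper to unpin the map as well. Callers that never pinned a
 * map simply pass 0.
 */
static void example_teardown_new(struct i915_vma **p_vma)
{
	i915_vma_unpin_and_release(p_vma, I915_VMA_RELEASE_MAP);
}

The flag is acted on in the i915_vma.c hunk below, after the vma itself has
been unpinned and closed.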
@@ -1338,14 +1338,12 @@ free_oa_buffer(struct drm_i915_private *i915)
 {
 	mutex_lock(&i915->drm.struct_mutex);
 
-	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
-	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
-	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);
-
-	i915->perf.oa.oa_buffer.vma = NULL;
-	i915->perf.oa.oa_buffer.vaddr = NULL;
+	i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma,
+				   I915_VMA_RELEASE_MAP);
 
 	mutex_unlock(&i915->drm.struct_mutex);
+
+	i915->perf.oa.oa_buffer.vaddr = NULL;
 }
 
 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
@@ -406,7 +406,7 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
 	i915_vma_unpin(vma);
 }
 
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
 {
 	struct i915_vma *vma;
 	struct drm_i915_gem_object *obj;
@@ -421,6 +421,9 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
 	i915_vma_unpin(vma);
 	i915_vma_close(vma);
 
+	if (flags & I915_VMA_RELEASE_MAP)
+		i915_gem_object_unpin_map(obj);
+
 	__i915_gem_object_release_unless_active(obj);
 }
 
@@ -138,7 +138,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 		  struct i915_address_space *vm,
 		  const struct i915_ggtt_view *view);
 
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
+#define I915_VMA_RELEASE_MAP BIT(0)
 
 static inline bool i915_vma_is_active(struct i915_vma *vma)
 {
@@ -527,7 +527,7 @@ err_unref:
 
 void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 {
-	i915_vma_unpin_and_release(&engine->scratch);
+	i915_vma_unpin_and_release(&engine->scratch, 0);
 }
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
@@ -543,20 +543,8 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
-	struct i915_vma *vma;
-	struct drm_i915_gem_object *obj;
-
-	vma = fetch_and_zero(&engine->status_page.vma);
-	if (!vma)
-		return;
-
-	obj = vma->obj;
-
-	i915_vma_unpin(vma);
-	i915_vma_close(vma);
-
-	i915_gem_object_unpin_map(obj);
-	__i915_gem_object_release_unless_active(obj);
+	i915_vma_unpin_and_release(&engine->status_page.vma,
+				   I915_VMA_RELEASE_MAP);
 }
 
 static int init_status_page(struct intel_engine_cs *engine)
@@ -170,7 +170,7 @@ static int guc_shared_data_create(struct intel_guc *guc)
 
 	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
-		i915_vma_unpin_and_release(&vma);
+		i915_vma_unpin_and_release(&vma, 0);
 		return PTR_ERR(vaddr);
 	}
 
@@ -182,8 +182,7 @@ static int guc_shared_data_create(struct intel_guc *guc)
 
 static void guc_shared_data_destroy(struct intel_guc *guc)
 {
-	i915_gem_object_unpin_map(guc->shared_data->obj);
-	i915_vma_unpin_and_release(&guc->shared_data);
+	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
 }
 
 int intel_guc_init(struct intel_guc *guc)
@@ -148,5 +148,5 @@ int intel_guc_ads_create(struct intel_guc *guc)
 
 void intel_guc_ads_destroy(struct intel_guc *guc)
 {
-	i915_vma_unpin_and_release(&guc->ads_vma);
+	i915_vma_unpin_and_release(&guc->ads_vma, 0);
 }
@@ -204,7 +204,7 @@ static int ctch_init(struct intel_guc *guc,
 	return 0;
 
 err_vma:
-	i915_vma_unpin_and_release(&ctch->vma);
+	i915_vma_unpin_and_release(&ctch->vma, 0);
 err_out:
 	CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
 			ctch->owner, err);
@@ -214,10 +214,7 @@ err_out:
 static void ctch_fini(struct intel_guc *guc,
 		      struct intel_guc_ct_channel *ctch)
 {
-	GEM_BUG_ON(!ctch->vma);
-
-	i915_gem_object_unpin_map(ctch->vma->obj);
-	i915_vma_unpin_and_release(&ctch->vma);
+	i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
 }
 
 static int ctch_open(struct intel_guc *guc,
@@ -498,7 +498,7 @@ err:
 
 void intel_guc_log_destroy(struct intel_guc_log *log)
 {
-	i915_vma_unpin_and_release(&log->vma);
+	i915_vma_unpin_and_release(&log->vma, 0);
 }
 
 int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
@@ -317,7 +317,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc)
 
 	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
-		i915_vma_unpin_and_release(&vma);
+		i915_vma_unpin_and_release(&vma, 0);
 		return PTR_ERR(vaddr);
 	}
 
@@ -331,8 +331,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc)
 static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
 {
 	ida_destroy(&guc->stage_ids);
-	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
-	i915_vma_unpin_and_release(&guc->stage_desc_pool);
+	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
 }
 
 /*
@@ -1008,7 +1007,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 err_vaddr:
 	i915_gem_object_unpin_map(client->vma->obj);
 err_vma:
-	i915_vma_unpin_and_release(&client->vma);
+	i915_vma_unpin_and_release(&client->vma, 0);
 err_id:
 	ida_simple_remove(&guc->stage_ids, client->stage_id);
 err_client:
@@ -1020,8 +1019,7 @@ static void guc_client_free(struct intel_guc_client *client)
 {
 	unreserve_doorbell(client);
 	guc_stage_desc_fini(client->guc, client);
-	i915_gem_object_unpin_map(client->vma->obj);
-	i915_vma_unpin_and_release(&client->vma);
+	i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
 	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
 	kfree(client);
 }
@@ -1657,7 +1657,7 @@ err:
 
 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
 {
-	i915_vma_unpin_and_release(&engine->wa_ctx.vma);
+	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
 }
 
 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);