drm/ttm: cleanup ttm_mem_type_manager_func.get_node interface v3
Instead of signaling failure by setting the node pointer to NULL, do so by
returning -ENOSPC.

v2: add memset() to make sure that mem is always initialized.
v3: drop the memset() and only set mm_node = NULL; move the mm_node
    initialization into amdgpu.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Link: https://patchwork.freedesktop.org/patch/373181/
commit 58e4d686d4
parent 60e9eabf41
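The new contract, in short: a get_node() backend returns 0 on success (typically publishing its allocation through mem->mm_node), returns -ENOSPC when the domain is simply full so that ttm_bo_mem_space() can fall back to the next placement and ttm_bo_mem_force_space() can evict and retry, and returns any other error as a hard failure. Below is a minimal sketch of a backend following that convention; it is not part of the patch, the my_mgr_* and my_alloc names are purely illustrative, and the parameter list is assumed to match the in-tree get_node() implementations touched in the hunks that follow.

/*
 * Illustrative sketch only (not from the patch): a hypothetical
 * ->get_node() backend using the return convention this series
 * establishes.  my_mgr_node and my_alloc() are made up.
 */
static int my_mgr_get_node(struct ttm_mem_type_manager *man,
                           struct ttm_buffer_object *bo,
                           const struct ttm_place *place,
                           struct ttm_mem_reg *mem)
{
        struct my_mgr_node *node;

        node = my_alloc(man, mem->num_pages);   /* hypothetical allocator */
        if (IS_ERR(node))
                return PTR_ERR(node);           /* hard failure, e.g. -ENOMEM */
        if (!node)
                return -ENOSPC;                 /* full: caller may evict or try the next placement */

        mem->mm_node = node;                    /* success */
        return 0;
}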
@@ -229,7 +229,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
 	    atomic64_read(&mgr->available) < mem->num_pages) {
 		spin_unlock(&mgr->lock);
-		return 0;
+		return -ENOSPC;
 	}
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
@@ -250,7 +250,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		if (unlikely(r)) {
 			kfree(node);
 			mem->mm_node = NULL;
-			r = 0;
 			goto err_out;
 		}
 	} else {
@@ -319,8 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
 		atomic64_sub(mem_bytes, &mgr->usage);
-		mem->mm_node = NULL;
-		return 0;
+		return -ENOSPC;
 	}

 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -400,7 +399,7 @@ error:
 	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);

 	kvfree(nodes);
-	return r == -ENOSPC ? 0 : r;
+	return r;
 }

 /**
@@ -75,10 +75,6 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
 	if (ret) {
 		nouveau_mem_del(reg);
-		if (ret == -ENOSPC) {
-			reg->mm_node = NULL;
-			return 0;
-		}
 		return ret;
 	}

@@ -139,10 +135,6 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
 		nouveau_mem_del(reg);
-		if (ret == -ENOSPC) {
-			reg->mm_node = NULL;
-			return 0;
-		}
 		return ret;
 	}

@@ -909,10 +909,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	ticket = dma_resv_locking_ctx(bo->base.resv);
 	do {
 		ret = (*man->func->get_node)(man, bo, place, mem);
-		if (unlikely(ret != 0))
-			return ret;
-		if (mem->mm_node)
+		if (likely(!ret))
 			break;
+		if (unlikely(ret != -ENOSPC))
+			return ret;
 		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
 					  ticket);
 		if (unlikely(ret != 0))
@@ -1056,12 +1056,11 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,

 		man = &bdev->man[mem->mem_type];
 		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (ret == -ENOSPC)
+			continue;
 		if (unlikely(ret))
 			goto error;

-		if (!mem->mm_node)
-			continue;
-
 		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 		if (unlikely(ret)) {
 			(*man->func->put_node)(man, mem);
@@ -1126,6 +1125,8 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.page_alignment = bo->mem.page_alignment;
 	mem.bus.io_reserved_vm = false;
 	mem.bus.io_reserved_count = 0;
+	mem.mm_node = NULL;
+
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -86,7 +86,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 		mem->start = node->start;
 	}

-	return 0;
+	return ret;
 }

 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
@@ -53,8 +53,6 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 		(struct vmwgfx_gmrid_man *)man->priv;
 	int id;

-	mem->mm_node = NULL;
-
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return (id != -ENOMEM ? 0 : id);
@@ -78,7 +76,7 @@ nospace:
 	gman->used_gmr_pages -= bo->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
-	return 0;
+	return -ENOSPC;
 }

 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,