drm/ttm: drop special pipeline accel cleanup function.

The two accel cleanup paths were mostly the same once refactored. Just pass
a bool to say if the evictions are to be pipelined.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200917064132.148521-2-airlied@gmail.com
commit e46f468fef
parent 92afce9095
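For orientation, here is a minimal caller-side sketch of what the change means in practice. It is illustrative only and modeled on the amdgpu hunk below; bo, fence, evict, new_mem and r are the local variables used there, and the fourth argument is the new pipeline flag.

/* before: two exported helpers to choose from */
if (bo->type == ttm_bo_type_kernel)
        r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
else
        r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);

/* after: one helper, with an explicit "pipeline" bool */
if (bo->type == ttm_bo_type_kernel)
        /* VM page tables: always block, never pipeline */
        r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
else
        /* regular BOs: the eviction may be pipelined */
        r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);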
@@ -500,9 +500,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
         /* Always block for VM page tables before committing the new location */
         if (bo->type == ttm_bo_type_kernel)
-                r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
+                r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
         else
-                r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+                r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
         dma_fence_put(fence);
         return r;
 
@@ -824,7 +824,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
         if (ret == 0) {
                 ret = ttm_bo_move_accel_cleanup(bo,
                                                 &fence->base,
-                                                evict,
+                                                evict, false,
                                                 new_reg);
                 nouveau_fence_unref(&fence);
         }
@@ -200,7 +200,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
         if (IS_ERR(fence))
                 return PTR_ERR(fence);
 
-        r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
+        r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
         radeon_fence_unref(&fence);
         return r;
 }
@@ -580,20 +580,48 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
         return 0;
 }
 
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+                                       struct dma_fence *fence)
+{
+        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+
+        /**
+         * BO doesn't have a TTM we need to bind/unbind. Just remember
+         * this eviction and free up the allocation
+         */
+        spin_lock(&from->move_lock);
+        if (!from->move || dma_fence_is_later(fence, from->move)) {
+                dma_fence_put(from->move);
+                from->move = dma_fence_get(fence);
+        }
+        spin_unlock(&from->move_lock);
+
+        ttm_bo_free_old_node(bo);
+
+        dma_fence_put(bo->moving);
+        bo->moving = dma_fence_get(fence);
+}
+
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                               struct dma_fence *fence,
                               bool evict,
+                              bool pipeline,
                               struct ttm_resource *new_mem)
 {
         struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
         struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
-        int ret;
+        int ret = 0;
 
         dma_resv_add_excl_fence(bo->base.resv, fence);
-        if (evict)
-                ret = ttm_bo_wait_free_node(bo, man->use_tt);
-        else
+        if (!evict)
                 ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+        else if (!from->use_tt && pipeline)
+                ttm_bo_move_pipeline_evict(bo, fence);
+        else
+                ret = ttm_bo_wait_free_node(bo, man->use_tt);
 
         if (ret)
                 return ret;
 
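Condensed view of the cleanup policy that results from the hunk above (editorial summary derived from the new code, not part of the patch):

/*
 * ttm_bo_move_accel_cleanup(bo, fence, evict, pipeline, new_mem):
 *
 *   !evict                        -> ttm_bo_move_to_ghost(): hang the old
 *                                    allocation on a temporary ghost BO so
 *                                    the fence can release it later
 *   evict && !from->use_tt
 *         && pipeline             -> ttm_bo_move_pipeline_evict(): remember
 *                                    the fence in from->move and free the
 *                                    old node right away
 *   otherwise                     -> ttm_bo_wait_free_node(): wait for the
 *                                    move fence, then free the old node
 */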
@@ -603,59 +631,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-                         struct dma_fence *fence, bool evict,
-                         struct ttm_resource *new_mem)
-{
-        struct ttm_bo_device *bdev = bo->bdev;
-
-        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
-        struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);
-
-        int ret;
-
-        dma_resv_add_excl_fence(bo->base.resv, fence);
-
-        if (!evict) {
-                ret = ttm_bo_move_to_ghost(bo, fence, to->use_tt);
-                if (ret)
-                        return ret;
-        } else if (!from->use_tt) {
-
-                /**
-                 * BO doesn't have a TTM we need to bind/unbind. Just remember
-                 * this eviction and free up the allocation
-                 */
-
-                spin_lock(&from->move_lock);
-                if (!from->move || dma_fence_is_later(fence, from->move)) {
-                        dma_fence_put(from->move);
-                        from->move = dma_fence_get(fence);
-                }
-                spin_unlock(&from->move_lock);
-
-                ttm_bo_free_old_node(bo);
-
-                dma_fence_put(bo->moving);
-                bo->moving = dma_fence_get(fence);
-
-        } else {
-                /**
-                 * Last resort, wait for the move to be completed.
-                 *
-                 * Should never happen in pratice.
-                 */
-                ret = ttm_bo_wait_free_node(bo, to->use_tt);
-                if (ret)
-                        return ret;
-        }
-
-        ttm_bo_assign_mem(bo, new_mem);
-
-        return 0;
-}
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
-
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
         struct ttm_buffer_object *ghost;
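With ttm_bo_pipeline_move() deleted above, any remaining caller needs the same mechanical conversion that amdgpu received earlier in this patch. A hypothetical out-of-tree caller would change like this (illustrative only, not part of the patch):

-        r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+        r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);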
@@ -642,6 +642,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @pipeline: evictions are to be pipelined.
  * @new_mem: struct ttm_resource indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -653,23 +654,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  */
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                               struct dma_fence *fence, bool evict,
+                              bool pipeline,
                               struct ttm_resource *new_mem);
 
-/**
- * ttm_bo_pipeline_move.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Function for pipelining accelerated moves. Either free the memory
- * immediately or hang it on a temporary buffer object.
- */
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-                         struct dma_fence *fence, bool evict,
-                         struct ttm_resource *new_mem);
-
 /**
  * ttm_bo_pipeline_gutting.
  *
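Putting the two header hunks together, the exported prototype reads as follows after the patch (reconstructed from the hunks above; only the kerneldoc lines the diff touches are shown, the rest is elided):

/*
 * ...
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 * ...
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence, bool evict,
                              bool pipeline,
                              struct ttm_resource *new_mem);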