drm/radeon: stop using TTM's fault callback

We already implement the fault handler ourselves;
just open code what is necessary here.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/392322/
commit 8e0310f0ff
parent 0b5aebec2b
Author: Christian König <christian.koenig@amd.com>
Date:   2020-09-25 14:17:09 +02:00

3 changed files with 34 additions and 19 deletions
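At a high level, the change open-codes the standard TTM fault sequence inside radeon_ttm_fault(): reserve the BO, run the driver's fault notify, then let TTM insert the PTEs for the faulting range. A minimal sketch of that flow, assembled only from the helpers visible in the hunks below (my_fault_notify() is a placeholder for radeon_bo_fault_reserve_notify(); the rdev->pm.mclk_lock handling is omitted):

/* Sketch only: the generic shape of a driver fault handler built from
 * TTM's exported helpers, as this patch does for radeon.
 */
static vm_fault_t my_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	/* Try to lock the BO's reservation, honoring FAULT_FLAG_RETRY_NOWAIT. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Driver-specific work, e.g. moving the BO into CPU-visible VRAM. */
	ret = my_fault_notify(bo);
	if (ret)
		goto out_unlock;

	/* Let TTM set up the PTEs (with prefaulting) for the faulting range. */
	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;	/* TTM already dropped the reservation here */

out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}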

--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c

@@ -775,7 +775,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
-int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	struct radeon_device *rdev;
@@ -798,7 +798,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* Can't move a pinned BO to visible VRAM */
 	if (rbo->tbo.pin_count > 0)
-		return -EINVAL;
+		return VM_FAULT_SIGBUS;
 
 	/* hurrah the memory is not visible ! */
 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -812,16 +812,20 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
-		return ttm_bo_validate(bo, &rbo->placement, &ctx);
-	} else if (unlikely(r != 0)) {
-		return r;
+		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+	} else if (likely(!r)) {
+		offset = bo->mem.start << PAGE_SHIFT;
+		/* this should never happen */
+		if ((offset + size) > rdev->mc.visible_vram_size)
+			return VM_FAULT_SIGBUS;
 	}
 
-	offset = bo->mem.start << PAGE_SHIFT;
-	/* this should never happen */
-	if ((offset + size) > rdev->mc.visible_vram_size)
-		return -EINVAL;
+	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(r))
+		return VM_FAULT_SIGBUS;
 
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 	return 0;
 }
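Because the helper now runs in the fault path and returns a vm_fault_t, the errno results of ttm_bo_validate() have to be translated: -EBUSY and -ERESTARTSYS are transient (contention or a pending signal), so the fault is simply retried via VM_FAULT_NOPAGE, while any other failure becomes VM_FAULT_SIGBUS. A standalone sketch of that mapping, equivalent to what the hunk above open-codes (the helper name is made up for illustration; the generic vmf_error() would turn the transient errors into SIGBUS rather than a retry):

/* Sketch: errno -> vm_fault_t translation as applied after ttm_bo_validate(). */
static vm_fault_t radeon_validate_err_to_fault(int r)
{
	if (r == -EBUSY || r == -ERESTARTSYS)
		return VM_FAULT_NOPAGE;	/* transient: let the CPU retry the access */
	if (r)
		return VM_FAULT_SIGBUS;	/* hard failure: signal the faulting task */
	return 0;			/* success */
}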

--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h

@@ -163,7 +163,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 				bool evict,
 				struct ttm_resource *new_mem);
-extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 			    bool shared);

--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c

@@ -803,7 +803,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.move = &radeon_bo_move,
 	.verify_access = &radeon_verify_access,
 	.move_notify = &radeon_bo_move_notify,
-	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
 	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
 };
@@ -904,17 +903,29 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
 {
-	struct ttm_buffer_object *bo;
-	struct radeon_device *rdev;
+	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
 	vm_fault_t ret;
 
-	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
-	if (bo == NULL)
-		return VM_FAULT_NOPAGE;
-	rdev = radeon_get_rdev(bo->bdev);
 	down_read(&rdev->pm.mclk_lock);
-	ret = ttm_bo_vm_fault(vmf);
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		goto unlock_mclk;
+
+	ret = radeon_bo_fault_reserve_notify(bo);
+	if (ret)
+		goto unlock_resv;
+
+	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+				       TTM_BO_VM_NUM_PREFAULT, 1);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		goto unlock_mclk;
+
+unlock_resv:
+	dma_resv_unlock(bo->base.resv);
+
+unlock_mclk:
 	up_read(&rdev->pm.mclk_lock);
 	return ret;
 }
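For completeness: the custom handler is installed through radeon's own vm_operations_struct rather than TTM's default one, which is why the driver can open-code the fault path at all. That wiring is not part of this diff; the existing radeon_ttm.c code is assumed to look roughly like the sketch below (field names and error handling reproduced from memory, not from the patch):

/* Sketch, not part of this diff: how the custom fault handler is installed. */
static const struct vm_operations_struct radeon_ttm_vm_ops = {
	.fault = radeon_ttm_fault,	/* the handler rewritten above */
	.open = ttm_bo_vm_open,		/* generic TTM VMA refcounting */
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,	/* ptrace()-style access into BOs */
};

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct radeon_device *rdev = file_priv->minor->dev->dev_private;
	int r;

	if (rdev == NULL)
		return -EINVAL;

	/* Let TTM set up the VMA, then point vm_ops at the radeon handlers. */
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0))
		return r;

	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}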