drm/amdgpu: implement amdgpu_gem_prime_move_notify v2

Implement the importer side of unpinned DMA-buf handling.

v2: update page tables immediately

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/353998/?series=73646&rev=1
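For context (not part of this patch): move_notify is invoked by the DMA-buf core, on the exporter's behalf and with the reservation lock already held, whenever the exporter wants to move a shared buffer. A minimal, hedged sketch of that exporter-side trigger follows; example_evict() and its surroundings are hypothetical, only the dma_buf_*/dma_resv_* calls are real API.

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

/* Hedged sketch, not part of this patch: an exporter kicking its dynamic
 * importers (such as amdgpu after this change) before migrating a buffer.
 */
static int example_evict(struct dma_buf *dmabuf)
{
	int ret;

	ret = dma_resv_lock(dmabuf->resv, NULL);
	if (ret)
		return ret;

	/* Each dynamic importer's move_notify runs here, e.g. the
	 * amdgpu_dma_buf_move_notify() added below.
	 */
	dma_buf_move_notify(dmabuf);

	/* ... the exporter would migrate the backing storage here ... */

	dma_resv_unlock(dmabuf->resv);
	return 0;
}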
Christian König 2018-06-07 10:28:47 +02:00
parent 2d4dad2734
commit a448cb003e
2 changed files with 71 additions and 1 deletion

drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c

@@ -451,7 +451,71 @@ error:
 	return ERR_PTR(ret);
 }
 
+/**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->importer_priv;
+	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { false, false };
+	struct ttm_placement placement = {};
+	struct amdgpu_vm_bo_base *bo_base;
+	int r;
+
+	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+		return;
+
+	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+	if (r) {
+		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
+		return;
+	}
+
+	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+		struct amdgpu_vm *vm = bo_base->vm;
+		struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+
+		if (ticket) {
+			/* When we get an error here it means that somebody
+			 * else is holding the VM lock and updating page tables.
+			 * So we can just continue here.
+			 */
+			r = dma_resv_lock(resv, ticket);
+			if (r)
+				continue;
+
+		} else {
+			/* TODO: This is more problematic and we actually need
+			 * to allow page table updates without holding the
+			 * lock.
+			 */
+			if (!dma_resv_trylock(resv))
+				continue;
+		}
+
+		r = amdgpu_vm_clear_freed(adev, vm, NULL);
+		if (!r)
+			r = amdgpu_vm_handle_moved(adev, vm);
+
+		if (r && r != -EBUSY)
+			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
+				  r);
+
+		dma_resv_unlock(resv);
+	}
+}
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+	.move_notify = amdgpu_dma_buf_move_notify
+};
+
 /**
@@ -487,7 +551,7 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 		return obj;
 
 	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
-					&amdgpu_dma_buf_attach_ops, NULL);
+					&amdgpu_dma_buf_attach_ops, obj);
 	if (IS_ERR(attach)) {
 		drm_gem_object_put(obj);
 		return ERR_CAST(attach);

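Not part of the patch: the function above only tears the cached mapping down; re-creating it is deferred to the next time the buffer is actually used (amdgpu does this through its TTM backend, not shown here). A hedged sketch of that step for a generic dynamic importer, where example_get_sgt() is a hypothetical name and only the dma_buf_*/dma_resv_* calls are real API:

/* Same includes as the sketch above (linux/dma-buf.h, linux/dma-resv.h).
 * Hedged sketch: re-creating the importer mapping after move_notify
 * invalidated it.  Dynamic attachments must be mapped with the
 * reservation lock held.
 */
static struct sg_table *example_get_sgt(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(attach->dmabuf->resv);

	return sgt;
}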
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -926,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		return 0;
 	}
 
+	if (bo->tbo.base.import_attach)
+		dma_buf_pin(bo->tbo.base.import_attach);
+
 	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	/* force to pin into visible video ram */
 	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
@@ -1009,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	amdgpu_bo_subtract_pin_size(bo);
 
+	if (bo->tbo.base.import_attach)
+		dma_buf_unpin(bo->tbo.base.import_attach);
+
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
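The two hunks above mirror the BO's pin state into the attachment: while an imported BO is pinned (for scanout, for example) the exporter may not move the buffer, so no move_notify arrives for it; on unpin it becomes movable again. A hedged sketch of the same contract from a generic importer's point of view, with example_pin_for_scanout() being a hypothetical helper rather than amdgpu code:

/* Same includes as the sketches above.  Hedged sketch: pinning an
 * attachment for a long-lived mapping; dma_buf_pin() is meant to be
 * called with the reservation lock held, which the amdgpu pin path
 * already guarantees because the imported BO shares the DMA-buf's
 * reservation object.
 */
static int example_pin_for_scanout(struct dma_buf_attachment *attach)
{
	int ret;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	ret = dma_buf_pin(attach);
	dma_resv_unlock(attach->dmabuf->resv);

	return ret;
}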