drm/ttm: Implement vm_operations_struct.access v2

Allows gdb to access the contents of user-mode mapped BOs. System memory
is handled by TTM itself using kmap; other memory pools require a new
driver callback in ttm_bo_driver.
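For context, the .access hook is the fallback the core mm uses when ptrace
(and therefore gdb) cannot pin a page with get_user_pages(), as is the case
for VM_IO/VM_PFNMAP mappings. A simplified sketch of that path, abridged
from __access_remote_vm() in mm/memory.c of this era (details vary by
kernel version):

        ret = get_user_pages_remote(tsk, mm, addr, 1, gup_flags,
                                    &page, &vma, NULL);
        if (ret <= 0) {
                /* No pinnable struct page; let the VMA copy the bytes */
                vma = find_vma(mm, addr);
                if (vma && vma->vm_ops && vma->vm_ops->access)
                        ret = vma->vm_ops->access(vma, addr, buf,
                                                  len, write);
                if (ret <= 0)
                        break;
        }

Without an .access implementation, such reads fail and the debugger sees
inaccessible memory.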

v2:
* kmap only one page at a time
* swap in BO if needed
* make driver callback more generic to handle private memory pools
* document callback return value
* WARN_ON -> WARN_ON_ONCE

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author: Felix Kuehling, 2017-07-13 17:01:16 -04:00
Committed by: Alex Deucher
parent 16d6e96227
commit 09ac4fcb3f
2 changed files with 95 additions and 1 deletion

drivers/gpu/drm/ttm/ttm_bo_vm.c

@@ -294,10 +294,87 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
        vma->vm_private_data = NULL;
}

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                                 unsigned long offset,
                                 void *buf, int len, int write)
{
        unsigned long page = offset >> PAGE_SHIFT;
        unsigned long bytes_left = len;
        int ret;

        /* Copy a page at a time, that way no extra virtual address
         * mapping is needed
         */
        offset -= page << PAGE_SHIFT;
        do {
                unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
                struct ttm_bo_kmap_obj map;
                void *ptr;
                bool is_iomem;

                ret = ttm_bo_kmap(bo, page, 1, &map);
                if (ret)
                        return ret;

                ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
                WARN_ON_ONCE(is_iomem);
                if (write)
                        memcpy(ptr, buf, bytes);
                else
                        memcpy(buf, ptr, bytes);
                ttm_bo_kunmap(&map);

                page++;
                bytes_left -= bytes;
                offset = 0;
        } while (bytes_left);

        return len;
}

static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                            void *buf, int len, int write)
{
        unsigned long offset = (addr) - vma->vm_start;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        int ret;

        if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
                return -EIO;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
                return ret;

        switch (bo->mem.mem_type) {
        case TTM_PL_SYSTEM:
                if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                        ret = ttm_tt_swapin(bo->ttm);
                        if (unlikely(ret != 0))
                                return ret;
                }
                /* fall through */
        case TTM_PL_TT:
                ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
                break;
        default:
                if (bo->bdev->driver->access_memory)
                        ret = bo->bdev->driver->access_memory(
                                bo, offset, buf, len, write);
                else
                        ret = -EIO;
        }

        ttm_bo_unreserve(bo);

        return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};

static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
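As a quick illustration (hypothetical example, not part of the patch): with
.access wired up, another process's BO mapping can be read through the same
access_remote_vm() path that gdb uses, e.g. via /proc/<pid>/mem, subject to
the usual ptrace permission checks. The target pid and mapping address are
assumed to be known:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        unsigned char data[16];
        char path[64];
        off_t addr;
        int fd;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
                return 1;
        }

        snprintf(path, sizeof(path), "/proc/%s/mem", argv[1]);
        addr = (off_t)strtoull(argv[2], NULL, 16);

        fd = open(path, O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Without a .access hook the kernel can copy nothing from a TTM
         * BO mapping and the read fails with EIO; with this patch the
         * bytes are copied via ttm_bo_vm_access().
         */
        if (pread(fd, data, sizeof(data), addr) < 0) {
                perror("pread");
                return 1;
        }

        printf("first byte of BO: 0x%02x\n", data[0]);
        close(fd);
        return 0;
}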

include/drm/ttm/ttm_bo_driver.h

@@ -472,6 +472,23 @@ struct ttm_bo_driver {
         */
        unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
                                    unsigned long page_offset);

        /**
         * Read/write memory buffers for ptrace access
         *
         * @bo: the BO to access
         * @offset: the offset from the start of the BO
         * @buf: pointer to source/destination buffer
         * @len: number of bytes to copy
         * @write: non-zero to write to the BO, zero to read from it
         *
         * If successful, this function should return the number of
         * bytes copied, -EIO otherwise. If the number of bytes
         * returned is < len, the function may be called again with
         * the remainder of the buffer to copy.
         */
        int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
                             void *buf, int len, int write);
};

/**
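
For illustration only (hypothetical sketch, not part of this patch): a
driver with a private pool such as VRAM might implement the callback along
these lines. The mydrv_* names are made up; real implementations live in
the individual drivers.

/* Hypothetical access_memory implementation for a private VRAM pool.
 * mydrv_bo_from_ttm(), mydrv_vram_read() and mydrv_vram_write() are
 * made-up stand-ins for the driver's MMIO/DMA access helpers.
 */
static int mydrv_bo_access_memory(struct ttm_buffer_object *bo,
                                  unsigned long offset,
                                  void *buf, int len, int write)
{
        struct mydrv_bo *mbo = mydrv_bo_from_ttm(bo);

        if (bo->mem.mem_type != TTM_PL_VRAM)
                return -EIO;

        if (write)
                mydrv_vram_write(mbo, offset, buf, len);
        else
                mydrv_vram_read(mbo, offset, buf, len);

        /* Contract documented above: return the number of bytes copied */
        return len;
}

static struct ttm_bo_driver mydrv_ttm_driver = {
        /* ... fault handling, eviction and other callbacks ... */
        .access_memory = &mydrv_bo_access_memory,
};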