drm/ttm: remove io_reserve_lru handling v3
That is not used any more.

v2: keep the NULL checks in TTM.
v3: remove unused variable

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Link: https://patchwork.freedesktop.org/patch/388646/
parent 141b15e591
commit fe662d846c
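The heart of the change is visible in the ttm_bo_util.c hunks below: instead of refcounting reservations (bus.io_reserved_count) under a per-manager mutex, ttm_mem_io_reserve()/ttm_mem_io_free() now treat a resource as reserved whenever any of bus.base, bus.offset or bus.addr is set, and reset those fields on free. A minimal, self-contained sketch of that rule; the bus field names come from the patch, everything else is invented for illustration:

	/* Sketch of the new idempotence rule; not the kernel code itself. */
	#include <stddef.h>

	struct bus_placement {
		void *addr;		/* mapped kernel virtual address, if any */
		unsigned long offset;	/* offset from the bus base */
		unsigned long base;	/* bus base address (phys_addr_t in TTM) */
	};

	/* Stand-ins for bdev->driver->io_mem_reserve / io_mem_free. */
	static int driver_reserve(struct bus_placement *bus)
	{
		bus->base = 0xd0000000UL;	/* pretend BAR base */
		return 0;
	}

	static void driver_free(struct bus_placement *bus)
	{
		(void)bus;
	}

	static int io_reserve(struct bus_placement *bus)
	{
		/* Already reserved: any populated field marks the region
		 * live, replacing the old io_reserved_count refcount. */
		if (bus->base || bus->offset || bus->addr)
			return 0;
		return driver_reserve(bus);
	}

	static void io_free(struct bus_placement *bus)
	{
		if (!bus->base && !bus->offset && !bus->addr)
			return;			/* nothing to release */
		driver_free(bus);
		bus->base = 0;			/* reset so a later reserve works */
		bus->offset = 0;
		bus->addr = NULL;
	}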
drivers/gpu/drm/ttm/ttm_bo.c
@@ -263,11 +263,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 
-	ret = ttm_mem_io_lock(old_man, true);
-	if (unlikely(ret != 0))
-		goto out_err;
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(old_man);
+	ttm_bo_unmap_virtual(bo);
 
 	/*
 	 * Create and bind a ttm if required.
@@ -538,7 +534,6 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
 	size_t acc_size = bo->acc_size;
 	int ret;
 
@@ -556,9 +551,7 @@ static void ttm_bo_release(struct kref *kref)
 			bo->bdev->driver->release_notify(bo);
 
 		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-		ttm_mem_io_lock(man, false);
-		ttm_mem_io_free_vm(bo);
-		ttm_mem_io_unlock(man);
+		ttm_mem_io_free(bdev, &bo->mem);
 	}
 
 	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
@@ -648,8 +641,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved_vm = false;
-	evict_mem.bus.io_reserved_count = 0;
 	evict_mem.bus.base = 0;
 	evict_mem.bus.offset = 0;
 	evict_mem.bus.addr = NULL;
@@ -1085,8 +1076,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved_vm = false;
-	mem.bus.io_reserved_count = 0;
 	mem.bus.base = 0;
 	mem.bus.offset = 0;
 	mem.bus.addr = NULL;
@@ -1238,7 +1227,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
-	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->num_pages = num_pages;
@@ -1247,8 +1235,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved_vm = false;
-	bo->mem.bus.io_reserved_count = 0;
 	bo->mem.bus.base = 0;
 	bo->mem.bus.offset = 0;
 	bo->mem.bus.addr = NULL;
@@ -1554,25 +1540,13 @@ EXPORT_SYMBOL(ttm_bo_device_init);
  * buffer object vm functions.
  */
 
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 
 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
-
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
-
-	ttm_mem_io_lock(man, false);
-	ttm_bo_unmap_virtual_locked(bo);
-	ttm_mem_io_unlock(man);
-}
-
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
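For callers inside TTM, the visible effect of the ttm_bo.c changes is that tearing down CPU mappings no longer needs the manager's io_reserve lock; the lock/unlock dance collapses into a single call. Side by side, excerpted from the first hunk above (error handling elided):

	/* before this patch (inside ttm_bo_handle_move_mem()): */
	ret = ttm_mem_io_lock(old_man, true);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(old_man);

	/* after: */
	ttm_bo_unmap_virtual(bo);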
drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -91,122 +91,42 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible)
-{
-	if (likely(!man->use_io_reserve_lru))
-		return 0;
-
-	if (interruptible)
-		return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-	mutex_lock(&man->io_reserve_mutex);
-	return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_resource_manager *man)
-{
-	if (likely(!man->use_io_reserve_lru))
-		return;
-
-	mutex_unlock(&man->io_reserve_mutex);
-}
-
-static int ttm_mem_io_evict(struct ttm_resource_manager *man)
-{
-	struct ttm_buffer_object *bo;
-
-	bo = list_first_entry_or_null(&man->io_reserve_lru,
-				      struct ttm_buffer_object,
-				      io_reserve_lru);
-	if (!bo)
-		return -ENOSPC;
-
-	list_del_init(&bo->io_reserve_lru);
-	ttm_bo_unmap_virtual_locked(bo);
-	return 0;
-}
-
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
 		       struct ttm_resource *mem)
 {
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
-	int ret;
-
-	if (mem->bus.io_reserved_count++)
+	if (mem->bus.base || mem->bus.offset || mem->bus.addr)
 		return 0;
 
+	mem->bus.is_iomem = false;
 	if (!bdev->driver->io_mem_reserve)
 		return 0;
 
-	mem->bus.addr = NULL;
-	mem->bus.offset = 0;
-	mem->bus.base = 0;
-	mem->bus.is_iomem = false;
-retry:
-	ret = bdev->driver->io_mem_reserve(bdev, mem);
-	if (ret == -ENOSPC) {
-		ret = ttm_mem_io_evict(man);
-		if (ret == 0)
-			goto retry;
-	}
-	return ret;
+	return bdev->driver->io_mem_reserve(bdev, mem);
 }
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
 		     struct ttm_resource *mem)
 {
-	if (--mem->bus.io_reserved_count)
+	if (!mem->bus.base && !mem->bus.offset && !mem->bus.addr)
 		return;
 
-	if (!bdev->driver->io_mem_free)
-		return;
+	if (bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
 
-	bdev->driver->io_mem_free(bdev, mem);
-}
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
-	struct ttm_resource *mem = &bo->mem;
-	int ret;
-
-	if (mem->bus.io_reserved_vm)
-		return 0;
-
-	ret = ttm_mem_io_reserve(bo->bdev, mem);
-	if (unlikely(ret != 0))
-		return ret;
-	mem->bus.io_reserved_vm = true;
-	if (man->use_io_reserve_lru)
-		list_add_tail(&bo->io_reserve_lru,
-			      &man->io_reserve_lru);
-	return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_resource *mem = &bo->mem;
-
-	if (!mem->bus.io_reserved_vm)
-		return;
-
-	mem->bus.io_reserved_vm = false;
-	list_del_init(&bo->io_reserve_lru);
-	ttm_mem_io_free(bo->bdev, mem);
+	mem->bus.base = 0;
+	mem->bus.offset = 0;
+	mem->bus.addr = NULL;
 }
 
 static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 				struct ttm_resource *mem,
 				void **virtual)
 {
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
-	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -222,9 +142,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 			addr = ioremap(mem->bus.base + mem->bus.offset,
 				       bus_size);
 		if (!addr) {
-			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
-			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -236,15 +154,9 @@ static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
 				 struct ttm_resource *mem,
 				 void *virtual)
 {
-	struct ttm_resource_manager *man;
-
-	man = ttm_manager_type(bdev, mem->mem_type);
-
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
-	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -458,7 +370,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
-	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
 	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
@@ -573,8 +484,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->mem.mem_type);
 	unsigned long offset, size;
 	int ret;
 
@@ -585,9 +494,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (start_page > bo->num_pages)
 		return -EINVAL;
 
-	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -602,10 +509,6 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_buffer_object *bo = map->bo;
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bo->bdev, bo->mem.mem_type);
-
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
@@ -623,9 +526,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
-	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
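One behavioural consequence of the hunk above is worth noting: ttm_mem_io_reserve() no longer retries on -ENOSPC by evicting entries from the io_reserve LRU, so a driver's io_mem_reserve callback must either succeed or fail for good, and on success it should populate mem->bus so the new base/offset/addr test recognises the region as reserved. A sketch of such a callback, modelled on what radeon/nouveau-style drivers of this era do; the mydrv_* names and the VRAM layout are hypothetical:

	/* Hypothetical driver callback; assumes kernel context (TTM and
	 * driver headers included). */
	static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
					struct ttm_resource *mem)
	{
		struct mydrv_device *mdev = mydrv_device(bdev);	/* hypothetical */

		switch (mem->mem_type) {
		case TTM_PL_SYSTEM:
			return 0;	/* system RAM, nothing to map */
		case TTM_PL_VRAM:
			/* Filling mem->bus marks the region reserved for
			 * the new base/offset/addr check in TTM. */
			mem->bus.base = mdev->vram_bus_base;
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.is_iomem = true;
			return 0;
		default:
			/* TTM no longer evicts-and-retries on -ENOSPC, so
			 * any error here is a hard failure. */
			return -EINVAL;
		}
	}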
drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -281,8 +281,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	pgoff_t i;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
-	struct ttm_resource_manager *man =
-		ttm_manager_type(bdev, bo->mem.mem_type);
 
 	/*
 	 * Refuse to fault imported pages. This should be handled
@@ -321,24 +319,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	if (unlikely(ret != 0))
 		return ret;
 
-	err = ttm_mem_io_lock(man, true);
+	err = ttm_mem_io_reserve(bdev, &bo->mem);
 	if (unlikely(err != 0))
-		return VM_FAULT_NOPAGE;
-	err = ttm_mem_io_reserve_vm(bo);
-	if (unlikely(err != 0)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+		return VM_FAULT_SIGBUS;
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out_io_unlock;
-	}
+	if (unlikely(page_offset >= bo->num_pages))
+		return VM_FAULT_SIGBUS;
 
 	prot = ttm_io_prot(bo->mem.placement, prot);
 	if (!bo->mem.bus.is_iomem) {
@@ -350,21 +341,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		};
 
 		ttm = bo->ttm;
-		if (ttm_tt_populate(bo->ttm, &ctx)) {
-			ret = VM_FAULT_OOM;
-			goto out_io_unlock;
-		}
+		if (ttm_tt_populate(bo->ttm, &ctx))
+			return VM_FAULT_OOM;
 	} else {
 		/* Iomem should not be marked encrypted */
 		prot = pgprot_decrypted(prot);
 	}
 
 	/* We don't prefault on huge faults. Yet. */
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
-		ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
-					    fault_page_size, prot);
-		goto out_io_unlock;
-	}
+	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
+		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+					     fault_page_size, prot);
 
 	/*
 	 * Speculatively prefault a number of pages. Only error on
@@ -376,8 +363,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		} else {
 			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
-				ret = VM_FAULT_OOM;
-				goto out_io_unlock;
+				return VM_FAULT_OOM;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -404,7 +390,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
 			if (i == 0)
-				goto out_io_unlock;
+				return VM_FAULT_NOPAGE;
 			else
 				break;
 		}
@@ -413,9 +399,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-	ret = VM_FAULT_NOPAGE;
-out_io_unlock:
-	ttm_mem_io_unlock(man);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
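The ttm_bo_vm.c hunks all follow from one fact: with no io_reserve mutex held across the fault, there is nothing to unwind on error, so the out_io_unlock label disappears and every failure path can return its VM_FAULT_* code directly. The resulting shape, as a sketch only; the real function above also prefaults and handles huge pages, and kernel context is assumed:

	/* Simplified shape of ttm_bo_vm_fault_reserved() after this patch. */
	static vm_fault_t fault_reserved_sketch(struct ttm_buffer_object *bo,
						struct ttm_bo_device *bdev)
	{
		/* Idempotent, lockless reserve: a failure is reported
		 * directly, there is no mutex to drop first. */
		if (ttm_mem_io_reserve(bdev, &bo->mem))
			return VM_FAULT_SIGBUS;

		/* ... set up PTEs; any error returns its VM_FAULT_* ... */
		return VM_FAULT_NOPAGE;
	}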
drivers/gpu/drm/ttm/ttm_resource.c
@@ -65,10 +65,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
 {
 	unsigned i;
 
-	man->use_io_reserve_lru = false;
-	mutex_init(&man->io_reserve_mutex);
 	spin_lock_init(&man->move_lock);
-	INIT_LIST_HEAD(&man->io_reserve_lru);
 	man->size = p_size;
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
include/drm/ttm/ttm_bo_api.h
@@ -151,7 +151,6 @@ struct ttm_buffer_object {
 	struct list_head lru;
 	struct list_head ddestroy;
 	struct list_head swap;
-	struct list_head io_reserve_lru;
 
 	/**
	 * Members protected by a bo reservation.
@@ -441,11 +441,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
  */
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
 
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_resource_manager *man);
-
 /**
  * ttm_bo_reserve:
  *
include/drm/ttm/ttm_resource.h
@@ -113,10 +113,6 @@ struct ttm_resource_manager_func {
  * @default_caching: The default caching policy used for a buffer object
  * placed in this memory type if the user doesn't provide one.
  * @func: structure pointer implementing the range manager. See above
- * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
- * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
- * reserved by the TTM vm system.
- * @io_reserve_lru: Optional lru list for unreserving io mem regions.
  * @move_lock: lock for move fence
  * static information. bdev::driver::io_mem_free is never used.
  * @lru: The lru list for this memory type.
@@ -134,16 +130,8 @@ struct ttm_resource_manager {
 	uint32_t available_caching;
 	uint32_t default_caching;
 	const struct ttm_resource_manager_func *func;
-	struct mutex io_reserve_mutex;
-	bool use_io_reserve_lru;
 	spinlock_t move_lock;
-
-	/*
-	 * Protected by @io_reserve_mutex:
-	 */
-
-	struct list_head io_reserve_lru;
 
 	/*
	 * Protected by the global->lru_lock.
	 */
@@ -163,8 +151,6 @@ struct ttm_resource_manager {
  * @base: bus base address
  * @is_iomem: is this io memory ?
  * @offset: offset from the base address
- * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
- * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
  *
  * Structure indicating the bus placement of an object.
  */
@@ -173,8 +159,6 @@ struct ttm_bus_placement {
 	phys_addr_t	base;
 	unsigned long	offset;
 	bool		is_iomem;
-	bool		io_reserved_vm;
-	uint64_t	io_reserved_count;
 };
 
 /**