drm/ttm: remove swap LRU v3
Instead, evict round-robin from each device's SYSTEM and TT domain.

v2: reorder num_pages access reported by Dan's script
v3: fix rebase fallout, num_pages should be 32bit

Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/424009/
commit f9e2a03e11
parent ebd59851c7
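As a usage illustration (not part of this patch), the sketch below shows how a driver could drain its own device with the new per-device entry point, mirroring the vmw_pm_freeze() change further down in the diff. struct example_device and example_swapout_all() are hypothetical names used only for this sketch; ttm_device_swapout(), struct ttm_operation_ctx and GFP_KERNEL are the real TTM/kernel symbols the patch introduces or relies on.

#include <linux/gfp.h>
#include <drm/ttm/ttm_device.h>

/* Hypothetical driver-private structure embedding the TTM device. */
struct example_device {
        struct ttm_device bdev;
};

/*
 * Swap out buffers from this device's SYSTEM and TT managers until nothing
 * more can be written back.  ttm_device_swapout() returns the number of
 * pages swapped out (> 0), 0 when no buffer could be swapped, or a
 * negative error code.
 */
static long example_swapout_all(struct example_device *edev,
                                struct ttm_operation_ctx *ctx)
{
        long freed = 0;
        int ret;

        while ((ret = ttm_device_swapout(&edev->bdev, ctx, GFP_KERNEL)) > 0)
                freed += ret;

        return ret < 0 ? ret : freed;
}

The same loop, minus the accounting, is what vmwgfx uses below to flush everything out before freeze. The patch itself follows.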
@@ -73,7 +73,6 @@ static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
         struct ttm_device *bdev = bo->bdev;
 
-        list_del_init(&bo->swap);
         list_del_init(&bo->lru);
 
         if (bdev->funcs->del_from_lru_notify)
@@ -105,16 +104,6 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 
         man = ttm_manager_type(bdev, mem->mem_type);
         list_move_tail(&bo->lru, &man->lru[bo->priority]);
-        if (man->use_tt && bo->ttm &&
-            !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
-                                     TTM_PAGE_FLAG_SWAPPED))) {
-                struct list_head *swap;
-
-                swap = &ttm_glob.swap_lru[bo->priority];
-                list_move_tail(&bo->swap, swap);
-        } else {
-                list_del_init(&bo->swap);
-        }
 
         if (bdev->funcs->del_from_lru_notify)
                 bdev->funcs->del_from_lru_notify(bo);
@@ -129,9 +118,6 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                         ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
                         break;
                 }
-                if (bo->ttm && !(bo->ttm->page_flags &
-                                 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
-                        ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
         }
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
@@ -169,20 +155,6 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
                 list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                                     &pos->last->lru);
         }
-
-        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-                struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
-                struct list_head *lru;
-
-                if (!pos->first)
-                        continue;
-
-                dma_resv_assert_held(pos->first->base.resv);
-                dma_resv_assert_held(pos->last->base.resv);
-
-                lru = &ttm_glob.swap_lru[i];
-                list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
-        }
 }
 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
 
@@ -1065,7 +1037,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
         kref_init(&bo->kref);
         INIT_LIST_HEAD(&bo->lru);
         INIT_LIST_HEAD(&bo->ddestroy);
-        INIT_LIST_HEAD(&bo->swap);
         bo->bdev = bdev;
         bo->type = type;
         bo->mem.mem_type = TTM_PL_SYSTEM;
@@ -303,7 +303,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
         atomic_inc(&ttm_glob.bo_count);
         INIT_LIST_HEAD(&fbo->base.ddestroy);
         INIT_LIST_HEAD(&fbo->base.lru);
-        INIT_LIST_HEAD(&fbo->base.swap);
         fbo->base.moving = NULL;
         drm_vma_node_reset(&fbo->base.base.vma_node);
 
@@ -67,7 +67,6 @@ static int ttm_global_init(void)
         unsigned long num_pages;
         struct sysinfo si;
         int ret = 0;
-        unsigned i;
 
         mutex_lock(&ttm_global_mutex);
         if (++ttm_glob_use_count > 1)
@@ -90,8 +89,6 @@ static int ttm_global_init(void)
                 goto out;
         }
 
-        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
-                INIT_LIST_HEAD(&glob->swap_lru[i]);
         INIT_LIST_HEAD(&glob->device_list);
         atomic_set(&glob->bo_count, 0);
 
@@ -109,27 +106,60 @@ out:
 int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
 {
         struct ttm_global *glob = &ttm_glob;
+        struct ttm_device *bdev;
+        int ret = -EBUSY;
+
+        mutex_lock(&ttm_global_mutex);
+        list_for_each_entry(bdev, &glob->device_list, device_list) {
+                ret = ttm_device_swapout(bdev, ctx, gfp_flags);
+                if (ret > 0) {
+                        list_move_tail(&bdev->device_list, &glob->device_list);
+                        break;
+                }
+        }
+        mutex_unlock(&ttm_global_mutex);
+        return ret;
+}
+EXPORT_SYMBOL(ttm_global_swapout);
+
+int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+                       gfp_t gfp_flags)
+{
+        struct ttm_global *glob = &ttm_glob;
+        struct ttm_resource_manager *man;
         struct ttm_buffer_object *bo;
-        unsigned i;
+        unsigned i, j;
         int ret;
 
         spin_lock(&glob->lru_lock);
-        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-                list_for_each_entry(bo, &glob->swap_lru[i], swap) {
-                        uint32_t num_pages = bo->ttm->num_pages;
+        for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+                man = ttm_manager_type(bdev, i);
+                if (!man || !man->use_tt)
+                        continue;
 
-                        ret = ttm_bo_swapout(bo, ctx, gfp_flags);
-                        /* ttm_bo_swapout has dropped the lru_lock */
-                        if (!ret)
-                                return num_pages;
-                        if (ret != -EBUSY)
-                                return ret;
+                for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+                        list_for_each_entry(bo, &man->lru[j], lru) {
+                                uint32_t num_pages;
+
+                                if (!bo->ttm ||
+                                    bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+                                    bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
+                                        continue;
+
+                                num_pages = bo->ttm->num_pages;
+                                ret = ttm_bo_swapout(bo, ctx, gfp_flags);
+                                /* ttm_bo_swapout has dropped the lru_lock */
+                                if (!ret)
+                                        return num_pages;
+                                if (ret != -EBUSY)
+                                        return ret;
+                        }
                 }
         }
         spin_unlock(&glob->lru_lock);
         return 0;
 }
-EXPORT_SYMBOL(ttm_global_swapout);
+EXPORT_SYMBOL(ttm_device_swapout);
 
 static void ttm_init_sysman(struct ttm_device *bdev)
 {
@@ -1371,7 +1371,7 @@ static int vmw_pm_freeze(struct device *kdev)
         vmw_execbuf_release_pinned_bo(dev_priv);
         vmw_resource_evict_all(dev_priv);
         vmw_release_device_early(dev_priv);
-        while (ttm_global_swapout(&ctx, GFP_KERNEL) > 0);
+        while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
         if (dev_priv->enable_fb)
                 vmw_fifo_resource_dec(dev_priv);
         if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
@@ -144,7 +144,6 @@ struct ttm_buffer_object {
 
         struct list_head lru;
         struct list_head ddestroy;
-        struct list_head swap;
 
         /**
          * Members protected by a bo reservation.
@@ -69,7 +69,6 @@ struct ttm_lru_bulk_move_pos {
 struct ttm_lru_bulk_move {
         struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
         struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
-        struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
 };
 
 /*
@@ -63,11 +63,6 @@ extern struct ttm_global {
          */
         struct list_head device_list;
 
-        /**
-         * Protected by the lru_lock.
-         */
-        struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
-
         /**
          * Internal protection.
          */
@@ -298,6 +293,8 @@ struct ttm_device {
 };
 
 int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
+int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+                       gfp_t gfp_flags);
 
 static inline struct ttm_resource_manager *
 ttm_manager_type(struct ttm_device *bdev, int mem_type)