2018-05-02 13:46:21 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
|
2009-06-10 13:20:19 +00:00
|
|
|
/**************************************************************************
|
|
|
|
*
|
|
|
|
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
|
|
|
* All Rights Reserved.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the
|
|
|
|
* "Software"), to deal in the Software without restriction, including
|
|
|
|
* without limitation the rights to use, copy, modify, merge, publish,
|
|
|
|
* distribute, sub license, and/or sell copies of the Software, and to
|
|
|
|
* permit persons to whom the Software is furnished to do so, subject to
|
|
|
|
* the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the
|
|
|
|
* next paragraph) shall be included in all copies or substantial portions
|
|
|
|
* of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
|
|
|
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
|
|
|
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
|
|
|
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
**************************************************************************/
|
|
|
|
/*
|
|
|
|
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
|
|
|
*/
|
|
|
|
|
2012-03-17 04:43:50 +00:00
|
|
|
#define pr_fmt(fmt) "[TTM] " fmt
|
|
|
|
|
2012-10-02 17:01:07 +00:00
|
|
|
#include <drm/ttm/ttm_module.h>
|
|
|
|
#include <drm/ttm/ttm_bo_driver.h>
|
|
|
|
#include <drm/ttm/ttm_placement.h>
|
2009-06-10 13:20:19 +00:00
|
|
|
#include <linux/jiffies.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/file.h>
|
|
|
|
#include <linux/module.h>
|
2011-07-26 23:09:06 +00:00
|
|
|
#include <linux/atomic.h>
|
2019-08-11 08:06:32 +00:00
|
|
|
#include <linux/dma-resv.h>
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2009-08-18 14:51:56 +00:00
|
|
|
static void ttm_bo_global_kobj_release(struct kobject *kobj);
|
|
|
|
|
2018-10-19 12:09:24 +00:00
|
|
|
/**
 * ttm_global_mutex - protecting the global BO state
 */
DEFINE_MUTEX(ttm_global_mutex);
/* NOTE(review): presumably protected by ttm_global_mutex — confirm at the
 * (not visible here) global init/release sites. */
unsigned ttm_bo_glob_use_count;
/* Single global BO state instance; this file uses its lru_lock and
 * swap_lru lists directly. */
struct ttm_bo_global ttm_bo_glob;
EXPORT_SYMBOL(ttm_bo_glob);
|
2018-10-19 12:09:24 +00:00
|
|
|
|
2009-08-18 14:51:56 +00:00
|
|
|
/* sysfs attribute "bo_count": read-only, served by ttm_bo_global_show(). */
static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};
|
|
|
|
|
2018-01-24 19:26:41 +00:00
|
|
|
/* Default destructor: simply frees the kmalloc()ed BO itself. */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
|
|
|
|
|
2014-08-27 11:16:04 +00:00
|
|
|
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
|
|
|
|
uint32_t *mem_type)
|
2009-12-09 20:55:10 +00:00
|
|
|
{
|
2016-09-12 11:16:16 +00:00
|
|
|
int pos;
|
2009-12-09 20:55:10 +00:00
|
|
|
|
2016-09-12 11:16:16 +00:00
|
|
|
pos = ffs(place->flags & TTM_PL_MASK_MEM);
|
|
|
|
if (unlikely(!pos))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
*mem_type = pos - 1;
|
|
|
|
return 0;
|
2009-12-09 20:55:10 +00:00
|
|
|
}
|
|
|
|
|
2020-08-04 02:55:41 +00:00
|
|
|
/**
 * ttm_mem_type_manager_debug - dump a memory type manager's state
 * @man: the manager to dump
 * @p: the drm printer to emit the output through
 *
 * Prints the manager's generic bookkeeping fields and then chains to the
 * manager-specific debug callback, if one is installed.
 */
void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "    has_type: %d\n", man->has_type);
	drm_printf(p, "    use_type: %d\n", man->use_type);
	drm_printf(p, "    use_tt: %d\n", man->use_tt);
	drm_printf(p, "    size: %llu\n", man->size);
	drm_printf(p, "    available_caching: 0x%08X\n", man->available_caching);
	drm_printf(p, "    default_caching: 0x%08X\n", man->default_caching);
	/* Optional per-backend details (e.g. range manager occupancy). */
	if (man->func && man->func->debug)
		(*man->func->debug)(man, p);
}
EXPORT_SYMBOL(ttm_mem_type_manager_debug);
|
2009-12-09 20:55:10 +00:00
|
|
|
|
|
|
|
/*
 * Log why placing @bo failed: print the BO's size followed by every
 * requested placement and the state of the manager backing it.
 */
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	struct drm_printer p = drm_debug_printer(TTM_PFX);
	int i, ret, mem_type;
	struct ttm_mem_type_manager *man;

	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
		   bo, bo->mem.num_pages, bo->mem.size >> 10,
		   bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
						&mem_type);
		/* A placement without a memory type: nothing more to print. */
		if (ret)
			return;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = &bo->bdev->man[mem_type];
		ttm_mem_type_manager_debug(man, &p);
	}
}
|
|
|
|
|
2009-08-18 14:51:56 +00:00
|
|
|
static ssize_t ttm_bo_global_show(struct kobject *kobj,
|
|
|
|
struct attribute *attr,
|
|
|
|
char *buffer)
|
|
|
|
{
|
|
|
|
struct ttm_bo_global *glob =
|
|
|
|
container_of(kobj, struct ttm_bo_global, kobj);
|
|
|
|
|
2017-08-10 12:38:41 +00:00
|
|
|
return snprintf(buffer, PAGE_SIZE, "%d\n",
|
|
|
|
atomic_read(&glob->bo_count));
|
2009-08-18 14:51:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* NULL-terminated attribute list exported via the global kobject. */
static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};
|
|
|
|
|
2010-01-19 01:58:23 +00:00
|
|
|
/* Read-only sysfs ops; no .store, the attribute cannot be written. */
static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};
|
|
|
|
|
|
|
|
/* kobject type for the global BO state; release is defined elsewhere
 * in this file (forward-declared above). */
static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};
|
|
|
|
|
2009-06-10 13:20:19 +00:00
|
|
|
|
|
|
|
/*
 * ttm_bo_type_flags - convert a memory type index into its placement flag bit.
 * @type: memory type index.
 *
 * Returns the single-bit mask for @type. Shift an unsigned constant so the
 * result is well defined for type == 31 as well (1 << 31 on a signed int is
 * undefined behavior, CERT INT34-C); for all smaller indices the value is
 * unchanged from before.
 */
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1u << type;
}
|
|
|
|
|
2019-05-13 15:58:23 +00:00
|
|
|
/*
 * Put @bo on the LRU list of the manager backing @mem, and on the global
 * swap LRU when it has swappable system pages.
 *
 * No-op when the BO is already on an LRU or is pinned (NO_EVICT).
 * NOTE(review): callers appear responsible for holding the relevant
 * locking (lru_lock/reservation) — confirm at call sites; none is taken
 * here.
 */
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	/* Already linked into some LRU: leave its position untouched. */
	if (!list_empty(&bo->lru))
		return;

	/* Pinned BOs are never eviction candidates. */
	if (mem->placement & TTM_PL_FLAG_NO_EVICT)
		return;

	man = &bdev->man[mem->mem_type];
	list_add_tail(&bo->lru, &man->lru[bo->priority]);

	/* TT-backed BOs with real, resident pages (not SG-imported, not
	 * already swapped out) are also candidates for swap-out. */
	if (man->use_tt && bo->ttm &&
	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
				     TTM_PAGE_FLAG_SWAPPED))) {
		list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
	}
}
|
2019-05-13 15:58:23 +00:00
|
|
|
|
2019-09-19 10:56:15 +00:00
|
|
|
/*
 * Unlink @bo from the swap and eviction LRU lists (if on either) and tell
 * the driver, via the optional del_from_lru_notify hook, when anything was
 * actually removed.
 */
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool notify = false;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		notify = true;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		notify = true;
	}

	/* Only fire the hook on a real removal, and only if the driver
	 * provides one. */
	if (notify && bdev->driver->del_from_lru_notify)
		bdev->driver->del_from_lru_notify(bo);
}
|
|
|
|
|
2018-08-06 09:05:30 +00:00
|
|
|
/*
 * Record @bo as the new tail of a bulk-move range; if the range was empty
 * it also becomes the head.
 */
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
				     struct ttm_buffer_object *bo)
{
	if (!pos->first)
		pos->first = bo;
	pos->last = bo;
}
|
|
|
|
|
|
|
|
/*
 * Move @bo to the tail of its LRU list(s), making it the least-preferred
 * eviction candidate. When a @bulk tracker is supplied and the BO is not
 * pinned, its position is also recorded so a later
 * ttm_bo_bulk_move_lru_tail() can move a whole range at once.
 *
 * Caller must hold the BO's reservation (asserted below).
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
			     struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	/* Re-insertion at the tail = remove, then add. */
	ttm_bo_del_from_lru(bo);
	ttm_bo_add_mem_to_lru(bo, &bo->mem);

	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		/* Only TT and VRAM ranges are bulk-tracked per domain. */
		switch (bo->mem.mem_type) {
		case TTM_PL_TT:
			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
			break;

		case TTM_PL_VRAM:
			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
			break;
		}

		/* Mirror the swap-LRU eligibility test used when adding:
		 * resident, non-SG, non-swapped pages. */
		if (bo->ttm && !(bo->ttm->page_flags &
				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
			ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
	}
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
|
|
|
|
|
2018-08-06 09:28:35 +00:00
|
|
|
/*
 * Move every range recorded in @bulk (TT, VRAM and swap, per priority)
 * to the tail of the corresponding LRU list in one splice each.
 *
 * The reservations of the first and last BO of each range must be held
 * (asserted below); the BOs in between are assumed to belong to the same
 * reservation — NOTE(review): that invariant is established by the callers
 * filling @bulk, confirm there.
 */
void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
		struct ttm_mem_type_manager *man;

		/* Empty range at this priority: nothing to splice. */
		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		man = &pos->first->bdev->man[TTM_PL_TT];
		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
				    &pos->last->lru);
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
		struct ttm_mem_type_manager *man;

		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		man = &pos->first->bdev->man[TTM_PL_VRAM];
		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
				    &pos->last->lru);
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
		struct list_head *lru;

		if (!pos->first)
			continue;

		dma_resv_assert_held(pos->first->base.resv);
		dma_resv_assert_held(pos->last->base.resv);

		/* Swap LRU lives in the global state, not a per-device
		 * manager. */
		lru = &ttm_bo_glob.swap_lru[i];
		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
	}
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
|
|
|
|
|
2009-06-10 13:20:19 +00:00
|
|
|
/*
 * Move @bo's backing storage to the placement described by @mem.
 *
 * Unmaps any CPU mappings of the old location, (re)creates and binds a TTM
 * page array when the destination needs one, and then performs the actual
 * copy via ttm_bo_move_ttm(), the driver's move() hook, or the generic
 * memcpy fallback — in that order of preference.
 *
 * Returns 0 on success or a negative errno; on failure the destination TTM
 * is torn down again if the (unchanged) current placement doesn't use one.
 */
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem, bool evict,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret;

	/* Kill CPU mappings of the old location before moving the data. */
	ret = ttm_mem_io_lock(old_man, true);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(old_man);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_man->use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_man->use_tt);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem, ctx);
			if (ret)
				goto out_err;
		}

		/* System -> TT "move" is just adopting the new placement;
		 * no data copy is needed. */
		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			goto moved;
		}
	}

	/* Notify before the move so the driver sees bo->mem = old node,
	 * mem = new node (see the history around move_notify ordering). */
	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (old_man->use_tt && new_man->use_tt)
		ret = ttm_bo_move_ttm(bo, ctx, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, ctx, mem);
	else
		ret = ttm_bo_move_memcpy(bo, ctx, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			/* Undo the earlier notification: temporarily swap the
			 * nodes so the driver is told about a "move back" to
			 * the original placement, then restore them. */
			swap(*mem, bo->mem);
			bdev->driver->move_notify(bo, false, mem);
			swap(*mem, bo->mem);
		}

		goto out_err;
	}

moved:
	bo->evicted = false;

	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
	return 0;

out_err:
	/* bo->mem is unchanged on failure; drop the TTM again if the current
	 * placement doesn't need one. */
	new_man = &bdev->man[bo->mem.mem_type];
	if (!new_man->use_tt) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
|
|
|
|
|
2010-09-30 10:36:45 +00:00
|
|
|
/**
 * ttm_bo_cleanup_memtype_use - release the memory backing a dying BO
 *
 * Gives the driver a final move_notify(bo, false, NULL), destroys the TTM
 * page array and returns the memory region to its manager. This is the
 * place for driver-specific hooks to release private resources.
 *
 * NOTE(review): the caller is expected to hold the BO's reservation —
 * confirm at call sites; no locking is done here.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	/* NULL new placement signals "going away" to the driver. */
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);
}
|
|
|
|
|
2017-07-20 18:55:06 +00:00
|
|
|
/*
 * Give a dying BO its own private reservation object.
 *
 * Copies the fences from the (possibly shared) bo->base.resv into the
 * embedded bo->base._resv and, except for SG BOs, repoints bo->base.resv
 * at the private copy so the shared object is no longer referenced.
 *
 * Returns 0 on success (or if already individualized), negative errno if
 * copying the fences failed.
 */
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	/* Already using the embedded reservation object: nothing to do. */
	if (bo->base.resv == &bo->base._resv)
		return 0;

	/* Nobody else can hold _resv yet, so the trylock must succeed. */
	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * reference it any more. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&ttm_bo_glob.lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&ttm_bo_glob.lru_lock);
	}

	return r;
}
|
|
|
|
|
2014-04-02 15:14:48 +00:00
|
|
|
/*
 * Kick software signaling on every fence (exclusive and shared) attached
 * to the BO's individualized reservation object, so a delayed destroy can
 * make progress without anyone explicitly waiting on the fences.
 */
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i;

	/* Fence pointers are RCU-protected; hold the read lock while
	 * dereferencing them. */
	rcu_read_lock();
	fobj = rcu_dereference(resv->fence);
	fence = rcu_dereference(resv->fence_excl);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference(fobj->shared[i]);

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	rcu_read_unlock();
}
|
|
|
|
|
2010-10-19 07:01:01 +00:00
|
|
|
/**
 * ttm_bo_cleanup_refs - finish the delayed destruction of a BO
 * If the bo is idle, remove it from the lru lists and drop a reference.
 * If not idle, block if allowed to.
 *
 * Must be called with lru_lock and the bo's reservation held; this
 * function will drop the lru lock (and optionally the reservation)
 * before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv           Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct dma_resv *resv = &bo->base._resv;
	int ret;

	if (dma_resv_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		/* Drop both locks across the (potentially long) fence wait. */
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&ttm_bo_glob.lru_lock);

		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
						 30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&ttm_bo_glob.lru_lock);
		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
			/*
			 * We raced, and lost, someone else holds the reservation now,
			 * and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if it's not the case, because we finished waiting any
			 * delayed destruction would succeed, so just return success
			 * here.
			 */
			spin_unlock(&ttm_bo_glob.lru_lock);
			return 0;
		}
		ret = 0;
	}

	/* Still busy, or already taken off the delayed-destroy list by
	 * someone else: bail out with the locks released as promised. */
	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&ttm_bo_glob.lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	spin_unlock(&ttm_bo_glob.lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		dma_resv_unlock(bo->base.resv);

	/* Drop the reference the delayed-destroy list was holding. */
	ttm_bo_put(bo);

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers. Returns true when the list ends up empty.
 * With @remove_all set, waits (blocking lock) rather than skipping
 * contended BOs.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	struct list_head removed;
	bool empty;

	/* Entries are parked on a local list while being processed so the
	 * loop terminates even when cleanup re-queues them. */
	INIT_LIST_HEAD(&removed);

	spin_lock(&glob->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		list_move_tail(&bo->ddestroy, &removed);
		/* BO already fully released by someone else: skip it. */
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		if (remove_all || bo->base.resv != &bo->base._resv) {
			/* Need the (possibly shared) resv: take it blocking,
			 * which requires dropping the lru_lock first. */
			spin_unlock(&glob->lru_lock);
			dma_resv_lock(bo->base.resv, NULL);

			spin_lock(&glob->lru_lock);
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		} else if (dma_resv_trylock(bo->base.resv)) {
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
		} else {
			/* Contended and not forced: leave it for next time.
			 * cleanup_refs drops the lru_lock itself in the other
			 * branches, so match that here. */
			spin_unlock(&glob->lru_lock);
		}

		/* Drop the temporary reference taken above. */
		ttm_bo_put(bo);
		spin_lock(&glob->lru_lock);
	}
	/* Put whatever survived back on the device's delayed list. */
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return empty;
}
|
|
|
|
|
|
|
|
/*
 * Delayed-destroy work item: run one pass of ttm_bo_delayed_delete() and
 * re-arm the work (~10ms, clamped to at least one jiffy) while BOs remain
 * on the list.
 */
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
		container_of(work, struct ttm_bo_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
|
|
|
|
|
|
|
|
/*
 * Final kref release callback for a buffer object.
 *
 * Either destroys the BO immediately (when it is idle and its reservation
 * can be trylocked), or "resurrects" it onto the bdev->ddestroy list with a
 * fresh reference so the delayed-delete worker can finish it later.
 */
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
	/* Saved before bo->destroy() below may free the embedding object. */
	size_t acc_size = bo->acc_size;
	int ret;

	/* First-time release only: not yet on the delayed-destroy path. */
	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences block for the BO to become idle
			 */
			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
						  30 * HZ);
		}

		if (bo->bdev->driver->release_notify)
			bo->bdev->driver->release_notify(bo);

		/* Tear down CPU mappings and iomem reservations. */
		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_lock(man, false);
		ttm_mem_io_free_vm(bo);
		ttm_mem_io_unlock(man);
	}

	if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
	    !dma_resv_trylock(bo->base.resv)) {
		/* The BO is not idle, resurrect it for delayed destroy */
		ttm_bo_flush_all_fences(bo);
		bo->deleted = true;

		spin_lock(&ttm_bo_glob.lru_lock);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_del_from_lru(bo);
			ttm_bo_add_mem_to_lru(bo, &bo->mem);
		}

		/* Re-arm the refcount: the ddestroy list now owns a reference. */
		kref_init(&bo->kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&ttm_bo_glob.lru_lock);

		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		return;
	}

	/* Idle and locked: destroy immediately. */
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_del_from_lru(bo);
	list_del(&bo->ddestroy);
	spin_unlock(&ttm_bo_glob.lru_lock);

	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);

	atomic_dec(&ttm_bo_glob.bo_count);
	dma_fence_put(bo->moving);
	if (!ttm_bo_uses_embedded_gem_object(bo))
		dma_resv_fini(&bo->base._resv);
	/* bo may be freed here; only the saved acc_size is used afterwards. */
	bo->destroy(bo);
	ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
|
|
|
|
|
2018-06-21 13:21:35 +00:00
|
|
|
/*
 * ttm_bo_put - drop a reference to a buffer object.
 * @bo: the buffer object
 *
 * When the last reference is dropped, ttm_bo_release() runs and either
 * destroys the BO or queues it for delayed destruction.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
|
|
|
|
|
2010-04-26 20:00:09 +00:00
|
|
|
/*
 * ttm_bo_lock_delayed_workqueue - stop the delayed-delete worker.
 * @bdev: the buffer object device
 *
 * Returns non-zero when work was pending, so the caller can pass it to
 * ttm_bo_unlock_delayed_workqueue() to decide whether to reschedule.
 */
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
|
|
|
|
|
|
|
|
void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
|
|
|
|
{
|
|
|
|
if (resched)
|
|
|
|
schedule_delayed_work(&bdev->wq,
|
|
|
|
((HZ / 100) < 1) ? 1 : HZ / 100);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
|
|
|
|
|
2017-04-12 14:48:39 +00:00
|
|
|
/*
 * Evict a buffer object from its current placement.
 *
 * Asks the driver for eviction placements, finds space there and moves the
 * BO. If the driver returns no placements at all, the BO's backing is simply
 * released after waiting for idle. Caller must hold the BO's reservation.
 */
static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);

	/* Driver wants the BO gone entirely rather than moved. */
	if (!placement.num_placement && !placement.num_busy_placement) {
		ttm_bo_wait(bo, false, false);

		ttm_bo_cleanup_memtype_use(bo);
		return 0;
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		/* Return the freshly allocated space on failure. */
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}
|
|
|
|
|
2016-08-30 15:26:04 +00:00
|
|
|
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
|
|
|
|
const struct ttm_place *place)
|
|
|
|
{
|
|
|
|
/* Don't evict this BO if it's outside of the
|
|
|
|
* requested placement range
|
|
|
|
*/
|
|
|
|
if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
|
|
|
|
(place->lpfn && place->lpfn <= bo->mem.start))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
|
|
|
|
|
2017-12-21 09:42:52 +00:00
|
|
|
/**
|
|
|
|
* Check the target bo is allowable to be evicted or swapout, including cases:
|
|
|
|
*
|
|
|
|
* a. if share same reservation object with ctx->resv, have assumption
|
|
|
|
* reservation objects should already be locked, so not lock again and
|
|
|
|
* return true directly when either the opreation allow_reserved_eviction
|
|
|
|
* or the target bo already is in delayed free list;
|
|
|
|
*
|
|
|
|
* b. Otherwise, trylock it.
|
|
|
|
*/
|
|
|
|
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
|
2019-05-22 07:51:47 +00:00
|
|
|
struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
|
2017-12-21 09:42:52 +00:00
|
|
|
{
|
|
|
|
bool ret = false;
|
|
|
|
|
2019-08-05 14:01:12 +00:00
|
|
|
if (bo->base.resv == ctx->resv) {
|
2019-08-11 08:06:32 +00:00
|
|
|
dma_resv_assert_held(bo->base.resv);
|
2019-11-11 14:16:56 +00:00
|
|
|
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
|
2017-12-21 09:42:52 +00:00
|
|
|
ret = true;
|
2019-05-22 07:51:47 +00:00
|
|
|
*locked = false;
|
|
|
|
if (busy)
|
|
|
|
*busy = false;
|
2017-12-21 09:42:52 +00:00
|
|
|
} else {
|
2019-08-11 08:06:32 +00:00
|
|
|
ret = dma_resv_trylock(bo->base.resv);
|
2019-05-22 07:51:47 +00:00
|
|
|
*locked = ret;
|
|
|
|
if (busy)
|
|
|
|
*busy = !ret;
|
2017-12-21 09:42:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-05-22 07:51:47 +00:00
|
|
|
/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
				   struct ttm_operation_ctx *ctx,
				   struct ww_acquire_ctx *ticket)
{
	int r;

	/* Without a BO and a ticket there is nothing to wait on. */
	if (!busy_bo || !ticket)
		return -EBUSY;

	if (ctx->interruptible)
		r = dma_resv_lock_interruptible(busy_bo->base.resv,
						ticket);
	else
		r = dma_resv_lock(busy_bo->base.resv, ticket);

	/*
	 * TODO: It would be better to keep the BO locked until allocation is at
	 * least tried one more time, but that would mean a much larger rework
	 * of TTM.
	 */
	if (!r)
		dma_resv_unlock(busy_bo->base.resv);

	/* -EDEADLK means the caller must back off; report it as busy. */
	return r == -EDEADLK ? -EBUSY : r;
}
|
|
|
|
|
2009-12-08 14:33:32 +00:00
|
|
|
/*
 * Evict the first suitable BO from @man's LRU lists.
 *
 * Walks the per-priority LRU lists under the global lru_lock looking for a
 * BO that can be reserved and whose eviction the driver considers valuable.
 * If none can be locked, optionally blocks on the first busy candidate.
 * Returns 0 on successful eviction, -EBUSY when only busy BOs were found,
 * or another negative error code.
 */
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       struct ttm_mem_type_manager *man,
			       const struct ttm_place *place,
			       struct ttm_operation_ctx *ctx,
			       struct ww_acquire_ctx *ticket)
{
	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
	bool locked = false;
	unsigned i;
	int ret;

	spin_lock(&ttm_bo_glob.lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			bool busy;

			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
							    &busy)) {
				/* Remember the first busy BO, unless we hold
				 * its lock through our own ticket already. */
				if (busy && !busy_bo && ticket !=
				    dma_resv_locking_ctx(bo->base.resv))
					busy_bo = bo;
				continue;
			}

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}
			/* Skip BOs already on their way to destruction. */
			if (!ttm_bo_get_unless_zero(bo)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}
			break;
		}

		/* If the inner loop terminated early, we have our candidate */
		if (&bo->lru != &man->lru[i])
			break;

		bo = NULL;
	}

	if (!bo) {
		/* No lockable candidate: block on the busy one, if any. */
		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
			busy_bo = NULL;
		spin_unlock(&ttm_bo_glob.lru_lock);
		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
		if (busy_bo)
			ttm_bo_put(busy_bo);
		return ret;
	}

	if (bo->deleted) {
		/* Already dead: finish the delayed destruction instead. */
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		ttm_bo_put(bo);
		return ret;
	}

	spin_unlock(&ttm_bo_glob.lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked)
		ttm_bo_unreserve(bo);

	ttm_bo_put(bo);
	return ret;
}
|
|
|
|
|
2020-07-06 15:32:55 +00:00
|
|
|
static int ttm_bo_mem_get(struct ttm_buffer_object *bo,
|
|
|
|
const struct ttm_place *place,
|
|
|
|
struct ttm_mem_reg *mem)
|
|
|
|
{
|
|
|
|
struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
|
|
|
|
|
|
|
|
mem->mm_node = NULL;
|
|
|
|
if (!man->func || !man->func->get_node)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return man->func->get_node(man, bo, place, mem);
|
|
|
|
}
|
|
|
|
|
2010-08-04 02:07:08 +00:00
|
|
|
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
|
|
|
|
{
|
2010-08-05 00:48:18 +00:00
|
|
|
struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
|
2010-08-04 02:07:08 +00:00
|
|
|
|
2020-07-06 15:32:55 +00:00
|
|
|
if (!man->func || !man->func->put_node)
|
|
|
|
return;
|
|
|
|
|
|
|
|
man->func->put_node(man, mem);
|
|
|
|
mem->mm_node = NULL;
|
|
|
|
mem->mem_type = TTM_PL_SYSTEM;
|
2010-08-04 02:07:08 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ttm_bo_mem_put);
|
|
|
|
|
2016-06-15 11:44:03 +00:00
|
|
|
/**
|
|
|
|
* Add the last move fence to the BO and reserve a new shared slot.
|
|
|
|
*/
|
|
|
|
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
|
|
|
|
struct ttm_mem_type_manager *man,
|
2019-09-16 15:20:47 +00:00
|
|
|
struct ttm_mem_reg *mem,
|
|
|
|
bool no_wait_gpu)
|
2016-06-15 11:44:03 +00:00
|
|
|
{
|
2016-10-25 12:00:45 +00:00
|
|
|
struct dma_fence *fence;
|
2016-06-15 11:44:03 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock(&man->move_lock);
|
2016-10-25 12:00:45 +00:00
|
|
|
fence = dma_fence_get(man->move);
|
2016-06-15 11:44:03 +00:00
|
|
|
spin_unlock(&man->move_lock);
|
|
|
|
|
2019-09-16 15:20:47 +00:00
|
|
|
if (!fence)
|
|
|
|
return 0;
|
2016-06-15 11:44:03 +00:00
|
|
|
|
2019-09-16 15:20:47 +00:00
|
|
|
if (no_wait_gpu)
|
|
|
|
return -EBUSY;
|
2016-06-15 11:44:03 +00:00
|
|
|
|
2019-09-16 15:20:47 +00:00
|
|
|
dma_resv_add_shared_fence(bo->base.resv, fence);
|
|
|
|
|
|
|
|
ret = dma_resv_reserve_shared(bo->base.resv, 1);
|
|
|
|
if (unlikely(ret)) {
|
|
|
|
dma_fence_put(fence);
|
|
|
|
return ret;
|
2016-06-15 11:44:03 +00:00
|
|
|
}
|
|
|
|
|
2019-09-16 15:20:47 +00:00
|
|
|
dma_fence_put(bo->moving);
|
|
|
|
bo->moving = fence;
|
2016-06-15 11:44:03 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-06-10 13:20:19 +00:00
|
|
|
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct ww_acquire_ctx *ticket;
	int ret;

	/* Pass our own locking ticket so eviction can detect self-deadlock. */
	ticket = dma_resv_locking_ctx(bo->base.resv);
	do {
		ret = ttm_bo_mem_get(bo, place, mem);
		if (likely(!ret))
			break;
		/* Only lack of space is recoverable by evicting. */
		if (unlikely(ret != -ENOSPC))
			return ret;
		ret = ttm_mem_evict_first(bdev, man, place, ctx,
					  ticket);
		if (unlikely(ret != 0))
			return ret;
	} while (1);

	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
|
|
|
|
|
2009-06-24 17:57:34 +00:00
|
|
|
/*
 * Pick a caching mode for a new placement from the modes allowed by
 * @proposed_placement, preferring (in order): the BO's current caching,
 * the manager's default, then CACHED, WC and finally UNCACHED.
 */
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/* Keep the current caching mode whenever it is still allowed. */
	if (cur_placement & caching)
		result |= cur_placement & caching;
	else if (man->default_caching & caching)
		result |= man->default_caching;
	else if (caching & TTM_PL_FLAG_CACHED)
		result |= TTM_PL_FLAG_CACHED;
	else if (caching & TTM_PL_FLAG_WC)
		result |= TTM_PL_FLAG_WC;
	else if (caching & TTM_PL_FLAG_UNCACHED)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}
|
|
|
|
|
2009-06-10 13:20:19 +00:00
|
|
|
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
|
|
|
|
uint32_t mem_type,
|
2014-08-27 11:16:04 +00:00
|
|
|
const struct ttm_place *place,
|
2009-06-24 17:57:34 +00:00
|
|
|
uint32_t *masked_placement)
|
2009-06-10 13:20:19 +00:00
|
|
|
{
|
|
|
|
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
|
|
|
|
|
2014-08-27 11:16:04 +00:00
|
|
|
if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
|
2009-06-10 13:20:19 +00:00
|
|
|
return false;
|
|
|
|
|
2014-08-27 11:16:04 +00:00
|
|
|
if ((place->flags & man->available_caching) == 0)
|
2009-06-10 13:20:19 +00:00
|
|
|
return false;
|
|
|
|
|
2014-08-27 11:16:04 +00:00
|
|
|
cur_flags |= (place->flags & man->available_caching);
|
2009-06-24 17:57:34 +00:00
|
|
|
|
|
|
|
*masked_placement = cur_flags;
|
2009-06-10 13:20:19 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-05-13 15:34:29 +00:00
|
|
|
/**
 * ttm_bo_mem_placement - check if placement is compatible
 * @bo: BO to find memory for
 * @place: where to search
 * @mem: the memory object to fill in
 * @ctx: operation context
 *
 * Check if placement is compatible and fill in mem structure.
 * Returns -EBUSY if placement won't work or negative error code.
 * 0 when placement can be used.
 */
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_mem_reg *mem,
				struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t mem_type = TTM_PL_SYSTEM;
	struct ttm_mem_type_manager *man;
	uint32_t cur_flags = 0;
	int ret;

	ret = ttm_mem_type_from_place(place, &mem_type);
	if (ret)
		return ret;

	/* The target memory type must be initialized and enabled. */
	man = &bdev->man[mem_type];
	if (!man->has_type || !man->use_type)
		return -EBUSY;

	if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
		return -EBUSY;

	cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
	/*
	 * Use the access and other non-mapping-related flag bits from
	 * the memory placement flags to the current flags
	 */
	ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);

	mem->mem_type = mem_type;
	mem->placement = cur_flags;

	/* Move the BO onto the LRU of its prospective placement. */
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_del_from_lru(bo);
	ttm_bo_add_mem_to_lru(bo, mem);
	spin_unlock(&ttm_bo_glob.lru_lock);

	return 0;
}
|
|
|
|
|
2009-06-10 13:20:19 +00:00
|
|
|
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool type_found = false;
	int i, ret;

	/* Make room for the fence ttm_bo_add_move_fence() may add below. */
	ret = dma_resv_reserve_shared(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	/* First pass: try each preferred placement without evicting. */
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_mem_type_manager *man;

		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
		if (ret == -EBUSY)
			continue;
		if (ret)
			goto error;

		type_found = true;
		ret = ttm_bo_mem_get(bo, place, mem);
		if (ret == -ENOSPC)
			continue;
		if (unlikely(ret))
			goto error;

		man = &bdev->man[mem->mem_type];
		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_bo_mem_put(bo, mem);
			if (ret == -EBUSY)
				continue;

			goto error;
		}
		return 0;
	}

	/* Second pass: busy placements, evicting other BOs as needed. */
	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
		if (ret == -EBUSY)
			continue;
		if (ret)
			goto error;

		type_found = true;
		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
		if (likely(!ret))
			return 0;

		if (ret && ret != -EBUSY)
			goto error;
	}

	/* -ENOMEM: ran out of space; -EINVAL: no usable type at all. */
	ret = -ENOMEM;
	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		ret = -EINVAL;
	}

error:
	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
		ttm_bo_move_to_lru_tail_unlocked(bo);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
|
|
|
|
|
2014-01-06 16:42:58 +00:00
|
|
|
/*
 * Move @bo to a placement satisfying @placement: find space via
 * ttm_bo_mem_space() and perform the move. The allocated space is released
 * again on failure. Caller must hold the BO's reservation.
 */
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	dma_resv_assert_held(bo->base.resv);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	mem.mm_node = NULL;

	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
	if (ret)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}
|
|
|
|
|
2017-03-29 09:47:04 +00:00
|
|
|
/*
 * Test whether the current allocation @mem already satisfies one of
 * @places. On a full match, returns true with *new_flags set to the
 * matching place's flags.
 *
 * NOTE(review): *new_flags is also written for any place that passes the
 * range check but fails the flags check below — callers appear to use it
 * only when this returns true, but the side effect exists regardless.
 */
static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		/* Current allocation must lie inside the place's pfn range. */
		if ((mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}
|
|
|
|
|
|
|
|
bool ttm_bo_mem_compat(struct ttm_placement *placement,
|
|
|
|
struct ttm_mem_reg *mem,
|
|
|
|
uint32_t *new_flags)
|
|
|
|
{
|
|
|
|
if (ttm_bo_places_compat(placement->placement, placement->num_placement,
|
|
|
|
mem, new_flags))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if ((placement->busy_placement != placement->placement ||
|
|
|
|
placement->num_busy_placement > placement->num_placement) &&
|
|
|
|
ttm_bo_places_compat(placement->busy_placement,
|
|
|
|
placement->num_busy_placement,
|
|
|
|
mem, new_flags))
|
|
|
|
return true;
|
2013-10-28 09:02:19 +00:00
|
|
|
|
|
|
|
return false;
|
2009-06-10 13:20:19 +00:00
|
|
|
}
|
2016-06-29 19:58:49 +00:00
|
|
|
EXPORT_SYMBOL(ttm_bo_mem_compat);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2009-12-10 16:16:27 +00:00
|
|
|
/*
 * ttm_bo_validate - ensure @bo resides in one of the given placements.
 *
 * Moves the buffer if its current placement is not compatible, otherwise
 * just refreshes the non-memtype placement flags in place. An empty
 * placement list drops the backing store entirely. Caller must hold the
 * BO's reservation. Returns 0 on success or a negative error code.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;
	uint32_t new_flags;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement && !placement->num_busy_placement)
		return ttm_bo_pipeline_gutting(bo);

	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, ctx);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2017-02-16 09:56:40 +00:00
|
|
|
/*
 * ttm_bo_init_reserved - initialize a pre-allocated buffer object.
 *
 * Accounts @acc_size against the global memory limit, initializes all BO
 * fields, optionally sets up the embedded GEM pieces, reserves the BO and
 * validates it into @placement. On success the BO is returned reserved
 * (unless @resv was supplied, in which case the caller's lock is kept).
 * On any failure the BO is destroyed via @destroy (or kfree) and a negative
 * error code is returned.
 */
int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 struct ttm_operation_ctx *ctx,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	int ret = 0;
	unsigned long num_pages;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

	kref_init(&bo->kref);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	/* Fresh BOs start out as cached system memory with no backing node. */
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->moving = NULL;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->base.resv = resv;
		dma_resv_assert_held(bo->base.resv);
	} else {
		bo->base.resv = &bo->base._resv;
	}
	if (!ttm_bo_uses_embedded_gem_object(bo)) {
		/*
		 * bo.gem is not initialized, so we have to setup the
		 * struct elements we want use regardless.
		 */
		dma_resv_init(&bo->base._resv);
		drm_vma_node_reset(&bo->base.vma_node);
	}
	atomic_inc(&ttm_bo_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = dma_resv_trylock(bo->base.resv);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, ctx);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		/* Drops the initial reference; ttm_bo_release cleans up. */
		ttm_bo_put(bo);
		return ret;
	}

	ttm_bo_move_to_lru_tail_unlocked(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
|
|
|
|
|
|
|
|
/**
 * ttm_bo_init - initialize a buffer object and leave it unreserved.
 *
 * Convenience wrapper around ttm_bo_init_reserved() for callers that do
 * not need the BO returned in a reserved state.
 *
 * @bdev: the buffer object device.
 * @bo: the caller-allocated buffer object to initialize.
 * @size: requested size in bytes.
 * @type: BO type (device, kernel, sg).
 * @placement: initial placement to validate into.
 * @page_alignment: alignment in pages.
 * @interruptible: whether waits during validation may be interrupted.
 * @acc_size: accounted size, as computed by ttm_bo_acc_size().
 * @sg: optional scatter-gather table for ttm_bo_type_sg objects.
 * @resv: optional external reservation object; if given, it must already
 *        be locked by the caller and the BO stays reserved on return.
 * @destroy: optional destructor called when the last reference is dropped.
 *
 * Returns 0 on success or a negative error code; on error the BO
 * reference taken by ttm_bo_init_reserved() has already been dropped.
 */
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		size_t acc_size,
		struct sg_table *sg,
		struct dma_resv *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	/* no_wait_gpu = false: allow blocking on GPU during validation. */
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, &ctx, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	/*
	 * ttm_bo_init_reserved() returns the BO reserved.  Only drop the
	 * reservation when we took it ourselves; an externally supplied
	 * resv was locked by the caller and stays locked.
	 */
	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2011-11-11 20:42:57 +00:00
|
|
|
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
|
|
|
|
unsigned long bo_size,
|
|
|
|
unsigned struct_size)
|
2009-06-10 13:20:19 +00:00
|
|
|
{
|
2011-11-11 20:42:57 +00:00
|
|
|
unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
|
|
|
|
size_t size = 0;
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2011-11-11 20:42:57 +00:00
|
|
|
size += ttm_round_pot(struct_size);
|
2016-04-08 01:42:17 +00:00
|
|
|
size += ttm_round_pot(npages * sizeof(void *));
|
2011-11-11 20:42:57 +00:00
|
|
|
size += ttm_round_pot(sizeof(struct ttm_tt));
|
|
|
|
return size;
|
2009-06-10 13:20:19 +00:00
|
|
|
}
|
2011-11-11 20:42:57 +00:00
|
|
|
EXPORT_SYMBOL(ttm_bo_acc_size);
|
|
|
|
|
|
|
|
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
|
|
|
|
unsigned long bo_size,
|
|
|
|
unsigned struct_size)
|
|
|
|
{
|
|
|
|
unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
|
|
|
|
size_t size = 0;
|
|
|
|
|
|
|
|
size += ttm_round_pot(struct_size);
|
2016-04-08 01:42:17 +00:00
|
|
|
size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
|
2011-11-11 20:42:57 +00:00
|
|
|
size += ttm_round_pot(sizeof(struct ttm_dma_tt));
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2009-12-10 16:16:27 +00:00
|
|
|
/**
 * ttm_bo_create - allocate and initialize a plain buffer object.
 *
 * @bdev: the buffer object device.
 * @size: requested size in bytes.
 * @type: BO type.
 * @placement: initial placement.
 * @page_alignment: alignment in pages.
 * @interruptible: whether waits may be interrupted.
 * @p_bo: on success, receives the new buffer object.
 *
 * Returns 0 on success or a negative error code.  On success the caller
 * owns one reference to *p_bo.  On failure the BO is not returned;
 * NOTE(review): cleanup of the kzalloc'd bo on ttm_bo_init() failure is
 * expected to happen through the destroy path inside ttm_bo_init() (no
 * destroy callback is passed, so the default free applies) — confirm
 * against ttm_bo_init_reserved()'s error handling.
 */
int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	/* Bare ttm_buffer_object: account exactly its own struct size. */
	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
|
|
|
/**
 * ttm_bo_force_list_clean - evict every BO from a memory manager's LRUs.
 *
 * @bdev: the buffer object device.
 * @man: the memory type manager to drain.
 *
 * Evicts all buffer objects on all priority LRU lists of @man, then waits
 * for the manager's last move fence so outstanding copies finish before
 * the manager is torn down.  Returns 0 on success or the first eviction /
 * fence-wait error.
 */
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct ttm_mem_type_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		/* Forced teardown: bypass normal allocation limits. */
		.flags = TTM_OPT_FLAG_FORCE_ALLOC
	};
	struct ttm_bo_global *glob = &ttm_bo_glob;
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			/* Drop the lock; ttm_mem_evict_first takes it itself. */
			spin_unlock(&glob->lru_lock);
			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
						  NULL);
			if (ret)
				return ret;
			/* Re-take the lock before re-testing the list head. */
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);

	/* Grab a reference to the manager's last move fence, if any. */
	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		/* Uninterruptible wait: callers are tearing the manager down. */
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/**
 * ttm_bo_clean_mm - take down a memory type manager.
 *
 * @bdev: the buffer object device.
 * @mem_type: index of the memory type to clean up.
 *
 * Disables the manager, evicts all of its buffer objects, runs the
 * driver's takedown hook (for non-system types) and cleans up manager
 * state.  Returns 0 on success, -EINVAL for a bad or uninitialized
 * @mem_type, or the error from eviction/takedown.
 */
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	/* Stop new allocations from this manager before draining it. */
	ttm_mem_type_manager_disable(man);

	ret = 0;
	/* mem_type 0 is TTM_PL_SYSTEM: it has no driver takedown hook. */
	if (mem_type > 0) {
		ret = ttm_bo_force_list_clean(bdev, man);
		if (ret) {
			pr_err("Cleanup eviction failed\n");
			return ret;
		}

		ret = (*man->func->takedown)(man);
	}

	ttm_mem_type_manager_cleanup(man);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);
|
|
|
|
|
|
|
|
int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
|
|
|
|
{
|
|
|
|
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
|
|
|
|
|
|
|
|
if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
|
2012-03-17 04:43:50 +00:00
|
|
|
pr_err("Illegal memory manager memory type %u\n", mem_type);
|
2009-06-10 13:20:19 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!man->has_type) {
|
2012-03-17 04:43:50 +00:00
|
|
|
pr_err("Memory type %u has not been initialized\n", mem_type);
|
2009-06-10 13:20:19 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-04 02:55:57 +00:00
|
|
|
return ttm_bo_force_list_clean(bdev, man);
|
2009-06-10 13:20:19 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ttm_bo_evict_mm);
|
|
|
|
|
2020-08-04 02:55:46 +00:00
|
|
|
/**
 * ttm_mem_type_manager_init - initialize a memory type manager.
 *
 * @bdev: the buffer object device owning the manager.
 * @man: the manager to initialize; must not already have a type set.
 * @p_size: size of the managed area, in manager-specific units
 *          (typically pages — TODO confirm per driver usage).
 *
 * Sets up locks, the I/O reserve LRU and the per-priority LRU lists.
 * Callers mark the manager used separately (ttm_mem_type_manager_set_used).
 */
void ttm_mem_type_manager_init(struct ttm_bo_device *bdev,
			       struct ttm_mem_type_manager *man,
			       unsigned long p_size)
{
	unsigned i;

	/* Double-init is a programming error. */
	BUG_ON(man->has_type);
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	spin_lock_init(&man->move_lock);
	INIT_LIST_HEAD(&man->io_reserve_lru);
	man->bdev = bdev;
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	/* No pending move fence yet. */
	man->move = NULL;
}
EXPORT_SYMBOL(ttm_mem_type_manager_init);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2009-08-18 14:51:56 +00:00
|
|
|
/* kobject release callback: frees the global dummy read page once the
 * last reference to the bo_global kobject is dropped. */
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	__free_page(glob->dummy_read_page);
}
|
|
|
|
|
2018-10-19 14:55:26 +00:00
|
|
|
/*
 * Drop one use of the global TTM BO state.  When the last user goes
 * away, tear down the sysfs kobject, release the global memory
 * accounting state and zero the structure so a later init starts clean.
 * Serialized against ttm_bo_global_init() by ttm_global_mutex.
 */
static void ttm_bo_global_release(void)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_bo_glob_use_count > 0)
		goto out;

	kobject_del(&glob->kobj);
	/* Final put triggers ttm_bo_global_kobj_release(). */
	kobject_put(&glob->kobj);
	ttm_mem_global_release(&ttm_mem_glob);
	/* Reset for a potential re-init by a later device. */
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}
|
|
|
|
|
2018-10-19 14:55:26 +00:00
|
|
|
/*
 * Initialize the global TTM BO state (shared across all devices) on
 * first use; later callers only bump the use count.  Serialized by
 * ttm_global_mutex.  Returns 0 on success or a negative error code.
 *
 * NOTE(review): on the -ENOMEM path the use count stays incremented and
 * ttm_mem_glob stays initialized — subsequent ttm_bo_global_release()
 * calls appear to unwind this, but confirm the error paths balance.
 */
static int ttm_bo_global_init(void)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	int ret = 0;
	unsigned i;

	mutex_lock(&ttm_global_mutex);
	/* Already initialized by another device: just count the user. */
	if (++ttm_bo_glob_use_count > 1)
		goto out;

	ret = ttm_mem_global_init(&ttm_mem_glob);
	if (ret)
		goto out;

	spin_lock_init(&glob->lru_lock);
	/* Zeroed DMA32 page mapped into holes of partially-populated BOs. */
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	/* Expose the global state under .../buffer_objects in sysfs. */
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
out:
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
|
|
|
|
|
2009-06-10 13:20:19 +00:00
|
|
|
int ttm_bo_device_release(struct ttm_bo_device *bdev)
|
|
|
|
{
|
2019-09-25 09:38:50 +00:00
|
|
|
struct ttm_bo_global *glob = &ttm_bo_glob;
|
2009-06-10 13:20:19 +00:00
|
|
|
int ret = 0;
|
|
|
|
unsigned i = TTM_NUM_MEM_TYPES;
|
|
|
|
struct ttm_mem_type_manager *man;
|
|
|
|
|
|
|
|
while (i--) {
|
|
|
|
man = &bdev->man[i];
|
|
|
|
if (man->has_type) {
|
|
|
|
man->use_type = false;
|
|
|
|
if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
|
|
|
|
ret = -EBUSY;
|
2012-03-17 04:43:50 +00:00
|
|
|
pr_err("DRM memory manager type %d is not clean\n",
|
|
|
|
i);
|
2009-06-10 13:20:19 +00:00
|
|
|
}
|
|
|
|
man->has_type = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-19 12:09:24 +00:00
|
|
|
mutex_lock(&ttm_global_mutex);
|
2009-08-18 14:51:56 +00:00
|
|
|
list_del(&bdev->device_list);
|
2018-10-19 12:09:24 +00:00
|
|
|
mutex_unlock(&ttm_global_mutex);
|
2009-08-18 14:51:56 +00:00
|
|
|
|
2010-12-24 14:59:06 +00:00
|
|
|
cancel_delayed_work_sync(&bdev->wq);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2017-11-15 12:20:09 +00:00
|
|
|
if (ttm_bo_delayed_delete(bdev, true))
|
2017-12-15 16:39:32 +00:00
|
|
|
pr_debug("Delayed destroy list was clean\n");
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2017-11-15 12:20:09 +00:00
|
|
|
spin_lock(&glob->lru_lock);
|
2017-01-10 13:08:28 +00:00
|
|
|
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
|
|
|
|
if (list_empty(&bdev->man[0].lru[0]))
|
2017-12-15 16:39:32 +00:00
|
|
|
pr_debug("Swap list %d was clean\n", i);
|
2009-08-18 14:51:56 +00:00
|
|
|
spin_unlock(&glob->lru_lock);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
2018-10-19 14:55:26 +00:00
|
|
|
if (!ret)
|
|
|
|
ttm_bo_global_release();
|
|
|
|
|
2009-06-10 13:20:19 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ttm_bo_device_release);
|
|
|
|
|
2020-08-04 02:55:55 +00:00
|
|
|
/* Set up the built-in TTM_PL_SYSTEM memory manager for a new device. */
static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
{
	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_SYSTEM];

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	man->use_tt = true;
	man->available_caching = TTM_PL_MASK_CACHING;
	man->default_caching = TTM_PL_FLAG_CACHED;

	/* Size 0: system memory is not a sized, driver-managed area. */
	ttm_mem_type_manager_init(bdev, man, 0);
	ttm_mem_type_manager_set_used(man, true);
}
|
|
|
|
|
2009-06-10 13:20:19 +00:00
|
|
|
/**
 * ttm_bo_device_init - initialize a buffer object device.
 *
 * @bdev: the caller-allocated device structure to initialize.
 * @driver: the driver callbacks for this device.
 * @mapping: address space used for CPU mappings of BOs.
 * @vma_manager: offset manager for mmap offsets; must not be NULL.
 * @need_dma32: whether allocations must stay below 4GB.
 *
 * Initializes global state (first caller only), the system memory
 * manager and the delayed-destroy machinery, and links the device into
 * the global device list.  Returns 0 on success or a negative error.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       struct drm_vma_offset_manager *vma_manager,
		       bool need_dma32)
{
	struct ttm_bo_global *glob = &ttm_bo_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	/* Takes a use on the shared global BO state. */
	ret = ttm_bo_global_init();
	if (ret)
		return ret;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	ttm_bo_init_sysman(bdev);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_device_init);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* buffer object vm functions.
|
|
|
|
*/
|
|
|
|
|
2010-11-11 08:41:57 +00:00
|
|
|
/*
 * Kill all CPU mappings of @bo and free its reserved I/O memory.
 * Caller must hold the manager's io_reserve lock (see the unlocked
 * wrapper ttm_bo_unmap_virtual()).
 */
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* Zap all userspace PTEs pointing at this BO. */
	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}
|
2010-11-11 08:41:57 +00:00
|
|
|
|
|
|
|
/**
 * ttm_bo_unmap_virtual - kill all CPU mappings of a buffer object.
 *
 * @bo: the buffer object to unmap.
 *
 * Takes the I/O reserve lock of the BO's current memory manager around
 * ttm_bo_unmap_virtual_locked().  The non-interruptible lock never fails.
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}


EXPORT_SYMBOL(ttm_bo_unmap_virtual);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
|
|
|
/**
 * ttm_bo_wait - wait for a buffer object to become idle.
 *
 * @bo: the buffer object.
 * @interruptible: whether the wait may be interrupted by a signal.
 * @no_wait: if true, only poll and return -EBUSY when not yet idle.
 *
 * Waits (up to 15 seconds) for all fences — shared and exclusive — on
 * the BO's reservation object.  Returns 0 when idle, -EBUSY on timeout
 * or when @no_wait is set and the BO is busy, or a negative error from
 * the fence wait.
 */
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		/* true: test shared fences as well as the exclusive one. */
		if (dma_resv_test_signaled_rcu(bo->base.resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	/*
	 * All fences signaled: install a NULL exclusive fence so later
	 * waiters see the object as idle without re-checking every
	 * (signaled) shared fence.
	 */
	dma_resv_add_excl_fence(bo->base.resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
|
|
|
|
|
|
|
|
/**
 * ttm_bo_swapout - swap out one buffer object.
 *
 * @glob: the global BO state holding the swap LRU lists.
 * @ctx: operation context (interruptible / no_wait_gpu flags).
 *
 * A buffer object shrink method that tries to swap out the first
 * swappable buffer object on the bo_global::swap_lru lists, scanning
 * priorities from highest (0) downwards.  The chosen BO is moved to
 * cached system memory if needed, waited on until idle, unmapped and
 * handed to ttm_tt_swapout().
 *
 * Returns 0 when a BO was swapped out (or cleaned up), -EBUSY when no
 * candidate was found, or a negative error from the move/wait/swap.
 */
int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
{
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	bool locked;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			/* Skip BOs we cannot trylock / are not allowed to evict. */
			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
							    NULL))
				continue;

			/* BO may be on its way to destruction: refcount 0. */
			if (!ttm_bo_get_unless_zero(bo)) {
				if (locked)
					dma_resv_unlock(bo->base.resv);
				continue;
			}

			/* Found a candidate; bo is referenced and reserved. */
			ret = 0;
			break;
		}
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	/* Dying BO: finish its destruction instead of swapping it. */
	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
		ttm_bo_put(bo);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/**
	 * Move to system cached
	 */

	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
		/* Local ctx: the move itself is never interruptible here. */
		struct ttm_operation_ctx ctx = { false, false };
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
		if (unlikely(ret != 0))
			goto out;
	}

	/**
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	if (locked)
		dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);
|
2009-06-10 13:20:19 +00:00
|
|
|
|
|
|
|
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
|
|
|
|
{
|
2017-12-21 09:42:53 +00:00
|
|
|
struct ttm_operation_ctx ctx = {
|
|
|
|
.interruptible = false,
|
|
|
|
.no_wait_gpu = false
|
|
|
|
};
|
|
|
|
|
2019-09-25 09:38:50 +00:00
|
|
|
while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
|
2009-06-10 13:20:19 +00:00
|
|
|
}
|
2010-01-13 21:28:42 +00:00
|
|
|
EXPORT_SYMBOL(ttm_bo_swapout_all);
|