drm/ttm: use an operation ctx for ttm_mem_global_alloc_page

Forward the operation context to ttm_mem_global_alloc_page as well;
the ultimate goal is to enable swapout for reserved BOs.

Here, reserved BOs means all BOs that share the same reservation object.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Roger He, 2017-12-08 15:21:18 +08:00, committed by Alex Deucher
commit 9de2fb99eb (parent 279c01f6ef)
4 changed files with 16 additions and 11 deletions
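For context, a minimal sketch (not part of the patch) of how a populate-style caller is expected to thread the context through after this change, mirroring the ttm_pool_populate() hunk below. The helper name populate_pages_example() and the rollback loop are illustrative assumptions; ttm_mem_global_alloc_page(), ttm_mem_global_free_page() and the ttm_operation_ctx fields are taken from the diffs.

/*
 * Illustrative sketch only: build a ttm_operation_ctx on the stack with
 * the same defaults the callee used to hard-code, and forward it to
 * ttm_mem_global_alloc_page() for every page that gets accounted.
 */
#include <linux/mm.h>			/* struct page, PAGE_SIZE */
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_bo_api.h>		/* struct ttm_operation_ctx (assumed location) */

static int populate_pages_example(struct ttm_mem_global *glob,
				  struct page **pages, unsigned num_pages)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,	/* same defaults as before this change */
		.no_wait_gpu = false
	};
	unsigned i;
	int ret;

	for (i = 0; i < num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(glob, pages[i],
						PAGE_SIZE, &ctx);
		if (unlikely(ret != 0)) {
			/* undo the accounting done so far */
			while (i--)
				ttm_mem_global_free_page(glob, pages[i],
							 PAGE_SIZE);
			return -ENOMEM;
		}
	}
	return 0;
}

The point of moving the context to the call site, per the commit message, is that later patches can pass a context which permits swapping out reserved BOs (BOs sharing the caller's reservation object) instead of always using these fixed defaults.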


@@ -539,14 +539,10 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 EXPORT_SYMBOL(ttm_mem_global_alloc);
 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-			      struct page *page, uint64_t size)
+			      struct page *page, uint64_t size,
+			      struct ttm_operation_ctx *ctx)
 {
 	struct ttm_mem_zone *zone = NULL;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false
-	};
 	/**
 	 * Page allocations may be registed in a single zone
@@ -560,7 +556,7 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
 		zone = glob->zone_kernel;
 #endif
-	return ttm_mem_global_alloc_zone(glob, zone, size, &ctx);
+	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
 }
 void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,


@@ -1061,6 +1061,10 @@ void ttm_page_alloc_fini(void)
 int ttm_pool_populate(struct ttm_tt *ttm)
 {
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	unsigned i;
 	int ret;
@@ -1076,7 +1080,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
 	for (i = 0; i < ttm->num_pages; ++i) {
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						PAGE_SIZE);
+						PAGE_SIZE, &ctx);
 		if (unlikely(ret != 0)) {
 			ttm_pool_unpopulate(ttm);
 			return -ENOMEM;


@@ -927,6 +927,10 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
 	enum pool_type type;
@@ -962,7 +966,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 			break;
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						pool->size);
+						pool->size, &ctx);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
@@ -998,7 +1002,7 @@ skip_huge:
 		}
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						pool->size);
+						pool->size, &ctx);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;


@@ -84,7 +84,8 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 extern void ttm_mem_global_free(struct ttm_mem_global *glob,
 				uint64_t amount);
 extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-				     struct page *page, uint64_t size);
+				     struct page *page, uint64_t size,
+				     struct ttm_operation_ctx *ctx);
 extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
 				     struct page *page, uint64_t size);
 extern size_t ttm_round_pot(size_t size);