drm/ttm: move the page_alignment into the BO v2
The page alignment is a constant property of a buffer object and shouldn't change over its lifetime, so move it from the ttm_resource into the ttm_buffer_object and drop the per-resource copy.

v2: move the documentation as well, as suggested by Matthew.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210413135248.1266-4-christian.koenig@amd.com
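In practice the change only moves where drivers read the value from, as in the minimal sketch below (the field accesses are taken from the amdgpu hunk in this patch; the surrounding statement is illustrative only):

	/* Before: the alignment was duplicated into every ttm_resource and
	 * copied again on each buffer move. */
	info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;

	/* After: set once by ttm_bo_init_reserved() and read directly from
	 * the buffer object. */
	info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;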
@@ -763,7 +763,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		void __user *out = u64_to_user_ptr(args->value);
 
 		info.bo_size = robj->tbo.base.size;
-		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
+		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
 		info.domains = robj->preferred_domains;
 		info.domain_flags = robj->flags;
 		amdgpu_bo_unreserve(robj);
@@ -207,7 +207,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 
 	spin_lock(&mgr->lock);
 	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
-					mem->page_alignment, 0, place->fpfn,
+					tbo->page_alignment, 0, place->fpfn,
 					place->lpfn, DRM_MM_INSERT_BEST);
 	spin_unlock(&mgr->lock);
 
@@ -184,7 +184,7 @@ static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
 
 static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
 {
-	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
 }
 
 /**
@@ -451,7 +451,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		/* default to 2MB */
 		pages_per_node = (2UL << (20UL - PAGE_SHIFT));
 #endif
-		pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
+		pages_per_node = max((uint32_t)pages_per_node,
+				     tbo->page_alignment);
 		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
 	}
 
@@ -490,7 +491,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 
 	for (; pages_left; ++i) {
 		unsigned long pages = min(pages_left, pages_per_node);
-		uint32_t alignment = mem->page_alignment;
+		uint32_t alignment = tbo->page_alignment;
 
 		if (pages == pages_per_node)
 			alignment = pages_per_node;
@@ -119,7 +119,7 @@ static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
 
 static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
 {
-	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+	return (bo->tbo.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
 }
 
 /**
@@ -903,7 +903,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	memset(&hop, 0, sizeof(hop));
 
 	mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
-	mem.page_alignment = bo->mem.page_alignment;
 	mem.bus.offset = 0;
 	mem.bus.addr = NULL;
 	mem.mm_node = NULL;
@@ -1038,10 +1037,10 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	bo->bdev = bdev;
 	bo->type = type;
+	bo->page_alignment = page_alignment;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
 	bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	bo->mem.mm_node = NULL;
-	bo->mem.page_alignment = page_alignment;
 	bo->mem.bus.offset = 0;
 	bo->mem.bus.addr = NULL;
 	bo->moving = NULL;
@@ -79,9 +79,8 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 		mode = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&rman->lock);
-	ret = drm_mm_insert_node_in_range(mm, node,
-					  mem->num_pages,
-					  mem->page_alignment, 0,
+	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
 
@@ -28,15 +28,16 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
 
 static const struct ttm_resource_manager_func vmw_thp_func;
 
-static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
+static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
+				  struct drm_mm *mm, struct drm_mm_node *node,
 				  unsigned long align_pages,
 				  const struct ttm_place *place,
 				  struct ttm_resource *mem,
 				  unsigned long lpfn,
 				  enum drm_mm_insert_mode mode)
 {
-	if (align_pages >= mem->page_alignment &&
-	    (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
+	if (align_pages >= bo->page_alignment &&
+	    (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
 		return drm_mm_insert_node_in_range(mm, node,
 						   mem->num_pages,
 						   align_pages, 0,
@@ -75,7 +76,7 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
 		if (mem->num_pages >= align_pages) {
-			ret = vmw_thp_insert_aligned(mm, node, align_pages,
+			ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
 						     place, mem, lpfn, mode);
 			if (!ret)
 				goto found_unlock;
@@ -84,14 +85,14 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
 	if (mem->num_pages >= align_pages) {
-		ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
-					     lpfn, mode);
+		ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
+					     mem, lpfn, mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
 	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  mem->page_alignment, 0,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
found_unlock:
 	spin_unlock(&rman->lock);
@@ -86,6 +86,7 @@ struct ttm_tt;
  * @base: drm_gem_object superclass data.
  * @bdev: Pointer to the buffer object device structure.
  * @type: The bo type.
+ * @page_alignment: Page alignment.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
  * @kref: Reference count of this buffer object. When this refcount reaches
@@ -123,6 +124,7 @@ struct ttm_buffer_object {
 
 	struct ttm_device *bdev;
 	enum ttm_bo_type type;
+	uint32_t page_alignment;
 	void (*destroy) (struct ttm_buffer_object *);
 
 	/**
@@ -161,7 +161,6 @@ struct ttm_bus_placement {
  * @mm_node: Memory manager node.
  * @size: Requested size of memory region.
  * @num_pages: Actual size of memory region in pages.
- * @page_alignment: Page alignment.
  * @placement: Placement flags.
  * @bus: Placement on io bus accessible to the CPU
 *
@@ -172,7 +171,6 @@ struct ttm_resource {
 	void *mm_node;
 	unsigned long start;
 	unsigned long num_pages;
-	uint32_t page_alignment;
 	uint32_t mem_type;
 	uint32_t placement;
 	struct ttm_bus_placement bus;
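For reference, a condensed view of the two structures after this change; only the members that appear in the hunks above are shown, everything else is elided for brevity:

	struct ttm_buffer_object {
		/* ... */
		struct ttm_device *bdev;
		enum ttm_bo_type type;
		uint32_t page_alignment;	/* new home: constant for the BO's lifetime */
		void (*destroy)(struct ttm_buffer_object *);
		/* ... */
	};

	struct ttm_resource {
		void *mm_node;
		unsigned long start;
		unsigned long num_pages;
		/* page_alignment removed; managers now read bo->page_alignment */
		uint32_t mem_type;
		uint32_t placement;
		struct ttm_bus_placement bus;
	};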