forked from Minki/linux
drm/i915/ttm: fix 32b build
Since segment_pages is no longer a compile time constant, it looks like the DIV_ROUND_UP(node->size, segment_pages) breaks the 32b build. Simplest is just to use the ULL variant, but really we should not need more than u32 for the page alignment (also we are limited by that due to the sg->length type), so also make it all u32. Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com> Fixes: aff1e0b09b
("drm/i915/ttm: fix sg_table construction") Signed-off-by: Matthew Auld <matthew.auld@intel.com> Cc: Nirmoy Das <nirmoy.das@linux.intel.com> Reviewed-by: Nirmoy Das <nirmoy.das@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20220712174050.592550-1-matthew.auld@intel.com (cherry picked from commit9306b2b2df
) Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
This commit is contained in:
parent
333991c4e6
commit
ced7866db3
@ -60,6 +60,8 @@ __i915_gem_object_create_region(struct intel_memory_region *mem,
|
||||
if (page_size)
|
||||
default_page_size = page_size;
|
||||
|
||||
/* We should be able to fit a page within an sg entry */
|
||||
GEM_BUG_ON(overflows_type(default_page_size, u32));
|
||||
GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
|
||||
GEM_BUG_ON(default_page_size < PAGE_SIZE);
|
||||
|
||||
|
@ -620,7 +620,7 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
|
||||
struct ttm_resource *res)
|
||||
{
|
||||
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
|
||||
u64 page_alignment;
|
||||
u32 page_alignment;
|
||||
|
||||
if (!i915_ttm_gtt_binds_lmem(res))
|
||||
return i915_ttm_tt_get_st(bo->ttm);
|
||||
|
@ -79,10 +79,10 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
|
||||
*/
|
||||
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
|
||||
u64 region_start,
|
||||
u64 page_alignment)
|
||||
u32 page_alignment)
|
||||
{
|
||||
const u64 max_segment = round_down(UINT_MAX, page_alignment);
|
||||
u64 segment_pages = max_segment >> PAGE_SHIFT;
|
||||
const u32 max_segment = round_down(UINT_MAX, page_alignment);
|
||||
const u32 segment_pages = max_segment >> PAGE_SHIFT;
|
||||
u64 block_size, offset, prev_end;
|
||||
struct i915_refct_sgt *rsgt;
|
||||
struct sg_table *st;
|
||||
@ -96,7 +96,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
|
||||
|
||||
i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
|
||||
st = &rsgt->table;
|
||||
if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
|
||||
if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
|
||||
GFP_KERNEL)) {
|
||||
i915_refct_sgt_put(rsgt);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@ -123,7 +123,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
|
||||
st->nents++;
|
||||
}
|
||||
|
||||
len = min(block_size, max_segment - sg->length);
|
||||
len = min_t(u64, block_size, max_segment - sg->length);
|
||||
sg->length += len;
|
||||
sg_dma_len(sg) += len;
|
||||
|
||||
@ -155,11 +155,11 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
|
||||
*/
|
||||
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
|
||||
u64 region_start,
|
||||
u64 page_alignment)
|
||||
u32 page_alignment)
|
||||
{
|
||||
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
|
||||
const u64 size = res->num_pages << PAGE_SHIFT;
|
||||
const u64 max_segment = round_down(UINT_MAX, page_alignment);
|
||||
const u32 max_segment = round_down(UINT_MAX, page_alignment);
|
||||
struct drm_buddy *mm = bman_res->mm;
|
||||
struct list_head *blocks = &bman_res->blocks;
|
||||
struct drm_buddy_block *block;
|
||||
@ -207,7 +207,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
|
||||
st->nents++;
|
||||
}
|
||||
|
||||
len = min(block_size, max_segment - sg->length);
|
||||
len = min_t(u64, block_size, max_segment - sg->length);
|
||||
sg->length += len;
|
||||
sg_dma_len(sg) += len;
|
||||
|
||||
|
@ -214,10 +214,10 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);
|
||||
|
||||
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
|
||||
u64 region_start,
|
||||
u64 page_alignment);
|
||||
u32 page_alignment);
|
||||
|
||||
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
|
||||
u64 region_start,
|
||||
u64 page_alignment);
|
||||
u32 page_alignment);
|
||||
|
||||
#endif
|
||||
|
@ -163,7 +163,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
|
||||
struct i915_refct_sgt *
|
||||
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
|
||||
struct ttm_resource *res,
|
||||
u64 page_alignment)
|
||||
u32 page_alignment)
|
||||
{
|
||||
if (mem->is_range_manager) {
|
||||
struct ttm_range_mgr_node *range_node =
|
||||
|
@ -25,7 +25,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem);
|
||||
struct i915_refct_sgt *
|
||||
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
|
||||
struct ttm_resource *res,
|
||||
u64 page_alignment);
|
||||
u32 page_alignment);
|
||||
|
||||
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
|
||||
struct ttm_resource *res);
|
||||
|
Loading…
Reference in New Issue
Block a user