mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
drm/i915: Use struct resource for memory region IO as well
mem->region is a struct resource, but mem->io_start and mem->io_size are not for whatever reason. Let's unify this and convert the io stuff into a struct resource as well. Should make life a little less annoying when you don't have to juggle between two different approaches all the time. Mostly done using cocci (with manual tweaks at all the places where we mutate io_size by hand): @@ struct intel_memory_region *M; expression START, SIZE; @@ - M->io_start = START; - M->io_size = SIZE; + M->io = DEFINE_RES_MEM(START, SIZE); @@ struct intel_memory_region *M; @@ - M->io_start + M->io.start @@ struct intel_memory_region M; @@ - M.io_start + M.io.start @@ expression M; @@ - M->io_size + resource_size(&M->io) @@ expression M; @@ - M.io_size + resource_size(&M.io) Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com> Acked-by: Nirmoy Das <nirmoy.das@intel.com> Tested-by: Paz Zcharya <pazz@chromium.org> Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20240202224340.30647-2-ville.syrjala@linux.intel.com
This commit is contained in:
parent
2a2e2f5f48
commit
3c0fa9f4ec
@ -78,7 +78,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
|
||||
|
||||
/* Use fbdev's framebuffer from lmem for discrete */
|
||||
info->fix.smem_start =
|
||||
(unsigned long)(mem->io_start +
|
||||
(unsigned long)(mem->io.start +
|
||||
i915_gem_object_get_dma_address(obj, 0));
|
||||
info->fix.smem_len = obj->base.size;
|
||||
} else {
|
||||
|
@ -129,7 +129,7 @@ i915_gem_object_create_region_at(struct intel_memory_region *mem,
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
|
||||
offset + size > mem->io_size &&
|
||||
offset + size > resource_size(&mem->io) &&
|
||||
!i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
|
||||
return ERR_PTR(-ENOSPC);
|
||||
|
||||
|
@ -541,7 +541,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
|
||||
|
||||
/* Exclude the reserved region from driver use */
|
||||
mem->region.end = i915->dsm.reserved.start - 1;
|
||||
mem->io_size = min(mem->io_size, resource_size(&mem->region));
|
||||
mem->io = DEFINE_RES_MEM(mem->io.start,
|
||||
min(resource_size(&mem->io),
|
||||
resource_size(&mem->region)));
|
||||
|
||||
i915->dsm.usable_size = resource_size(&mem->region);
|
||||
|
||||
@ -752,7 +754,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
|
||||
* With discrete devices, where we lack a mappable aperture there is no
|
||||
* possible way to ever access this memory on the CPU side.
|
||||
*/
|
||||
if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
|
||||
if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
|
||||
!(flags & I915_BO_ALLOC_GPU_ONLY))
|
||||
return -ENOSPC;
|
||||
|
||||
@ -838,13 +840,12 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (mem->io_size &&
|
||||
!io_mapping_init_wc(&mem->iomap, mem->io_start, mem->io_size))
|
||||
if (resource_size(&mem->io) &&
|
||||
!io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io)))
|
||||
goto err_cleanup;
|
||||
|
||||
drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
|
||||
&mem->io_start);
|
||||
drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &mem->region.start);
|
||||
drm_dbg(&i915->drm, "Stolen Local DSM: %pR\n", &mem->region);
|
||||
drm_dbg(&i915->drm, "Stolen Local memory IO: %pR\n", &mem->io);
|
||||
|
||||
return 0;
|
||||
|
||||
@ -855,7 +856,7 @@ err_cleanup:
|
||||
|
||||
static int release_stolen_lmem(struct intel_memory_region *mem)
|
||||
{
|
||||
if (mem->io_size)
|
||||
if (resource_size(&mem->io))
|
||||
io_mapping_fini(&mem->iomap);
|
||||
i915_gem_cleanup_stolen(mem->i915);
|
||||
return 0;
|
||||
|
@ -144,13 +144,13 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
|
||||
place->fpfn = offset >> PAGE_SHIFT;
|
||||
WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
|
||||
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
|
||||
} else if (mr->io_size && mr->io_size < mr->total) {
|
||||
} else if (resource_size(&mr->io) && resource_size(&mr->io) < mr->total) {
|
||||
if (flags & I915_BO_ALLOC_GPU_ONLY) {
|
||||
place->flags |= TTM_PL_FLAG_TOPDOWN;
|
||||
} else {
|
||||
place->fpfn = 0;
|
||||
WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
|
||||
place->lpfn = mr->io_size >> PAGE_SHIFT;
|
||||
WARN_ON(overflows_type(resource_size(&mr->io) >> PAGE_SHIFT, place->lpfn));
|
||||
place->lpfn = resource_size(&mr->io) >> PAGE_SHIFT;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1090,7 +1090,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
|
||||
struct intel_memory_region *mr = obj->mm.placements[i];
|
||||
unsigned int flags;
|
||||
|
||||
if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
|
||||
if (!resource_size(&mr->io) && mr->type != INTEL_MEMORY_SYSTEM)
|
||||
continue;
|
||||
|
||||
flags = obj->flags;
|
||||
|
@ -1054,7 +1054,7 @@ static int igt_fill_mappable(struct intel_memory_region *mr,
|
||||
int err;
|
||||
|
||||
total = 0;
|
||||
size = mr->io_size;
|
||||
size = resource_size(&mr->io);
|
||||
do {
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
@ -1315,28 +1315,28 @@ static int igt_mmap_migrate(void *arg)
|
||||
struct intel_memory_region *mixed[] = { mr, system };
|
||||
struct intel_memory_region *single[] = { mr };
|
||||
struct ttm_resource_manager *man = mr->region_private;
|
||||
resource_size_t saved_io_size;
|
||||
struct resource saved_io;
|
||||
int err;
|
||||
|
||||
if (mr->private)
|
||||
continue;
|
||||
|
||||
if (!mr->io_size)
|
||||
if (!resource_size(&mr->io))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* For testing purposes let's force small BAR, if not already
|
||||
* present.
|
||||
*/
|
||||
saved_io_size = mr->io_size;
|
||||
if (mr->io_size == mr->total) {
|
||||
resource_size_t io_size = mr->io_size;
|
||||
saved_io = mr->io;
|
||||
if (resource_size(&mr->io) == mr->total) {
|
||||
resource_size_t io_size = resource_size(&mr->io);
|
||||
|
||||
io_size = rounddown_pow_of_two(io_size >> 1);
|
||||
if (io_size < PAGE_SIZE)
|
||||
continue;
|
||||
|
||||
mr->io_size = io_size;
|
||||
mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
|
||||
i915_ttm_buddy_man_force_visible_size(man,
|
||||
io_size >> PAGE_SHIFT);
|
||||
}
|
||||
@ -1396,9 +1396,9 @@ static int igt_mmap_migrate(void *arg)
|
||||
IGT_MMAP_MIGRATE_FAIL_GPU |
|
||||
IGT_MMAP_MIGRATE_UNFAULTABLE);
|
||||
out_io_size:
|
||||
mr->io_size = saved_io_size;
|
||||
mr->io = saved_io;
|
||||
i915_ttm_buddy_man_force_visible_size(man,
|
||||
mr->io_size >> PAGE_SHIFT);
|
||||
resource_size(&mr->io) >> PAGE_SHIFT);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
@ -144,8 +144,8 @@ region_lmem_init(struct intel_memory_region *mem)
|
||||
int ret;
|
||||
|
||||
if (!io_mapping_init_wc(&mem->iomap,
|
||||
mem->io_start,
|
||||
mem->io_size))
|
||||
mem->io.start,
|
||||
resource_size(&mem->io)))
|
||||
return -EIO;
|
||||
|
||||
ret = intel_region_ttm_init(mem);
|
||||
@ -274,12 +274,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
|
||||
goto err_region_put;
|
||||
|
||||
drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
|
||||
drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
|
||||
&mem->io_start);
|
||||
drm_info(&i915->drm, "Local memory IO size: %pa\n",
|
||||
&mem->io_size);
|
||||
drm_info(&i915->drm, "Local memory available: %pa\n",
|
||||
&lmem_size);
|
||||
drm_dbg(&i915->drm, "Local memory IO: %pR\n", &mem->io);
|
||||
|
||||
if (io_size < lmem_size)
|
||||
drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
|
||||
|
@ -206,8 +206,8 @@ static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
|
||||
* of pages. To succeed with both allocations, especially in case of Small
|
||||
* BAR, try to allocate no more than quarter of mappable memory.
|
||||
*/
|
||||
if (mr && size > mr->io_size / 4)
|
||||
size = mr->io_size / 4;
|
||||
if (mr && size > resource_size(&mr->io) / 4)
|
||||
size = resource_size(&mr->io) / 4;
|
||||
|
||||
return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
|
||||
}
|
||||
|
@ -1157,7 +1157,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
|
||||
dma_addr_t offset = dma - mem->region.start;
|
||||
void __iomem *s;
|
||||
|
||||
if (offset + PAGE_SIZE > mem->io_size) {
|
||||
if (offset + PAGE_SIZE > resource_size(&mem->io)) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
@ -502,7 +502,7 @@ static int query_memregion_info(struct drm_i915_private *i915,
|
||||
info.probed_size = mr->total;
|
||||
|
||||
if (mr->type == INTEL_MEMORY_LOCAL)
|
||||
info.probed_cpu_visible_size = mr->io_size;
|
||||
info.probed_cpu_visible_size = resource_size(&mr->io);
|
||||
else
|
||||
info.probed_cpu_visible_size = mr->total;
|
||||
|
||||
|
@ -50,7 +50,7 @@ static int __iopagetest(struct intel_memory_region *mem,
|
||||
if (memchr_inv(result, value, sizeof(result))) {
|
||||
dev_err(mem->i915->drm.dev,
|
||||
"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
|
||||
&mem->region, &mem->io_start, &offset, caller,
|
||||
&mem->region, &mem->io.start, &offset, caller,
|
||||
value, result[0], result[1], result[2]);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -67,11 +67,11 @@ static int iopagetest(struct intel_memory_region *mem,
|
||||
int err;
|
||||
int i;
|
||||
|
||||
va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
|
||||
va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
|
||||
if (!va) {
|
||||
dev_err(mem->i915->drm.dev,
|
||||
"Failed to ioremap memory region [%pa + %pa] for %ps\n",
|
||||
&mem->io_start, &offset, caller);
|
||||
&mem->io.start, &offset, caller);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
@ -102,10 +102,10 @@ static int iomemtest(struct intel_memory_region *mem,
|
||||
resource_size_t last, page;
|
||||
int err;
|
||||
|
||||
if (mem->io_size < PAGE_SIZE)
|
||||
if (resource_size(&mem->io) < PAGE_SIZE)
|
||||
return 0;
|
||||
|
||||
last = mem->io_size - PAGE_SIZE;
|
||||
last = resource_size(&mem->io) - PAGE_SIZE;
|
||||
|
||||
/*
|
||||
* Quick test to check read/write access to the iomap (backing store).
|
||||
@ -207,7 +207,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
|
||||
struct drm_i915_private *i915 = mem->i915;
|
||||
int err = 0;
|
||||
|
||||
if (!mem->io_start)
|
||||
if (!mem->io.start)
|
||||
return 0;
|
||||
|
||||
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
|
||||
@ -252,8 +252,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
|
||||
|
||||
mem->i915 = i915;
|
||||
mem->region = DEFINE_RES_MEM(start, size);
|
||||
mem->io_start = io_start;
|
||||
mem->io_size = io_size;
|
||||
mem->io = DEFINE_RES_MEM(io_start, io_size);
|
||||
mem->min_page_size = min_page_size;
|
||||
mem->ops = ops;
|
||||
mem->total = size;
|
||||
|
@ -71,8 +71,7 @@ struct intel_memory_region {
|
||||
struct io_mapping iomap;
|
||||
struct resource region;
|
||||
|
||||
resource_size_t io_start;
|
||||
resource_size_t io_size;
|
||||
struct resource io;
|
||||
resource_size_t min_page_size;
|
||||
resource_size_t total;
|
||||
|
||||
|
@ -87,7 +87,7 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
|
||||
|
||||
ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
|
||||
resource_size(&mem->region),
|
||||
mem->io_size,
|
||||
resource_size(&mem->io),
|
||||
mem->min_page_size, PAGE_SIZE);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -219,16 +219,16 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
|
||||
goto out;
|
||||
}
|
||||
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
|
||||
} else if (mem->io_size && mem->io_size < mem->total) {
|
||||
} else if (resource_size(&mem->io) && resource_size(&mem->io) < mem->total) {
|
||||
if (flags & I915_BO_ALLOC_GPU_ONLY) {
|
||||
place.flags |= TTM_PL_FLAG_TOPDOWN;
|
||||
} else {
|
||||
place.fpfn = 0;
|
||||
if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
|
||||
if (WARN_ON(overflows_type(resource_size(&mem->io) >> PAGE_SHIFT, place.lpfn))) {
|
||||
ret = -E2BIG;
|
||||
goto out;
|
||||
}
|
||||
place.lpfn = mem->io_size >> PAGE_SHIFT;
|
||||
place.lpfn = resource_size(&mem->io) >> PAGE_SHIFT;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -544,8 +544,8 @@ static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
|
||||
u64 start = drm_buddy_block_offset(block);
|
||||
u64 end = start + drm_buddy_block_size(mm, block);
|
||||
|
||||
if (start < mr->io_size)
|
||||
total += min_t(u64, end, mr->io_size) - start;
|
||||
if (start < resource_size(&mr->io))
|
||||
total += min_t(u64, end, resource_size(&mr->io)) - start;
|
||||
}
|
||||
|
||||
return total;
|
||||
|
Loading…
Reference in New Issue
Block a user