drm/ttm: ioremap buffer according to TTM mem caching setting

If tbo.mem.bus.caching is ttm_cached, the buffer is intended to be
mapped as cached from the CPU side. Map it with ioremap_cache.

This wasn't necessary before as device memory was never mapped
as cached from the CPU side. It becomes necessary for Aldebaran as
device memory is mapped cached from the CPU.

Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
Reviewed-by: Christian König <Christian.Koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1614638628-10508-1-git-send-email-Oak.Zeng@amd.com
Signed-off-by: Christian König <christian.koenig@amd.com>
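
For orientation, here is a minimal sketch of the selection logic the patch
repeats at each ioremap call site below. The helper name
ttm_ioremap_by_caching is hypothetical (the patch open-codes the check at
each site); the types, fields and ioremap variants are the ones used in the
hunks.

#include <linux/io.h>
#include <drm/ttm/ttm_resource.h>

/*
 * Hypothetical helper: pick the ioremap variant that matches the TTM
 * caching attribute of the resource.  ioremap_cache() is only used on
 * x86, where cached mappings of device memory are known to work.
 */
static void __iomem *ttm_ioremap_by_caching(struct ttm_resource *mem,
					    resource_size_t offset,
					    resource_size_t size)
{
	if (mem->bus.caching == ttm_write_combined)
		return ioremap_wc(offset, size);
#ifdef CONFIG_X86
	if (mem->bus.caching == ttm_cached)
		return ioremap_cache(offset, size);
#endif
	return ioremap(offset, size);
}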
@@ -91,6 +91,10 @@ static int ttm_resource_ioremap(struct ttm_device *bdev,
 		if (mem->bus.caching == ttm_write_combined)
 			addr = ioremap_wc(mem->bus.offset, bus_size);
+#ifdef CONFIG_X86
+		else if (mem->bus.caching == ttm_cached)
+			addr = ioremap_cache(mem->bus.offset, bus_size);
+#endif
 		else
 			addr = ioremap(mem->bus.offset, bus_size);
 		if (!addr) {
@@ -371,6 +375,11 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 		if (mem->bus.caching == ttm_write_combined)
 			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
 						  size);
+#ifdef CONFIG_X86
+		else if (mem->bus.caching == ttm_cached)
+			map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
+						     size);
+#endif
 		else
 			map->virtual = ioremap(bo->mem.bus.offset + offset,
 					       size);
@@ -489,6 +498,11 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 		else if (mem->bus.caching == ttm_write_combined)
 			vaddr_iomem = ioremap_wc(mem->bus.offset,
 						 bo->base.size);
+#ifdef CONFIG_X86
+		else if (mem->bus.caching == ttm_cached)
+			vaddr_iomem = ioremap_cache(mem->bus.offset,
+						    bo->base.size);
+#endif
 		else
 			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
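
As a usage note, a hedged caller-side sketch of what the change enables:
assuming the driver's io_mem_reserve callback reports ttm_cached in
mem->bus.caching for the resource, ttm_bo_vmap() now returns a cacheable
CPU mapping of device memory on x86. The function name example_map_bo is
invented for illustration.

#include <linux/io.h>
#include <linux/string.h>
#include <linux/dma-buf-map.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical caller: map a BO, clear it through the CPU, unmap. */
static int example_map_bo(struct ttm_buffer_object *bo)
{
	struct dma_buf_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);
	if (ret)
		return ret;

	if (map.is_iomem)	/* device memory: __iomem pointer */
		memset_io(map.vaddr_iomem, 0, bo->base.size);
	else			/* system memory: plain pointer */
		memset(map.vaddr, 0, bo->base.size);

	ttm_bo_vunmap(bo, &map);
	return 0;
}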