mirror of https://github.com/torvalds/linux.git
drm fixes for v6.9-rc5
ttm:
- Stop pooling cached NUMA pages

amdgpu:
- Fix invalid resource->start check
- USB-C DSC fix
- Fix a potential UAF in VA IOCTL
- Fix visible VRAM handling during faults

amdkfd:
- Fix memory leak in create_process failure

radeon:
- Silence UBSAN warnings from variable sized arrays

nouveau:
- dp: Don't probe DP ports twice
- nv04: Fix OOB access
- nv50: Disable AUX bus for disconnected DP ports
- nvkm: Fix instmem race condition

panel:
- Don't unregister DSI devices in several drivers

v3d:
- Fix enabled_ns increment

xe:
- Fix bo leak on error path during fb init
- Fix use-after-free due to order vm is put and destroyed

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmYhv5wACgkQDHTzWXnE
hr7H2RAAho4Ry8+0Tfg2rUDwqhUD2tNEkzvVJcD/1Y0GBmxkj4asKTOWH7fAQjEB
r4hSqNA1cIa4LP6Gg/hwuqPi+3OSyYRgVEBGjSH3fhOeNNHeN7PWW9ihlTNJNAnh
UUPfHPe6U7ZLUKxH7ob/NgKrxfaPoEiCXvU6L0e2rOWpSmcn/j76NwP4WTsqDriv
Zfh4lBnhCPkImp4OChIDG9RIRUvKL6tucNHMNpPee6lQ64pnM1zalSjLzMqmsuTG
Y5b0v0P1lif5YNXxGOAQJdg6xe96w4VzkezyNNCDJrx5Eva3x48i2HVtAcdUau0K
t11iai7M72Igjl3N9t4Ym84s1nsqwWr6nZ+pS4+8/MrEgEhhFyHm9FeYMdnax8jh
wmlRD6QbvF3l6dxqgg/RChatTxFkqaI3BYU9rzh+Ofk52LUpjNFsHLD6qNzHxa05
c/byKvgwJIfcfnCEAuUFP+MRyS4+2xl1RPrC2usCVQLZIVoj1Y/N4G8kx9kvQuY5
m3a/Ym4uC5yrOaHrACtKDmbkGIQIExorqBuWi9VRbsXcHv4IEFL3JmQpwRq4UEs7
StHWhfENHnIELhKj/Bsf368UfBUbbIsdGLRCTFvzSO1yolgu1KAI4z0Yl4OhU0kU
DZu4/rqJzi3YiJXqlctqoGwuzAujVJGwN23/YWJvbI5HsRjQA9U=
=1H9l
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2024-04-19' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Regular week of fixes, seems to be about right for this time in the
  release cycle. amdgpu and nouveau are the main ones, with some
  scattered fixes otherwise.

  ttm:
   - Stop pooling cached NUMA pages

  amdgpu:
   - Fix invalid resource->start check
   - USB-C DSC fix
   - Fix a potential UAF in VA IOCTL
   - Fix visible VRAM handling during faults

  amdkfd:
   - Fix memory leak in create_process failure

  radeon:
   - Silence UBSAN warnings from variable sized arrays

  nouveau:
   - dp: Don't probe DP ports twice
   - nv04: Fix OOB access
   - nv50: Disable AUX bus for disconnected DP ports
   - nvkm: Fix instmem race condition

  panel:
   - Don't unregister DSI devices in several drivers

  v3d:
   - Fix enabled_ns increment

  xe:
   - Fix bo leak on error path during fb init
   - Fix use-after-free due to order vm is put and destroyed"

* tag 'drm-fixes-2024-04-19' of https://gitlab.freedesktop.org/drm/kernel:
  drm/radeon: silence UBSAN warning (v3)
  drm/radeon: make -fstrict-flex-arrays=3 happy
  drm/amdgpu: fix visible VRAM handling during faults
  drm/amdgpu: validate the parameters of bo mapping operations more clearly
  Revert "drm/amd/display: fix USB-C flag update after enc10 feature init"
  drm/amdkfd: Fix memory leak in create_process failure
  drm/amdgpu: remove invalid resource->start check v2
  drm/xe/vm: prevent UAF with asid based lookup
  drm/xe: Fix bo leak in intel_fb_bo_framebuffer_init
  drm/panel: novatek-nt36682e: don't unregister DSI device
  drm/panel: visionox-rm69299: don't unregister DSI device
  drm/nouveau/dp: Don't probe eDP ports twice harder
  drm/nouveau/kms/nv50-: Disable AUX bus for disconnected DP ports
  drm/v3d: Don't increment `enabled_ns` twice
  drm/vmwgfx: Sort primary plane formats by order of preference
  drm/vmwgfx: Fix crtc's atomic check conditional
  drm/vmwgfx: Fix prime import/export
  drm/ttm: stop pooling cached NUMA pages v2
  drm: nv04: Fix out of bounds access
  nouveau: fix instmem race condition around ptr stores
commit ce944f3f97
@@ -819,7 +819,7 @@ retry:

 	p->bytes_moved += ctx.bytes_moved;
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    amdgpu_bo_in_cpu_visible_vram(bo))
+	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 		p->bytes_moved_vis += ctx.bytes_moved;

 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		return r;

 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
-	    amdgpu_bo_in_cpu_visible_vram(bo))
+	    amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 					     ctx.bytes_moved);
 	else
@@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
 			  struct amdgpu_mem_stats *stats)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_resource *res = bo->tbo.resource;
 	uint64_t size = amdgpu_bo_size(bo);
 	struct drm_gem_object *obj;
 	unsigned int domain;
 	bool shared;

 	/* Abort if the BO doesn't currently have a backing store */
-	if (!bo->tbo.resource)
+	if (!res)
 		return;

 	obj = &bo->tbo.base;
 	shared = drm_gem_object_is_shared_for_memory_stats(obj);

-	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+	domain = amdgpu_mem_type_to_domain(res->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		stats->vram += size;
-		if (amdgpu_bo_in_cpu_visible_vram(bo))
+		if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 			stats->visible_vram += size;
 		if (shared)
 			stats->vram_shared += size;
@@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

-	if (bo->resource->mem_type != TTM_PL_VRAM)
-		return 0;
-
-	if (amdgpu_bo_in_cpu_visible_vram(abo))
+	if (amdgpu_res_cpu_visible(adev, bo->resource))
 		return 0;

 	/* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)

 	/* this should never happen */
 	if (bo->resource->mem_type == TTM_PL_VRAM &&
-	    !amdgpu_bo_in_cpu_visible_vram(abo))
+	    !amdgpu_res_cpu_visible(adev, bo->resource))
 		return VM_FAULT_SIGBUS;

 	ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
 */
 u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct dma_buf_attachment *attachment;
 	struct dma_buf *dma_buf;
 	const char *placement;
@@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)

 	if (dma_resv_trylock(bo->tbo.base.resv)) {
 		unsigned int domain;
+
 		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 		switch (domain) {
 		case AMDGPU_GEM_DOMAIN_VRAM:
-			if (amdgpu_bo_in_cpu_visible_vram(bo))
+			if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
 				placement = "VRAM VISIBLE";
 			else
 				placement = "VRAM";
@@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }

-/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_res_cursor cursor;
-
-	if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
-		return false;
-
-	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
-	while (cursor.remaining) {
-		if (cursor.start < adev->gmc.visible_vram_size)
-			return true;
-
-		amdgpu_res_next(&cursor, cursor.size);
-	}
-
-	return false;
-}
-
 /**
  * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
  */
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,

 	} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 		   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-		   amdgpu_bo_in_cpu_visible_vram(abo)) {
+		   amdgpu_res_cpu_visible(adev, bo->resource)) {

 		/* Try evicting to the CPU inaccessible part of VRAM
 		 * first, but only set GTT as busy placement, so this
@@ -403,40 +403,55 @@ error:
 	return r;
 }

+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
+ *
+ * Returns: true if the full resource is CPU visible, false otherwise.
+ */
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+			    struct ttm_resource *res)
+{
+	struct amdgpu_res_cursor cursor;
+
+	if (!res)
+		return false;
+
+	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+	    res->mem_type == AMDGPU_PL_PREEMPT)
+		return true;
+
+	if (res->mem_type != TTM_PL_VRAM)
+		return false;
+
+	amdgpu_res_first(res, 0, res->size, &cursor);
+	while (cursor.remaining) {
+		if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+			return false;
+		amdgpu_res_next(&cursor, cursor.size);
+	}
+
+	return true;
+}
+
 /*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
-			       struct ttm_resource *mem)
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+				struct ttm_resource *mem)
 {
-	u64 mem_size = (u64)mem->size;
-	struct amdgpu_res_cursor cursor;
-	u64 end;
-
-	if (mem->mem_type == TTM_PL_SYSTEM ||
-	    mem->mem_type == TTM_PL_TT)
-		return true;
-	if (mem->mem_type != TTM_PL_VRAM)
+	if (!amdgpu_res_cpu_visible(adev, mem))
 		return false;

-	amdgpu_res_first(mem, 0, mem_size, &cursor);
-	end = cursor.start + cursor.size;
-	while (cursor.remaining) {
-		amdgpu_res_next(&cursor, cursor.size);
-
-		if (!cursor.remaining)
-			break;
-
-		/* ttm_resource_ioremap only supports contiguous memory */
-		if (end != cursor.start)
-			return false;
-
-		end = cursor.start + cursor.size;
-	}
+	/* ttm_resource_ioremap only supports contiguous memory */
+	if (mem->mem_type == TTM_PL_VRAM &&
+	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+		return false;

-	return end <= adev->gmc.visible_vram_size;
+	return true;
 }

 /*
@@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,

 	if (r) {
 		/* Check that all memory is CPU accessible */
-		if (!amdgpu_mem_visible(adev, old_mem) ||
-		    !amdgpu_mem_visible(adev, new_mem)) {
+		if (!amdgpu_res_copyable(adev, old_mem) ||
+		    !amdgpu_res_copyable(adev, new_mem)) {
 			pr_err("Move buffer fallback to memcpy unavailable\n");
 			return r;
 		}
@@ -557,7 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 				     struct ttm_resource *mem)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-	size_t bus_size = (size_t)mem->size;

 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
@@ -568,9 +582,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 		break;
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
-		/* check if it's visible */
-		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
-			return -EINVAL;

 		if (adev->mman.aper_base_kaddr &&
 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
 				      uint64_t start);

+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+			    struct ttm_resource *res);
+
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
@@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 }

+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+				       struct amdgpu_bo *bo,
+				       uint64_t saddr,
+				       uint64_t offset,
+				       uint64_t size)
+{
+	uint64_t tmp, lpfn;
+
+	if (saddr & AMDGPU_GPU_PAGE_MASK
+	    || offset & AMDGPU_GPU_PAGE_MASK
+	    || size & AMDGPU_GPU_PAGE_MASK)
+		return -EINVAL;
+
+	if (check_add_overflow(saddr, size, &tmp)
+	    || check_add_overflow(offset, size, &tmp)
+	    || size == 0 /* which also leads to end < begin */)
+		return -EINVAL;
+
+	/* make sure object fit at this offset */
+	if (bo && offset + size > amdgpu_bo_size(bo))
+		return -EINVAL;
+
+	/* Ensure last pfn not exceed max_pfn */
+	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+	if (lpfn >= adev->vm_manager.max_pfn)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
@@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
 	uint64_t eaddr;
+	int r;

-	/* validate the parameters */
-	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-		return -EINVAL;
-	if (saddr + size <= saddr || offset + size <= offset)
-		return -EINVAL;
-
-	/* make sure object fit at this offset */
-	eaddr = saddr + size - 1;
-	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-		return -EINVAL;
+	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+	if (r)
+		return r;

 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
 	if (tmp) {
@@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	uint64_t eaddr;
 	int r;

-	/* validate the parameters */
-	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-		return -EINVAL;
-	if (saddr + size <= saddr || offset + size <= offset)
-		return -EINVAL;
-
-	/* make sure object fit at this offset */
-	eaddr = saddr + size - 1;
-	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-		return -EINVAL;
+	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+	if (r)
+		return r;

 	/* Allocate all the needed memory */
 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 	}

 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

 	mapping->start = saddr;
 	mapping->last = eaddr;
@@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
 	LIST_HEAD(removed);
 	uint64_t eaddr;
+	int r;
+
+	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+	if (r)
+		return r;

-	eaddr = saddr + size - 1;
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;

 	/* Allocate all the needed memory */
 	before = kzalloc(sizeof(*before), GFP_KERNEL);
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
 	mutex_lock(&kfd_processes_mutex);

 	if (kfd_is_locked()) {
-		mutex_unlock(&kfd_processes_mutex);
 		pr_debug("KFD is locked! Cannot create process");
-		return ERR_PTR(-EINVAL);
+		process = ERR_PTR(-EINVAL);
+		goto out;
 	}

 	/* A prior open of /dev/kfd could have already created the process. */
@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
 	enc10->base.hpd_source = init_data->hpd_source;
 	enc10->base.connector = init_data->connector;

+	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+		enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+
 	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

 	enc10->base.features = *enc_features;
-	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-		enc10->base.features.flags.bits.DP_IS_USB_C = 1;
-
-	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-		enc10->base.features.flags.bits.DP_IS_USB_C = 1;

 	enc10->base.transmitter = init_data->transmitter;
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
 	enc10->base.hpd_source = init_data->hpd_source;
 	enc10->base.connector = init_data->connector;

+	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+		enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+
 	enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
 	}

 	enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
-	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-		enc10->base.features.flags.bits.DP_IS_USB_C = 1;

 	if (bp_funcs->get_connector_speed_cap_info)
 		result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
@@ -23,6 +23,7 @@
 */

 #include "nouveau_drv.h"
+#include "nouveau_bios.h"
 #include "nouveau_reg.h"
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 	 */
 	if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
 		if (*conn == 0xf2005014 && *conf == 0xffffffff) {
-			fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+			fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
 			return false;
 		}
 	}
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #ifdef __powerpc__
 	/* Apple iMac G4 NV17 */
 	if (of_machine_is_compatible("PowerMac4,5")) {
-		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
-		fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+		fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
 		return;
 	}
 #endif

 	/* Make up some sane defaults */
 	fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
-			     bios->legacy.i2c_indices.crt, 1, 1);
+			     bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);

 	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
 		fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
 				     bios->legacy.i2c_indices.tv,
-				     all_heads, 0);
+				     all_heads, DCB_OUTPUT_A);

 	else if (bios->tmds.output0_script_ptr ||
 		 bios->tmds.output1_script_ptr)
 		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
 				     bios->legacy.i2c_indices.panel,
-				     all_heads, 1);
+				     all_heads, DCB_OUTPUT_B);
 }

 static int
@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
 	u8 *dpcd = nv_encoder->dp.dpcd;
 	int ret = NOUVEAU_DP_NONE, hpd;

-	/* If we've already read the DPCD on an eDP device, we don't need to
-	 * reread it as it won't change
+	/* eDP ports don't support hotplugging - so there's no point in probing eDP ports unless we
+	 * haven't probed them once before.
 	 */
-	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
-	    dpcd[DP_DPCD_REV] != 0)
-		return NOUVEAU_DP_SST;
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		if (connector->status == connector_status_connected)
+			return NOUVEAU_DP_SST;
+		else if (connector->status == connector_status_disconnected)
+			return NOUVEAU_DP_NONE;
+	}
+
+	// Ensure that the aux bus is enabled for probing
+	drm_dp_dpcd_set_powered(&nv_connector->aux, true);

 	mutex_lock(&nv_encoder->dp.hpd_irq_lock);
 	if (mstm) {
@@ -293,6 +299,13 @@ out:
 	if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
 		nv50_mstm_remove(mstm);

+	/* GSP doesn't like when we try to do aux transactions on a port it considers disconnected,
+	 * and since we don't really have a usecase for that anyway - just disable the aux bus here
+	 * if we've decided the connector is disconnected
+	 */
+	if (ret == NOUVEAU_DP_NONE)
+		drm_dp_dpcd_set_powered(&nv_connector->aux, false);
+
 	mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
 	return ret;
 }
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 	void __iomem *map = NULL;

 	/* Already mapped? */
-	if (refcount_inc_not_zero(&iobj->maps))
+	if (refcount_inc_not_zero(&iobj->maps)) {
+		/* read barrier match the wmb on refcount set */
+		smp_rmb();
 		return iobj->map;
+	}

 	/* Take the lock, and re-check that another thread hasn't
 	 * already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 			iobj->base.memory.ptrs = &nv50_instobj_fast;
 		else
 			iobj->base.memory.ptrs = &nv50_instobj_slow;
+		/* barrier to ensure the ptrs are written before refcount is set */
+		smp_wmb();
 		refcount_set(&iobj->maps, 1);
 	}

@@ -614,8 +614,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
 	struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);

 	mipi_dsi_detach(ctx->dsi);
-	mipi_dsi_device_unregister(ctx->dsi);
-
 	drm_panel_remove(&ctx->panel);
 }

@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
 	struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);

 	mipi_dsi_detach(ctx->dsi);
-	mipi_dsi_device_unregister(ctx->dsi);
-
 	drm_panel_remove(&ctx->panel);
 }

@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
 typedef struct _ATOM_PPLIB_STATE_V2
 {
       //number of valid dpm levels in this state; Driver uses it to calculate the whole
-      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
       UCHAR ucNumDPMLevels;

       //a index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
     /**
     * Driver will read the first ucNumDPMLevels in this array
     */
-    UCHAR clockInfoIndex[1];
+    UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
 } ATOM_PPLIB_STATE_V2;

 typedef struct _StateArray{
     //how many states we have
     UCHAR ucNumEntries;

-    ATOM_PPLIB_STATE_V2 states[1];
+    ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
 }StateArray;

@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
     //sizeof(ATOM_PPLIB_CLOCK_INFO)
     UCHAR ucEntrySize;

-    UCHAR clockInfo[1];
+    UCHAR clockInfo[] __counted_by(ucNumEntries);
 }ClockInfoArray;

 typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
     //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
     UCHAR ucEntrySize;

-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
 }NonClockInfoArray;

 typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;

 	for (i = 0; i < max_device; i++) {
-		ATOM_CONNECTOR_INFO_I2C ci =
-		    supported_devices->info.asConnInfo[i];
+		ATOM_CONNECTOR_INFO_I2C ci;
+
+		if (frev > 1)
+			ci = supported_devices->info_2d1.asConnInfo[i];
+		else
+			ci = supported_devices->info.asConnInfo[i];

 		bios_connectors[i].valid = false;

@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
 						  enum ttm_caching caching,
 						  unsigned int order)
 {
-	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+	if (pool->use_dma_alloc)
 		return &pool->caching[caching].orders[order];

 #ifdef CONFIG_X86
 	switch (caching) {
 	case ttm_write_combined:
+		if (pool->nid != NUMA_NO_NODE)
+			return &pool->caching[caching].orders[order];
+
 		if (pool->use_dma32)
 			return &global_dma32_write_combined[order];

 		return &global_write_combined[order];
 	case ttm_uncached:
+		if (pool->nid != NUMA_NO_NODE)
+			return &pool->caching[caching].orders[order];
+
 		if (pool->use_dma32)
 			return &global_dma32_uncached[order];

|
|||||||
pool->use_dma_alloc = use_dma_alloc;
|
pool->use_dma_alloc = use_dma_alloc;
|
||||||
pool->use_dma32 = use_dma32;
|
pool->use_dma32 = use_dma32;
|
||||||
|
|
||||||
if (use_dma_alloc || nid != NUMA_NO_NODE) {
|
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
|
||||||
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
|
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
|
||||||
for (j = 0; j < NR_PAGE_ORDERS; ++j)
|
struct ttm_pool_type *pt;
|
||||||
ttm_pool_type_init(&pool->caching[i].orders[j],
|
|
||||||
pool, i, j);
|
/* Initialize only pool types which are actually used */
|
||||||
|
pt = ttm_pool_select_type(pool, i, j);
|
||||||
|
if (pt != &pool->caching[i].orders[j])
|
||||||
|
continue;
|
||||||
|
|
||||||
|
ttm_pool_type_init(pt, pool, i, j);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(ttm_pool_init);
|
EXPORT_SYMBOL(ttm_pool_init);
|
||||||
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
 {
 	unsigned int i, j;

-	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
-		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j < NR_PAGE_ORDERS; ++j)
-				ttm_pool_type_fini(&pool->caching[i].orders[j]);
+	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+		for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+			struct ttm_pool_type *pt;
+
+			pt = ttm_pool_select_type(pool, i, j);
+			if (pt != &pool->caching[i].orders[j])
+				continue;
+
+			ttm_pool_type_fini(pt);
+		}
 	}

 	/* We removed the pool types from the LRU, but we need to also make sure
@@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
 		struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
 		u64 runtime = local_clock() - file->start_ns[V3D_BIN];

-		file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
 		file->jobs_sent[V3D_BIN]++;
 		v3d->queue[V3D_BIN].jobs_sent++;

@@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
 		struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
 		u64 runtime = local_clock() - file->start_ns[V3D_RENDER];

-		file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
 		file->jobs_sent[V3D_RENDER]++;
 		v3d->queue[V3D_RENDER].jobs_sent++;

@@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
 		struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
 		u64 runtime = local_clock() - file->start_ns[V3D_CSD];

-		file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
 		file->jobs_sent[V3D_CSD]++;
 		v3d->queue[V3D_CSD].jobs_sent++;

@@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
 		struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
 		u64 runtime = local_clock() - file->start_ns[V3D_TFU];

-		file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
 		file->jobs_sent[V3D_TFU]++;
 		v3d->queue[V3D_TFU].jobs_sent++;

@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 		.no_wait_gpu = false
 	};
 	u32 j, initial_line = dst_offset / dst_stride;
-	struct vmw_bo_blit_line_data d;
+	struct vmw_bo_blit_line_data d = {0};
 	int ret = 0;
+	struct page **dst_pages = NULL;
+	struct page **src_pages = NULL;

 	/* Buffer objects need to be either pinned or reserved: */
 	if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 		return ret;
 	}

+	if (!src->ttm->pages && src->ttm->sg) {
+		src_pages = kvmalloc_array(src->ttm->num_pages,
+					   sizeof(struct page *), GFP_KERNEL);
+		if (!src_pages)
+			return -ENOMEM;
+		ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+						 src->ttm->num_pages);
+		if (ret)
+			goto out;
+	}
+	if (!dst->ttm->pages && dst->ttm->sg) {
+		dst_pages = kvmalloc_array(dst->ttm->num_pages,
+					   sizeof(struct page *), GFP_KERNEL);
+		if (!dst_pages) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+						 dst->ttm->num_pages);
+		if (ret)
+			goto out;
+	}
+
 	d.mapped_dst = 0;
 	d.mapped_src = 0;
 	d.dst_addr = NULL;
 	d.src_addr = NULL;
-	d.dst_pages = dst->ttm->pages;
-	d.src_pages = src->ttm->pages;
+	d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+	d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
 	d.dst_num_pages = PFN_UP(dst->resource->size);
 	d.src_num_pages = PFN_UP(src->resource->size);
 	d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
 		kunmap_atomic(d.src_addr);
 	if (d.dst_addr)
 		kunmap_atomic(d.dst_addr);
+	if (src_pages)
+		kvfree(src_pages);
+	if (dst_pages)
+		kvfree(dst_pages);

 	return ret;
 }
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
 {
 	struct ttm_operation_ctx ctx = {
 		.interruptible = params->bo_type != ttm_bo_type_kernel,
-		.no_wait_gpu = false
+		.no_wait_gpu = false,
+		.resv = params->resv,
 	};
 	struct ttm_device *bdev = &dev_priv->bdev;
 	struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,

 	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
 	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
-				   &vmw_bo->placement, 0, &ctx, NULL,
-				   NULL, destroy);
+				   &vmw_bo->placement, 0, &ctx,
+				   params->sg, params->resv, destroy);
 	if (unlikely(ret))
 		return ret;

@@ -55,6 +55,8 @@ struct vmw_bo_params {
 	enum ttm_bo_type bo_type;
 	size_t size;
 	bool pin;
+	struct dma_resv *resv;
+	struct sg_table *sg;
 };

 /**
@@ -1628,6 +1628,7 @@ static const struct drm_driver driver = {

 	.prime_fd_to_handle = vmw_prime_fd_to_handle,
 	.prime_handle_to_fd = vmw_prime_handle_to_fd,
+	.gem_prime_import_sg_table = vmw_prime_import_sg_table,

 	.fops = &vmwgfx_driver_fops,
 	.name = VMWGFX_DRIVER_NAME,
@@ -1130,6 +1130,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
 				  struct drm_file *file_priv,
 				  uint32_t handle, uint32_t flags,
 				  int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+						 struct dma_buf_attachment *attach,
+						 struct sg_table *table);

 /*
 * MemoryOBject management - vmwgfx_mob.c
@@ -149,6 +149,38 @@ out_no_bo:
 	return ret;
 }

+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+						 struct dma_buf_attachment *attach,
+						 struct sg_table *table)
+{
+	int ret;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_gem_object *gem = NULL;
+	struct vmw_bo *vbo;
+	struct vmw_bo_params params = {
+		.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+		.busy_domain = VMW_BO_DOMAIN_SYS,
+		.bo_type = ttm_bo_type_sg,
+		.size = attach->dmabuf->size,
+		.pin = false,
+		.resv = attach->dmabuf->resv,
+		.sg = table,
+
+	};
+
+	dma_resv_lock(params.resv, NULL);
+
+	ret = vmw_bo_create(dev_priv, &params, &vbo);
+	if (ret != 0)
+		goto out_no_bo;
+
+	vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+	gem = &vbo->tbo.base;
+out_no_bo:
+	dma_resv_unlock(params.resv);
+	return gem;
+}
+
 int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp)
@@ -933,6 +933,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 			     struct drm_atomic_state *state)
 {
+	struct vmw_private *vmw = vmw_priv(crtc->dev);
 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
 									 crtc);
 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
|
|||||||
bool has_primary = new_state->plane_mask &
|
bool has_primary = new_state->plane_mask &
|
||||||
drm_plane_mask(crtc->primary);
|
drm_plane_mask(crtc->primary);
|
||||||
|
|
||||||
/* We always want to have an active plane with an active CRTC */
|
/*
|
||||||
if (has_primary != new_state->enable)
|
* This is fine in general, but broken userspace might expect
|
||||||
return -EINVAL;
|
* some actual rendering so give a clue as why it's blank.
|
||||||
|
*/
|
||||||
|
if (new_state->enable && !has_primary)
|
||||||
|
drm_dbg_driver(&vmw->drm,
|
||||||
|
"CRTC without a primary plane will be blank.\n");
|
||||||
|
|
||||||
|
|
||||||
if (new_state->connector_mask != connector_mask &&
|
if (new_state->connector_mask != connector_mask &&
|
||||||
|
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {


 static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
-	DRM_FORMAT_XRGB1555,
-	DRM_FORMAT_RGB565,
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB1555,
 };

 static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
 			   int fd, u32 *handle)
 {
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret = ttm_prime_fd_to_handle(tfile, fd, handle);

-	return ttm_prime_fd_to_handle(tfile, fd, handle);
+	if (ret)
+		ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+	return ret;
 }

 int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
 			   int *prime_fd)
 {
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+	int ret;
+
+	if (handle > VMWGFX_NUM_MOB)
+		ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+	else
+		ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+	return ret;
 }
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 	switch (dev_priv->map_mode) {
 	case vmw_dma_map_bind:
 	case vmw_dma_map_populate:
-		vsgt->sgt = &vmw_tt->sgt;
-		ret = sg_alloc_table_from_pages_segment(
-			&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
-			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
-			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
-		if (ret)
-			goto out_sg_alloc_fail;
+		if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+			vsgt->sgt = vmw_tt->dma_ttm.sg;
+		} else {
+			vsgt->sgt = &vmw_tt->sgt;
+			ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+				vsgt->pages, vsgt->num_pages, 0,
+				(unsigned long)vsgt->num_pages << PAGE_SHIFT,
+				dma_get_max_seg_size(dev_priv->drm.dev),
+				GFP_KERNEL);
+			if (ret)
+				goto out_sg_alloc_fail;
+		}

 		ret = vmw_ttm_map_for_dma(vmw_tt);
 		if (unlikely(ret != 0))
@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 	return 0;

 out_map_fail:
-	sg_free_table(vmw_tt->vsgt.sgt);
-	vmw_tt->vsgt.sgt = NULL;
+	drm_warn(&dev_priv->drm, "VSG table map failed!");
+	sg_free_table(vsgt->sgt);
+	vsgt->sgt = NULL;
 out_sg_alloc_fail:
 	return ret;
 }
@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_device *bdev,
 			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	int ret;
+	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

-	/* TODO: maybe completely drop this ? */
 	if (ttm_tt_is_populated(ttm))
 		return 0;

-	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+	if (external && ttm->sg)
+		return drm_prime_sg_to_dma_addr_array(ttm->sg,
+						      ttm->dma_address,
+						      ttm->num_pages);

-	return ret;
+	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
 }

 static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 {
 	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
 						 dma_ttm);
+	bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+	if (external)
+		return;

 	vmw_ttm_unbind(bdev, ttm);

|||||||
{
|
{
|
||||||
struct vmw_ttm_tt *vmw_be;
|
struct vmw_ttm_tt *vmw_be;
|
||||||
int ret;
|
int ret;
|
||||||
|
bool external = bo->type == ttm_bo_type_sg;
|
||||||
|
|
||||||
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
|
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
|
||||||
if (!vmw_be)
|
if (!vmw_be)
|
||||||
@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 	vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
 	vmw_be->mob = NULL;

-	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+	if (external)
+		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
 		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
 				     ttm_cached);
 	else
@@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,

 	ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
 	if (ret)
-		return ret;
+		goto err;

 	if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
 		/*
@@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
 		 */
 		if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
 			ttm_bo_unreserve(&bo->ttm);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto err;
 		}
 		bo->flags |= XE_BO_SCANOUT_BIT;
 	}
 	ttm_bo_unreserve(&bo->ttm);
+	return 0;
+
+err:
+	xe_bo_put(bo);
 	return ret;
 }

|
@ -1577,6 +1577,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
|
|||||||
xe->usm.num_vm_in_fault_mode--;
|
xe->usm.num_vm_in_fault_mode--;
|
||||||
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
|
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
|
||||||
xe->usm.num_vm_in_non_fault_mode--;
|
xe->usm.num_vm_in_non_fault_mode--;
|
||||||
|
|
||||||
|
if (vm->usm.asid) {
|
||||||
|
void *lookup;
|
||||||
|
|
||||||
|
xe_assert(xe, xe->info.has_asid);
|
||||||
|
xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
|
||||||
|
|
||||||
|
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
|
||||||
|
xe_assert(xe, lookup == vm);
|
||||||
|
}
|
||||||
mutex_unlock(&xe->usm.lock);
|
mutex_unlock(&xe->usm.lock);
|
||||||
|
|
||||||
for_each_tile(tile, xe, id)
|
for_each_tile(tile, xe, id)
|
||||||
@@ -1592,24 +1602,15 @@ static void vm_destroy_work_func(struct work_struct *w)
 	struct xe_device *xe = vm->xe;
 	struct xe_tile *tile;
 	u8 id;
-	void *lookup;

 	/* xe_vm_close_and_put was not called? */
 	xe_assert(xe, !vm->size);

 	mutex_destroy(&vm->snap_mutex);

-	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
+	if (!(vm->flags & XE_VM_FLAG_MIGRATION))
 		xe_device_mem_access_put(xe);

-		if (xe->info.has_asid && vm->usm.asid) {
-			mutex_lock(&xe->usm.lock);
-			lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
-			xe_assert(xe, lookup == vm);
-			mutex_unlock(&xe->usm.lock);
-		}
-	}
-
 	for_each_tile(tile, xe, id)
 		XE_WARN_ON(vm->pt_root[id]);
