forked from Minki/linux
Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next
Fixes address space collisions in some edge cases when userspace is using softpin, and cleans up the MMU reference handling a bit. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Lucas Stach <l.stach@pengutronix.de> Link: https://patchwork.freedesktop.org/patch/msgid/ffae9f7d03ca7a9e00da16d5910ae810befd3c5a.camel@pengutronix.de
This commit is contained in:
commit
f8122500a0
@ -294,18 +294,15 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
|
||||
list_del(&mapping->obj_node);
|
||||
}
|
||||
|
||||
mapping->context = etnaviv_iommu_context_get(mmu_context);
|
||||
mapping->use = 1;
|
||||
|
||||
ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
|
||||
mmu_context->global->memory_base,
|
||||
mapping, va);
|
||||
if (ret < 0) {
|
||||
etnaviv_iommu_context_put(mmu_context);
|
||||
if (ret < 0)
|
||||
kfree(mapping);
|
||||
} else {
|
||||
else
|
||||
list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&etnaviv_obj->lock);
|
||||
@ -500,10 +497,8 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
|
||||
|
||||
WARN_ON(mapping->use);
|
||||
|
||||
if (context) {
|
||||
if (context)
|
||||
etnaviv_iommu_unmap_gem(context, mapping);
|
||||
etnaviv_iommu_context_put(context);
|
||||
}
|
||||
|
||||
list_del(&mapping->obj_node);
|
||||
kfree(mapping);
|
||||
|
@ -92,6 +92,8 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
|
||||
da += bytes;
|
||||
}
|
||||
|
||||
context->flush_seq++;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
@ -117,6 +119,8 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
|
||||
|
||||
da += bytes;
|
||||
}
|
||||
|
||||
context->flush_seq++;
|
||||
}
|
||||
|
||||
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
|
||||
@ -219,8 +223,47 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
|
||||
/*
 * Place @node at the exact address range [@va, @va + @size) in the context
 * address space.  If the range is blocked only by idle mappings of buffers
 * that are not destroyed yet, reap those mappings and retry the insertion.
 *
 * Returns 0 on success, -ENOSPC when a still-busy mapping blocks the range,
 * or any other error from drm_mm_insert_node_in_range().
 * Caller must hold context->lock.
 */
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
	struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *free_mapping, *next;
	struct drm_mm_node *blocking;
	LIST_HEAD(reap_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node, due to an existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely due to being referenced by another context) in
	 * which case the mappings will not be cleaned up and we must reap them
	 * here to make space for the new mapping.
	 */
	drm_mm_for_each_node_in_range(blocking, &context->mm, va, va + size) {
		free_mapping = container_of(blocking,
				struct etnaviv_vram_mapping, vram_node);

		/* Case 1: a mapping still in active use cannot be reaped. */
		if (free_mapping->use)
			return -ENOSPC;

		list_add(&free_mapping->scan_node, &reap_list);
	}

	/* Case 2: tear down every idle mapping collected above. */
	list_for_each_entry_safe(free_mapping, next, &reap_list, scan_node) {
		etnaviv_iommu_remove_mapping(context, free_mapping);
		etnaviv_iommu_context_put(free_mapping->context);
		free_mapping->context = NULL;
		list_del_init(&free_mapping->mmu_node);
		list_del_init(&free_mapping->scan_node);
	}

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}
|
||||
@ -245,6 +288,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
|
||||
iova = sg_dma_address(sgt->sgl) - memory_base;
|
||||
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
|
||||
mapping->iova = iova;
|
||||
mapping->context = etnaviv_iommu_context_get(context);
|
||||
list_add_tail(&mapping->mmu_node, &context->mappings);
|
||||
ret = 0;
|
||||
goto unlock;
|
||||
@ -271,8 +315,8 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
mapping->context = etnaviv_iommu_context_get(context);
|
||||
list_add_tail(&mapping->mmu_node, &context->mappings);
|
||||
context->flush_seq++;
|
||||
unlock:
|
||||
mutex_unlock(&context->lock);
|
||||
|
||||
@ -286,13 +330,19 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
|
||||
|
||||
mutex_lock(&context->lock);
|
||||
|
||||
/* Bail if the mapping has been reaped by another thread */
|
||||
if (!mapping->context) {
|
||||
mutex_unlock(&context->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
/* If the vram node is on the mm, unmap and remove the node */
|
||||
if (mapping->vram_node.mm == &context->mm)
|
||||
etnaviv_iommu_remove_mapping(context, mapping);
|
||||
|
||||
list_del(&mapping->mmu_node);
|
||||
context->flush_seq++;
|
||||
mutex_unlock(&context->lock);
|
||||
etnaviv_iommu_context_put(context);
|
||||
}
|
||||
|
||||
static void etnaviv_iommu_context_free(struct kref *kref)
|
||||
|
Loading…
Reference in New Issue
Block a user