dma-buf: Change locking policy for mmap()

Change locking policy of mmap() callback, making exporters responsible
for handling dma-buf reservation locking. Previous locking policy stated
that dma-buf is locked for both importers and exporters by the dma-buf
core, which caused a deadlock problem for DRM drivers in the case of
self-imported dma-bufs, which required taking the lock from the DRM
exporter side.

Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230529223935.2672495-6-dmitry.osipenko@collabora.com
This commit is contained in:
Dmitry Osipenko 2023-05-30 01:39:34 +03:00
parent 11f38236f7
commit 8021fa16b7

View File

@@ -131,7 +131,6 @@ static struct file_system_type dma_buf_fs_type = {
 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 {
 	struct dma_buf *dmabuf;
-	int ret;
 
 	if (!is_dma_buf_file(file))
 		return -EINVAL;
@@ -147,11 +146,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 			dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
 
-	dma_resv_lock(dmabuf->resv, NULL);
-	ret = dmabuf->ops->mmap(dmabuf, vma);
-	dma_resv_unlock(dmabuf->resv);
-
-	return ret;
+	return dmabuf->ops->mmap(dmabuf, vma);
 }
 
 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
@@ -850,6 +845,7 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
  * - &dma_buf_ops.release()
  * - &dma_buf_ops.begin_cpu_access()
  * - &dma_buf_ops.end_cpu_access()
+ * - &dma_buf_ops.mmap()
  *
  * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
  *    reservation and exporter can't take the lock:
@@ -858,7 +854,6 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
  * - &dma_buf_ops.unpin()
  * - &dma_buf_ops.map_dma_buf()
  * - &dma_buf_ops.unmap_dma_buf()
- * - &dma_buf_ops.mmap()
  * - &dma_buf_ops.vmap()
  * - &dma_buf_ops.vunmap()
  *
@@ -1463,8 +1458,6 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		 unsigned long pgoff)
 {
-	int ret;
-
 	if (WARN_ON(!dmabuf || !vma))
 		return -EINVAL;
@@ -1485,11 +1478,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 	vma_set_file(vma, dmabuf->file);
 	vma->vm_pgoff = pgoff;
 
-	dma_resv_lock(dmabuf->resv, NULL);
-	ret = dmabuf->ops->mmap(dmabuf, vma);
-	dma_resv_unlock(dmabuf->resv);
-
-	return ret;
+	return dmabuf->ops->mmap(dmabuf, vma);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);