2012-04-10 18:19:55 +00:00
|
|
|
/*
|
2013-02-11 17:43:09 +00:00
|
|
|
* drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
|
2012-04-10 18:19:55 +00:00
|
|
|
*
|
|
|
|
* Copyright (C) 2011 Texas Instruments
|
|
|
|
* Author: Rob Clark <rob.clark@linaro.org>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License version 2 as published by
|
|
|
|
* the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
|
|
* more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along with
|
|
|
|
* this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/dma-buf.h>
|
|
|
|
|
2015-03-05 19:31:37 +00:00
|
|
|
#include "omap_drv.h"
|
|
|
|
|
2015-12-14 20:39:44 +00:00
|
|
|
/* -----------------------------------------------------------------------------
|
|
|
|
* DMABUF Export
|
|
|
|
*/
|
|
|
|
|
2012-04-10 18:19:55 +00:00
|
|
|
static struct sg_table *omap_gem_map_dma_buf(
|
|
|
|
struct dma_buf_attachment *attachment,
|
|
|
|
enum dma_data_direction dir)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = attachment->dmabuf->priv;
|
|
|
|
struct sg_table *sg;
|
|
|
|
dma_addr_t paddr;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
|
|
|
|
if (!sg)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
/* camera, etc, need physically contiguous.. but we need a
|
|
|
|
* better way to know this..
|
|
|
|
*/
|
|
|
|
ret = omap_gem_get_paddr(obj, &paddr, true);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ret = sg_alloc_table(sg, 1, GFP_KERNEL);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
sg_init_table(sg->sgl, 1);
|
|
|
|
sg_dma_len(sg->sgl) = obj->size;
|
|
|
|
sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
|
|
|
|
sg_dma_address(sg->sgl) = paddr;
|
|
|
|
|
2012-05-17 08:37:25 +00:00
|
|
|
/* this should be after _get_paddr() to ensure we have pages attached */
|
|
|
|
omap_gem_dma_sync(obj, dir);
|
|
|
|
|
2012-04-10 18:19:55 +00:00
|
|
|
return sg;
|
2013-01-15 19:46:50 +00:00
|
|
|
out:
|
|
|
|
kfree(sg);
|
|
|
|
return ERR_PTR(ret);
|
2012-04-10 18:19:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
|
|
|
|
struct sg_table *sg, enum dma_data_direction dir)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = attachment->dmabuf->priv;
|
|
|
|
omap_gem_put_paddr(obj);
|
|
|
|
sg_free_table(sg);
|
|
|
|
kfree(sg);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void omap_gem_dmabuf_release(struct dma_buf *buffer)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = buffer->priv;
|
|
|
|
/* release reference that was taken when dmabuf was exported
|
|
|
|
* in omap_gem_prime_set()..
|
|
|
|
*/
|
|
|
|
drm_gem_object_unreference_unlocked(obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
|
2015-12-22 21:36:45 +00:00
|
|
|
enum dma_data_direction dir)
|
2012-04-10 18:19:55 +00:00
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = buffer->priv;
|
|
|
|
struct page **pages;
|
|
|
|
if (omap_gem_flags(obj) & OMAP_BO_TILED) {
|
|
|
|
/* TODO we would need to pin at least part of the buffer to
|
|
|
|
* get de-tiled view. For now just reject it.
|
|
|
|
*/
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
/* make sure we have the pages: */
|
|
|
|
return omap_gem_get_pages(obj, &pages, true);
|
|
|
|
}
|
|
|
|
|
dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
Drivers, especially i915.ko, can fail during the initial migration of a
dma-buf for CPU access. However, the error code from the driver was not
being propagated back to ioctl and so userspace was blissfully ignorant
of the failure. Rendering corruption ensues.
Whilst fixing the ioctl to return the error code from
dma_buf_start_cpu_access(), also do the same for
dma_buf_end_cpu_access(). For most drivers, dma_buf_end_cpu_access()
cannot fail. i915.ko however, as most drivers would, wants to avoid being
uninterruptible (as would be required to guarrantee no failure when
flushing the buffer to the device). As userspace already has to handle
errors from the SYNC_IOCTL, take advantage of this to be able to restart
the syscall across signals.
This fixes a coherency issue for i915.ko as well as reducing the
uninterruptible hold upon its BKL, the struct_mutex.
Fixes commit c11e391da2a8fe973c3c2398452000bed505851e
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu Feb 11 20:04:51 2016 -0200
dma-buf: Add ioctls to allow userspace to flush
Testcase: igt/gem_concurrent_blit/*dmabuf*interruptible
Testcase: igt/prime_mmap_coherency/ioctl-errors
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tiago Vignatti <tiago.vignatti@intel.com>
Cc: Stéphane Marchesin <marcheu@chromium.org>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
CC: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: intel-gfx@lists.freedesktop.org
Cc: devel@driverdev.osuosl.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1458331359-2634-1-git-send-email-chris@chris-wilson.co.uk
2016-03-18 20:02:39 +00:00
|
|
|
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
|
|
|
|
enum dma_data_direction dir)
|
2012-04-10 18:19:55 +00:00
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = buffer->priv;
|
|
|
|
omap_gem_put_pages(obj);
|
dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
Drivers, especially i915.ko, can fail during the initial migration of a
dma-buf for CPU access. However, the error code from the driver was not
being propagated back to ioctl and so userspace was blissfully ignorant
of the failure. Rendering corruption ensues.
Whilst fixing the ioctl to return the error code from
dma_buf_start_cpu_access(), also do the same for
dma_buf_end_cpu_access(). For most drivers, dma_buf_end_cpu_access()
cannot fail. i915.ko however, as most drivers would, wants to avoid being
uninterruptible (as would be required to guarrantee no failure when
flushing the buffer to the device). As userspace already has to handle
errors from the SYNC_IOCTL, take advantage of this to be able to restart
the syscall across signals.
This fixes a coherency issue for i915.ko as well as reducing the
uninterruptible hold upon its BKL, the struct_mutex.
Fixes commit c11e391da2a8fe973c3c2398452000bed505851e
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu Feb 11 20:04:51 2016 -0200
dma-buf: Add ioctls to allow userspace to flush
Testcase: igt/gem_concurrent_blit/*dmabuf*interruptible
Testcase: igt/prime_mmap_coherency/ioctl-errors
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tiago Vignatti <tiago.vignatti@intel.com>
Cc: Stéphane Marchesin <marcheu@chromium.org>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
CC: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: intel-gfx@lists.freedesktop.org
Cc: devel@driverdev.osuosl.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1458331359-2634-1-git-send-email-chris@chris-wilson.co.uk
2016-03-18 20:02:39 +00:00
|
|
|
return 0;
|
2012-04-10 18:19:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
|
|
|
|
unsigned long page_num)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = buffer->priv;
|
|
|
|
struct page **pages;
|
|
|
|
omap_gem_get_pages(obj, &pages, false);
|
2012-05-17 08:37:25 +00:00
|
|
|
omap_gem_cpu_sync(obj, page_num);
|
2012-04-10 18:19:55 +00:00
|
|
|
return kmap_atomic(pages[page_num]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	/* Undo the atomic mapping created by omap_gem_dmabuf_kmap_atomic(). */
	kunmap_atomic(addr);
}
|
|
|
|
|
|
|
|
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
|
|
|
|
unsigned long page_num)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = buffer->priv;
|
|
|
|
struct page **pages;
|
|
|
|
omap_gem_get_pages(obj, &pages, false);
|
2012-05-17 08:37:25 +00:00
|
|
|
omap_gem_cpu_sync(obj, page_num);
|
2012-04-10 18:19:55 +00:00
|
|
|
return kmap(pages[page_num]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
|
|
|
|
unsigned long page_num, void *addr)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = buffer->priv;
|
|
|
|
struct page **pages;
|
|
|
|
omap_gem_get_pages(obj, &pages, false);
|
|
|
|
kunmap(pages[page_num]);
|
|
|
|
}
|
|
|
|
|
2012-05-17 08:37:25 +00:00
|
|
|
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
|
|
|
|
struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *obj = buffer->priv;
|
|
|
|
int ret = 0;
|
|
|
|
|
2013-04-16 12:21:23 +00:00
|
|
|
ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2012-05-17 08:37:25 +00:00
|
|
|
|
|
|
|
return omap_gem_mmap_obj(obj, vma);
|
|
|
|
}
|
|
|
|
|
2013-04-10 07:44:00 +00:00
|
|
|
static struct dma_buf_ops omap_dmabuf_ops = {
|
2015-01-10 22:02:07 +00:00
|
|
|
.map_dma_buf = omap_gem_map_dma_buf,
|
|
|
|
.unmap_dma_buf = omap_gem_unmap_dma_buf,
|
|
|
|
.release = omap_gem_dmabuf_release,
|
|
|
|
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
|
|
|
|
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
|
2017-04-19 19:36:10 +00:00
|
|
|
.map_atomic = omap_gem_dmabuf_kmap_atomic,
|
|
|
|
.unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
|
|
|
|
.map = omap_gem_dmabuf_kmap,
|
|
|
|
.unmap = omap_gem_dmabuf_kunmap,
|
2015-01-10 22:02:07 +00:00
|
|
|
.mmap = omap_gem_dmabuf_mmap,
|
2012-04-10 18:19:55 +00:00
|
|
|
};
|
|
|
|
|
2012-11-14 10:40:14 +00:00
|
|
|
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
|
2012-04-10 18:19:55 +00:00
|
|
|
struct drm_gem_object *obj, int flags)
|
|
|
|
{
|
2015-01-23 07:23:43 +00:00
|
|
|
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
|
|
|
|
|
|
|
|
exp_info.ops = &omap_dmabuf_ops;
|
|
|
|
exp_info.size = obj->size;
|
|
|
|
exp_info.flags = flags;
|
|
|
|
exp_info.priv = obj;
|
|
|
|
|
|
|
|
return dma_buf_export(&exp_info);
|
2012-04-10 18:19:55 +00:00
|
|
|
}
|
2012-05-17 08:37:26 +00:00
|
|
|
|
2015-12-14 20:39:44 +00:00
|
|
|
/* -----------------------------------------------------------------------------
|
|
|
|
* DMABUF Import
|
|
|
|
*/
|
|
|
|
|
2012-11-14 10:40:14 +00:00
|
|
|
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
|
2015-12-14 20:39:44 +00:00
|
|
|
struct dma_buf *dma_buf)
|
2012-05-17 08:37:26 +00:00
|
|
|
{
|
2015-12-14 20:39:44 +00:00
|
|
|
struct dma_buf_attachment *attach;
|
2012-05-17 08:37:26 +00:00
|
|
|
struct drm_gem_object *obj;
|
2015-12-14 20:39:44 +00:00
|
|
|
struct sg_table *sgt;
|
|
|
|
int ret;
|
2012-05-17 08:37:26 +00:00
|
|
|
|
2015-12-14 20:39:44 +00:00
|
|
|
if (dma_buf->ops == &omap_dmabuf_ops) {
|
|
|
|
obj = dma_buf->priv;
|
2012-05-17 08:37:26 +00:00
|
|
|
if (obj->dev == dev) {
|
2012-09-27 06:30:06 +00:00
|
|
|
/*
|
|
|
|
* Importing dmabuf exported from out own gem increases
|
|
|
|
* refcount on gem itself instead of f_count of dmabuf.
|
|
|
|
*/
|
2012-05-17 08:37:26 +00:00
|
|
|
drm_gem_object_reference(obj);
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-14 20:39:44 +00:00
|
|
|
attach = dma_buf_attach(dma_buf, dev->dev);
|
|
|
|
if (IS_ERR(attach))
|
|
|
|
return ERR_CAST(attach);
|
|
|
|
|
|
|
|
get_dma_buf(dma_buf);
|
|
|
|
|
|
|
|
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
|
|
|
|
if (IS_ERR(sgt)) {
|
|
|
|
ret = PTR_ERR(sgt);
|
|
|
|
goto fail_detach;
|
|
|
|
}
|
|
|
|
|
|
|
|
obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
|
|
|
|
if (IS_ERR(obj)) {
|
|
|
|
ret = PTR_ERR(obj);
|
|
|
|
goto fail_unmap;
|
|
|
|
}
|
|
|
|
|
|
|
|
obj->import_attach = attach;
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
|
|
|
fail_unmap:
|
|
|
|
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
|
|
|
|
fail_detach:
|
|
|
|
dma_buf_detach(dma_buf, attach);
|
|
|
|
dma_buf_put(dma_buf);
|
|
|
|
|
|
|
|
return ERR_PTR(ret);
|
2012-05-17 08:37:26 +00:00
|
|
|
}
|