drm/prime: support to cache mapping
Like the GEM CMA helpers, the drm prime helpers can cache the dma-buf mapping per attachment. Two mappings with different directions for the same attachment are not allowed.

[airlied: rebased on top of other prime changes]
Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit 538d6661f5
parent 7e3d88f9cc
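For context, a rough importer-side sketch of the behaviour this enables. The function my_import() and its error handling are illustrative only and not part of this patch; dma_buf_attach(), dma_buf_map_attachment(), dma_buf_unmap_attachment() and dma_buf_detach() are the standard dma-buf importer calls.

#include <linux/dma-buf.h>
#include <linux/err.h>

/*
 * Illustrative only: with the per-attachment cache added below, a second
 * map request in the same direction gets back the sg_table that was built
 * and dma-mapped on the first call; the real teardown happens at detach.
 */
static int my_import(struct dma_buf *buf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt, *again;

	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, attach);
		return PTR_ERR(sgt);
	}

	/* same direction: the cached mapping is returned, nothing is rebuilt */
	again = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	WARN_ON(again != sgt);

	/* a different direction on the same attachment would return -EBUSY */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, attach);
	return 0;
}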
@@ -62,15 +62,29 @@ struct drm_prime_member {
 	struct dma_buf *dma_buf;
 	uint32_t handle;
 };
 
+struct drm_prime_attachment {
+	struct sg_table *sgt;
+	enum dma_data_direction dir;
+};
+
 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
 
 static int drm_gem_map_attach(struct dma_buf *dma_buf,
 		struct device *target_dev,
 		struct dma_buf_attachment *attach)
 {
+	struct drm_prime_attachment *prime_attach;
 	struct drm_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->dev;
 
+	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
+	if (!prime_attach)
+		return -ENOMEM;
+
+	prime_attach->dir = DMA_NONE;
+	attach->priv = prime_attach;
+
 	if (!dev->driver->gem_prime_pin)
 		return 0;
 
@@ -80,19 +94,50 @@ static int drm_gem_map_attach(struct dma_buf *dma_buf,
 static void drm_gem_map_detach(struct dma_buf *dma_buf,
 		struct dma_buf_attachment *attach)
 {
+	struct drm_prime_attachment *prime_attach = attach->priv;
 	struct drm_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->dev;
+	struct sg_table *sgt;
 
 	if (dev->driver->gem_prime_unpin)
 		dev->driver->gem_prime_unpin(obj);
+
+	if (!prime_attach)
+		return;
+
+	sgt = prime_attach->sgt;
+
+	if (prime_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+				prime_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(sgt);
+	kfree(prime_attach);
+	attach->priv = NULL;
 }
 
 static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 		enum dma_data_direction dir)
 {
+	struct drm_prime_attachment *prime_attach = attach->priv;
 	struct drm_gem_object *obj = attach->dmabuf->priv;
 	struct sg_table *sgt;
 
+	if (WARN_ON(dir == DMA_NONE || !prime_attach))
+		return ERR_PTR(-EINVAL);
+
+	/* return the cached mapping when possible */
+	if (prime_attach->dir == dir)
+		return prime_attach->sgt;
+
+	/*
+	 * two mappings with different directions for the same attachment are
+	 * not allowed
+	 */
+	if (WARN_ON(prime_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
+
 	mutex_lock(&obj->dev->struct_mutex);
 
 	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
@@ -102,6 +147,9 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 			sg_free_table(sgt);
 			kfree(sgt);
 			sgt = ERR_PTR(-ENOMEM);
+		} else {
+			prime_attach->sgt = sgt;
+			prime_attach->dir = dir;
 		}
 	}
 
@@ -112,9 +160,7 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
-	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-	sg_free_table(sgt);
-	kfree(sgt);
+	/* nothing to be done here */
 }
 
 static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
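For orientation, the callbacks touched above are the exporter-side ops DRM installs when a GEM object is exported as a dma-buf. The sketch below is abridged and assumes the drm_gem_prime_dmabuf_ops initializer in drm_prime.c of this era; the real table also fills in kmap, mmap, vmap and related hooks.

/* Abridged sketch of how the callbacks above are wired up. */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,		/* allocates the per-attachment cache */
	.detach = drm_gem_map_detach,		/* unmaps and frees the cached sg_table */
	.map_dma_buf = drm_gem_map_dma_buf,	/* builds or returns the cached mapping */
	.unmap_dma_buf = drm_gem_unmap_dma_buf,	/* now a no-op */
	.release = drm_gem_dmabuf_release,
};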