gpu: ion: Add explicit sync ioctl
This is deprecated in favor of using the dma_buf api which will automatically sync a buffer to memory when it is mapped to a device. However, that functionality is not ready, so this patch adds the ability to sync a buffer explicitly. Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com> [jstultz: modified patch to apply to staging directory] Signed-off-by: John Stultz <john.stultz@linaro.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
bd5d6bdae1
commit
0b9ec1cfd4
@ -684,8 +684,7 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
|
||||
struct dma_buf *dmabuf = attachment->dmabuf;
|
||||
struct ion_buffer *buffer = dmabuf->priv;
|
||||
|
||||
if (buffer->flags & ION_FLAG_CACHED)
|
||||
ion_buffer_sync_for_device(buffer, attachment->dev, direction);
|
||||
ion_buffer_sync_for_device(buffer, attachment->dev, direction);
|
||||
return buffer->sg_table;
|
||||
}
|
||||
|
||||
@ -721,6 +720,10 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
|
||||
|
||||
pr_debug("%s: syncing for device %s\n", __func__,
|
||||
dev ? dev_name(dev) : "null");
|
||||
|
||||
if (!(buffer->flags & ION_FLAG_CACHED))
|
||||
return;
|
||||
|
||||
mutex_lock(&buffer->lock);
|
||||
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
|
||||
if (!test_bit(i, buffer->dirty))
|
||||
@ -958,6 +961,28 @@ end:
|
||||
return handle;
|
||||
}
|
||||
|
||||
static int ion_sync_for_device(struct ion_client *client, int fd)
|
||||
{
|
||||
struct dma_buf *dmabuf;
|
||||
struct ion_buffer *buffer;
|
||||
|
||||
dmabuf = dma_buf_get(fd);
|
||||
if (IS_ERR_OR_NULL(dmabuf))
|
||||
return PTR_ERR(dmabuf);
|
||||
|
||||
/* if this memory came from ion */
|
||||
if (dmabuf->ops != &dma_buf_ops) {
|
||||
pr_err("%s: can not sync dmabuf from another exporter\n",
|
||||
__func__);
|
||||
dma_buf_put(dmabuf);
|
||||
return -EINVAL;
|
||||
}
|
||||
buffer = dmabuf->priv;
|
||||
ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
|
||||
dma_buf_put(dmabuf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct ion_client *client = filp->private_data;
|
||||
@ -1022,6 +1047,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
return -EFAULT;
|
||||
break;
|
||||
}
|
||||
case ION_IOC_SYNC:
|
||||
{
|
||||
struct ion_fd_data data;
|
||||
if (copy_from_user(&data, (void __user *)arg,
|
||||
sizeof(struct ion_fd_data)))
|
||||
return -EFAULT;
|
||||
ion_sync_for_device(client, data.fd);
|
||||
break;
|
||||
}
|
||||
case ION_IOC_CUSTOM:
|
||||
{
|
||||
struct ion_device *dev = client->dev;
|
||||
|
@ -328,7 +328,17 @@ struct ion_custom_data {
|
||||
* descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
|
||||
* field set to the corresponding opaque handle.
|
||||
*/
|
||||
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, int)
|
||||
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
|
||||
|
||||
/**
|
||||
* DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
|
||||
*
|
||||
* Deprecated in favor of using the dma_buf api's correctly (syncing
|
||||
* will happen automatically when the buffer is mapped to a device).
|
||||
* If necessary should be used after touching a cached buffer from the cpu,
|
||||
* this will make the buffer in memory coherent.
|
||||
*/
|
||||
#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
|
||||
|
||||
/**
|
||||
* DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
|
||||
|
Loading…
Reference in New Issue
Block a user