media: videobuf2: use sgtable-based scatterlist wrappers
Use the recently introduced common wrappers that operate directly on struct sg_table objects, together with the scatterlist page iterators, to make the code a bit more compact, more robust, easier to follow and safer to copy/paste. No functional change: the code already issued all the scatterlist-related calls correctly.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
commit 8b7c0280ab
parent f95fc014e0
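The same conversion pattern repeats throughout the diff below: open-coded dma_map_sg_attrs()/dma_unmap_sg_attrs()/dma_sync_sg_*() calls on sgt->sgl with an explicit nents/orig_nents count become the sg_table-aware wrappers, and for_each_sg() loops become for_each_sgtable_*() iterators. A minimal sketch of that pattern follows; the helper names vb2_example_map_old()/vb2_example_map_new() are made up for illustration, while dma_map_sg_attrs(), dma_map_sgtable() and DMA_ATTR_SKIP_CPU_SYNC are the real kernel API used by the patch.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Before: the driver drives the raw scatterlist API itself and has to
 * remember that the mapping call takes orig_nents while the resulting
 * count lives in nents (illustrative helper, not from the patch).
 */
static int vb2_example_map_old(struct device *dev, struct sg_table *sgt,
                               enum dma_data_direction dir)
{
        sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                      dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (!sgt->nents)
                return -EIO;
        return 0;
}

/* After: dma_map_sgtable() takes the whole sg_table, picks the right
 * nents/orig_nents fields internally and returns 0 or a negative errno.
 */
static int vb2_example_map_new(struct device *dev, struct sg_table *sgt,
                               enum dma_data_direction dir)
{
        if (dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC))
                return -EIO;
        return 0;
}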
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -58,10 +58,10 @@ static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
         unsigned int i;
         unsigned long size = 0;
 
-        for_each_sg(sgt->sgl, s, sgt->nents, i) {
+        for_each_sgtable_dma_sg(sgt, s, i) {
                 if (sg_dma_address(s) != expected)
                         break;
-                expected = sg_dma_address(s) + sg_dma_len(s);
+                expected += sg_dma_len(s);
                 size += sg_dma_len(s);
         }
         return size;
@@ -103,8 +103,7 @@ static void vb2_dc_prepare(void *buf_priv)
         if (!sgt)
                 return;
 
-        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-                               buf->dma_dir);
+        dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dc_finish(void *buf_priv)
@@ -115,7 +114,7 @@ static void vb2_dc_finish(void *buf_priv)
         if (!sgt)
                 return;
 
-        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+        dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 /*********************************************/
@@ -275,8 +274,8 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
          * memory locations do not require any explicit cache
          * maintenance prior or after being used by the device.
          */
-        dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                           attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+        dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+                          DMA_ATTR_SKIP_CPU_SYNC);
         sg_free_table(sgt);
         kfree(attach);
         db_attach->priv = NULL;
@@ -301,8 +300,8 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 
         /* release any previous cache */
         if (attach->dma_dir != DMA_NONE) {
-                dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                                   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
                 attach->dma_dir = DMA_NONE;
         }
 
@@ -310,9 +309,8 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
          * mapping to the client with new direction, no cache sync
          * required see comment in vb2_dc_dmabuf_ops_detach()
          */
-        sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                                      dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-        if (!sgt->nents) {
+        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
+                            DMA_ATTR_SKIP_CPU_SYNC)) {
                 pr_err("failed to map scatterlist\n");
                 mutex_unlock(lock);
                 return ERR_PTR(-EIO);
@@ -455,8 +453,8 @@ static void vb2_dc_put_userptr(void *buf_priv)
                  * No need to sync to CPU, it's already synced to the CPU
                  * since the finish() memop will have been called before this.
                  */
-                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
                 pages = frame_vector_pages(buf->vec);
                 /* sgt should exist only if vector contains pages... */
                 BUG_ON(IS_ERR(pages));
@@ -553,9 +551,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
          * No need to sync to the device, this will happen later when the
          * prepare() memop is called.
          */
-        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-        if (sgt->nents <= 0) {
+        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+                            DMA_ATTR_SKIP_CPU_SYNC)) {
                 pr_err("failed to map scatterlist\n");
                 ret = -EIO;
                 goto fail_sgt_init;
@@ -577,8 +574,7 @@ out:
         return buf;
 
 fail_map_sg:
-        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                           buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 
 fail_sgt_init:
         sg_free_table(sgt);
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -148,9 +148,8 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
          * No need to sync to the device, this will happen later when the
          * prepare() memop is called.
          */
-        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-        if (!sgt->nents)
+        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+                            DMA_ATTR_SKIP_CPU_SYNC))
                 goto fail_map;
 
         buf->handler.refcount = &buf->refcount;
@@ -186,8 +185,8 @@ static void vb2_dma_sg_put(void *buf_priv)
         if (refcount_dec_and_test(&buf->refcount)) {
                 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                         buf->num_pages);
-                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+                                  DMA_ATTR_SKIP_CPU_SYNC);
                 if (buf->vaddr)
                         vm_unmap_ram(buf->vaddr, buf->num_pages);
                 sg_free_table(buf->dma_sgt);
@@ -204,8 +203,7 @@ static void vb2_dma_sg_prepare(void *buf_priv)
         struct vb2_dma_sg_buf *buf = buf_priv;
         struct sg_table *sgt = buf->dma_sgt;
 
-        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-                               buf->dma_dir);
+        dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dma_sg_finish(void *buf_priv)
@@ -213,7 +211,7 @@ static void vb2_dma_sg_finish(void *buf_priv)
         struct vb2_dma_sg_buf *buf = buf_priv;
         struct sg_table *sgt = buf->dma_sgt;
 
-        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+        dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
@@ -256,9 +254,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
          * No need to sync to the device, this will happen later when the
          * prepare() memop is called.
          */
-        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-        if (!sgt->nents)
+        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+                            DMA_ATTR_SKIP_CPU_SYNC))
                 goto userptr_fail_map;
 
         return buf;
@@ -284,8 +281,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 
         dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
                 __func__, buf->num_pages);
-        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
-                           DMA_ATTR_SKIP_CPU_SYNC);
+        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
         if (buf->vaddr)
                 vm_unmap_ram(buf->vaddr, buf->num_pages);
         sg_free_table(buf->dma_sgt);
@@ -408,8 +404,7 @@ static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
 
         /* release the scatterlist cache */
         if (attach->dma_dir != DMA_NONE)
-                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                             attach->dma_dir);
+                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
         sg_free_table(sgt);
         kfree(attach);
         db_attach->priv = NULL;
@@ -434,15 +429,12 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 
         /* release any previous cache */
         if (attach->dma_dir != DMA_NONE) {
-                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                             attach->dma_dir);
+                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                 attach->dma_dir = DMA_NONE;
         }
 
         /* mapping to the client with new direction */
-        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                                dma_dir);
-        if (!sgt->nents) {
+        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                 pr_err("failed to map scatterlist\n");
                 mutex_unlock(lock);
                 return ERR_PTR(-EIO);
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -229,7 +229,7 @@ static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
                 kfree(attach);
                 return ret;
         }
-        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+        for_each_sgtable_sg(sgt, sg, i) {
                 struct page *page = vmalloc_to_page(vaddr);
 
                 if (!page) {
@@ -259,8 +259,7 @@ static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
 
         /* release the scatterlist cache */
         if (attach->dma_dir != DMA_NONE)
-                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                             attach->dma_dir);
+                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
         sg_free_table(sgt);
         kfree(attach);
         db_attach->priv = NULL;
@@ -285,15 +284,12 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
 
         /* release any previous cache */
         if (attach->dma_dir != DMA_NONE) {
-                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                             attach->dma_dir);
+                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                 attach->dma_dir = DMA_NONE;
         }
 
         /* mapping to the client with new direction */
-        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-                                dma_dir);
-        if (!sgt->nents) {
+        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                 pr_err("failed to map scatterlist\n");
                 mutex_unlock(lock);
                 return ERR_PTR(-EIO);
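A note on the iterator changes above: for_each_sgtable_dma_sg() walks the DMA-mapped entries of the table (sgt->nents, valid only after dma_map_sgtable()), while for_each_sgtable_sg() walks the original CPU-side entries (sgt->orig_nents), so callers no longer pick the count themselves. A minimal sketch follows, assuming an already mapped sg_table; the function name example_mapped_size() is made up for illustration.

#include <linux/scatterlist.h>

/* Sum the DMA length of a mapped sg_table with the new iterator;
 * equivalent to the old for_each_sg(sgt->sgl, sg, sgt->nents, i) loop.
 */
static size_t example_mapped_size(struct sg_table *sgt)
{
        struct scatterlist *sg;
        unsigned int i;
        size_t size = 0;

        for_each_sgtable_dma_sg(sgt, sg, i)
                size += sg_dma_len(sg);

        return size;
}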