gpu: host1x: Add IOMMU support

Add support for the Host1x unit to be located behind
an IOMMU. This is required when gather buffers may be
allocated non-contiguously in physical memory, as can
be the case when TegraDRM is also using the IOMMU.

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Author:     Mikko Perttunen
Date:       2016-12-14 13:16:14 +02:00
Committer:  Thierry Reding
Commit:     404bfb78da
Parent:     8cadb01d2c

7 files changed, 177 insertions(+), 39 deletions(-)
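Only the drivers/gpu/host1x/cdma.c hunks are reproduced below. They rely on host1x->domain, host1x->iova and host1x->iova_end being set up during probe in one of the six files not shown here. A minimal sketch of what that setup plausibly looks like, assuming the field names used in the diff and the IOMMU/IOVA APIs of this kernel generation (pdev, err and the fail_free_domain label are illustrative, not taken from the patch):

	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain)
			return -ENOMEM;

		err = iommu_attach_device(host->domain, &pdev->dev);
		if (err)
			goto fail_free_domain;

		geometry = &host->domain->geometry;

		/* IOVA granule: the smallest page size the IOMMU supports */
		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order,
				 geometry->aperture_end >> order);
		host->iova_end = geometry->aperture_end;
	}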

@@ -51,9 +51,15 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb)
 	struct host1x_cdma *cdma = pb_to_cdma(pb);
 	struct host1x *host1x = cdma_to_host1x(cdma);
 
-	if (pb->phys != 0)
-		dma_free_wc(host1x->dev, pb->size_bytes + 4, pb->mapped,
-			    pb->phys);
+	if (!pb->phys)
+		return;
+
+	if (host1x->domain) {
+		iommu_unmap(host1x->domain, pb->dma, pb->alloc_size);
+		free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
+	}
+
+	dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
 
 	pb->mapped = NULL;
 	pb->phys = 0;
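Note the teardown order in the hunk above: the IOMMU mapping is removed with iommu_unmap() before the IOVA range and the backing write-combined allocation are released, so the device-visible address never outlives the memory behind it.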
@@ -66,28 +72,64 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
 {
 	struct host1x_cdma *cdma = pb_to_cdma(pb);
 	struct host1x *host1x = cdma_to_host1x(cdma);
+	struct iova *alloc;
+	u32 size;
+	int err;
 
 	pb->mapped = NULL;
 	pb->phys = 0;
-	pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
+	pb->size = HOST1X_PUSHBUFFER_SLOTS * 8;
+
+	size = pb->size + 4;
 
 	/* initialize buffer pointers */
-	pb->fence = pb->size_bytes - 8;
+	pb->fence = pb->size - 8;
 	pb->pos = 0;
 
-	/* allocate and map pushbuffer memory */
-	pb->mapped = dma_alloc_wc(host1x->dev, pb->size_bytes + 4, &pb->phys,
-				  GFP_KERNEL);
-	if (!pb->mapped)
-		goto fail;
+	if (host1x->domain) {
+		unsigned long shift;
+
+		size = iova_align(&host1x->iova, size);
+
+		pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
+					  GFP_KERNEL);
+		if (!pb->mapped)
+			return -ENOMEM;
+
+		shift = iova_shift(&host1x->iova);
+		alloc = alloc_iova(&host1x->iova, size >> shift,
+				   host1x->iova_end >> shift, true);
+		if (!alloc) {
+			err = -ENOMEM;
+			goto iommu_free_mem;
+		}
+
+		pb->dma = iova_dma_addr(&host1x->iova, alloc);
+		err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
+				IOMMU_READ);
+		if (err)
+			goto iommu_free_iova;
+	} else {
+		pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
+					  GFP_KERNEL);
+		if (!pb->mapped)
+			return -ENOMEM;
+
+		pb->dma = pb->phys;
+	}
+
+	pb->alloc_size = size;
 
 	host1x_hw_pushbuffer_init(host1x, pb);
 
 	return 0;
 
-fail:
-	host1x_pushbuffer_destroy(pb);
-	return -ENOMEM;
+iommu_free_iova:
+	__free_iova(&host1x->iova, alloc);
+iommu_free_mem:
+	dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
+
+	return err;
 }
 
 /*
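For reference, the IOVA helper arithmetic in the hunk above, worked through under the assumption of a 4 KiB IOMMU granule (iova_shift() == 12) and the driver's HOST1X_PUSHBUFFER_SLOTS value of 512; the concrete numbers are illustrative only:

	/* pb->size = 512 slots * 8 bytes             = 4096
	 * size     = pb->size + 4                    = 4100
	 *            (the extra word holds the restart opcode that wraps
	 *             the DMA engine back to pb->dma)
	 * size     = iova_align(&host1x->iova, 4100) = 8192, two 4 KiB pages
	 * alloc    = alloc_iova(&host1x->iova, 8192 >> 12,
	 *                       host1x->iova_end >> 12, true);
	 *            ("true" requests a size-aligned IOVA range)
	 * pb->dma  = iova_dma_addr(&host1x->iova, alloc)
	 *          = alloc->pfn_lo << 12
	 */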
@@ -101,7 +143,7 @@ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
 	WARN_ON(pb->pos == pb->fence);
 	*(p++) = op1;
 	*(p++) = op2;
-	pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
+	pb->pos = (pb->pos + 8) & (pb->size - 1);
 }
 
 /*
@@ -111,7 +153,7 @@ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
 static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
 {
 	/* Advance the next write position */
-	pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
+	pb->fence = (pb->fence + slots * 8) & (pb->size - 1);
 }
 
 /*
@@ -119,7 +161,7 @@ static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
  */
 static u32 host1x_pushbuffer_space(struct push_buffer *pb)
 {
-	return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
+	return ((pb->fence - pb->pos) & (pb->size - 1)) / 8;
 }
 
 /*
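The three one-line conversions above keep the same ring buffer arithmetic: pb->size is a power of two, so masking with (pb->size - 1) is a cheap modulo that also stays correct when the u32 indices wrap. A standalone illustration with hypothetical values, not taken from the patch:

	u32 size = 4096;            /* slots * 8; must be a power of two */
	u32 pos = 4088;             /* write index near the end of the ring */
	u32 fence = 8;              /* free-space marker has wrapped around */

	/* 8 - 4088 underflows to a huge u32 value, but the mask folds it
	 * back into the ring: (8 - 4088) & 4095 == 16 bytes == 2 slots.
	 */
	u32 space = ((fence - pos) & (size - 1)) / 8;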