iommu/amd: Optimize map_sg and unmap_sg

Optimize these functions so that they need only one call
into the address allocator. This also saves a couple of
io-tlb flushes in the unmap_sg path.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Joerg Roedel <jroedel@suse.de>
Date:   2016-07-06 17:20:54 +02:00
commit 80187fd39d
parent f37f7f33d5
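The patch's core idea, in brief: a counting pass works out how many IO pages the whole scatterlist needs while honoring the device's segment-boundary mask, a single IOVA allocation then covers that total, and a mapping pass installs the page-table entries one page at a time. The sketch below is a minimal user-space rendition of the counting pass, assuming 4 KiB pages; toy_sg, toy_sg_num_pages and pages_spanned are hypothetical stand-ins for the kernel's scatterlist, sg_num_pages() and iommu_num_pages(), and it records each entry's page offset after boundary padding is applied:

	#include <stdio.h>

	#define PAGE_SHIFT 12UL
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/* hypothetical stand-in for struct scatterlist */
	struct toy_sg {
		unsigned long phys;        /* physical address of the segment */
		unsigned long length;      /* segment length in bytes */
		unsigned long page_offset; /* filled in: page offset inside the range */
	};

	/* pages touched by [phys, phys + length), like iommu_num_pages() */
	static unsigned long pages_spanned(unsigned long phys, unsigned long length)
	{
		unsigned long first = phys >> PAGE_SHIFT;
		unsigned long last  = (phys + length - 1) >> PAGE_SHIFT;

		return last - first + 1;
	}

	/* counting pass: one walk assigns each entry a relative page offset
	 * and pads across segment-boundary crossings */
	static unsigned long toy_sg_num_pages(struct toy_sg *sg, int nelems,
					      unsigned long boundary_pages)
	{
		unsigned long npages = 0;
		int i;

		for (i = 0; i < nelems; i++) {
			unsigned long p = npages % boundary_pages;
			unsigned long n = pages_spanned(sg[i].phys, sg[i].length);

			if (p + n > boundary_pages)       /* would straddle a boundary */
				npages += boundary_pages - p; /* pad to the next window */
			sg[i].page_offset = npages;
			npages += n;
		}
		return npages;
	}

	int main(void)
	{
		struct toy_sg sg[] = {
			{ 0x1000, 3 * PAGE_SIZE }, /* aligned, 3 pages */
			{ 0x9080, 2 * PAGE_SIZE }, /* unaligned, spans 3 pages */
		};
		unsigned long total = toy_sg_num_pages(sg, 2, 4 /* 4-page window */);

		printf("offsets: %lu, %lu; total: %lu\n",
		       sg[0].page_offset, sg[1].page_offset, total);
		return 0;
	}

Compiled and run, it reports offsets 0 and 4 and a total of 7 io-pages: the second entry is padded past the 4-page boundary window it would otherwise straddle.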

@@ -2396,50 +2396,110 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 	__unmap_single(domain->priv, dma_addr, size, dir);
 }
 
+static int sg_num_pages(struct device *dev,
+			struct scatterlist *sglist,
+			int nelems)
+{
+	unsigned long mask, boundary_size;
+	struct scatterlist *s;
+	int i, npages = 0;
+
+	mask          = dma_get_seg_boundary(dev);
+	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
+				   1UL << (BITS_PER_LONG - PAGE_SHIFT);
+
+	for_each_sg(sglist, s, nelems, i) {
+		int p, n;
+
+		s->dma_address = npages << PAGE_SHIFT;
+		p = npages % boundary_size;
+		n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
+		if (p + n > boundary_size)
+			npages += boundary_size - p;
+		npages += n;
+	}
+
+	return npages;
+}
+
 /*
  * The exported map_sg function for dma_ops (handles scatter-gather
  * lists).
  */
 static int map_sg(struct device *dev, struct scatterlist *sglist,
-		  int nelems, enum dma_data_direction dir,
+		  int nelems, enum dma_data_direction direction,
 		  struct dma_attrs *attrs)
 {
+	int mapped_pages = 0, npages = 0, prot = 0, i;
 	struct protection_domain *domain;
-	int i;
+	struct dma_ops_domain *dma_dom;
 	struct scatterlist *s;
-	phys_addr_t paddr;
-	int mapped_elems = 0;
+	unsigned long address;
 	u64 dma_mask;
 
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return 0;
 
+	dma_dom  = domain->priv;
 	dma_mask = *dev->dma_mask;
 
+	npages = sg_num_pages(dev, sglist, nelems);
+
+	address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
+	if (address == DMA_ERROR_CODE)
+		goto out_err;
+
+	prot = dir2prot(direction);
+
+	/* Map all sg entries */
 	for_each_sg(sglist, s, nelems, i) {
-		paddr = sg_phys(s);
+		int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
 
-		s->dma_address = __map_single(dev, domain->priv,
-					      paddr, s->length, dir, dma_mask);
+		for (j = 0; j < pages; ++j) {
+			unsigned long bus_addr, phys_addr;
+			int ret;
 
-		if (s->dma_address) {
-			s->dma_length = s->length;
-			mapped_elems++;
-		} else
-			goto unmap;
+			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
+			phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
+			ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+			if (ret)
+				goto out_unmap;
+
+			mapped_pages += 1;
+		}
 	}
 
-	return mapped_elems;
+	/* Everything is mapped - write the right values into s->dma_address */
+	for_each_sg(sglist, s, nelems, i) {
+		s->dma_address += address + s->offset;
+		s->dma_length   = s->length;
+	}
 
-unmap:
-	for_each_sg(sglist, s, mapped_elems, i) {
-		if (s->dma_address)
-			__unmap_single(domain->priv, s->dma_address,
-				       s->dma_length, dir);
-		s->dma_address = s->dma_length = 0;
+	return nelems;
+
+out_unmap:
+	pr_err("%s: IOMMU mapping error in map_sg (io-pages: %d)\n",
+	       dev_name(dev), npages);
+
+	for_each_sg(sglist, s, nelems, i) {
+		int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
+
+		for (j = 0; j < pages; ++j) {
+			unsigned long bus_addr;
+
+			bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
+			iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
+
+			if (--mapped_pages == 0)
+				goto out_free_iova;
+		}
 	}
 
-	return 0;
+out_free_iova:
+	free_iova_fast(&dma_dom->iovad, address, npages);
+
+out_err:
+	return 0;
 }
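One subtlety in sg_num_pages() above is turning the byte-granular boundary mask into a window measured in pages. A hedged user-space check of the same expression, assuming 4 KiB pages and a device reporting a 64 KiB segment boundary (mask 0xffff):

	#include <stdio.h>

	#define PAGE_SHIFT    12UL
	#define PAGE_SIZE     (1UL << PAGE_SHIFT)
	#define BITS_PER_LONG (8 * sizeof(long))
	#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long mask = 0xffffUL; /* 64 KiB segment boundary */
		unsigned long boundary_size;

		/* same expression as in sg_num_pages() */
		boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
					   1UL << (BITS_PER_LONG - PAGE_SHIFT);

		printf("boundary window: %lu pages\n", boundary_size); /* prints 16 */
		return 0;
	}

A mask of ~0UL makes mask + 1 wrap to zero, so the ternary falls back to 1UL << (BITS_PER_LONG - PAGE_SHIFT), effectively no boundary at all.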
@@ -2452,18 +2512,17 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		     struct dma_attrs *attrs)
 {
 	struct protection_domain *domain;
-	struct scatterlist *s;
-	int i;
+	unsigned long startaddr;
+	int npages = 2;
 
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
 
-	for_each_sg(sglist, s, nelems, i) {
-		__unmap_single(domain->priv, s->dma_address,
-			       s->dma_length, dir);
-		s->dma_address = s->dma_length = 0;
-	}
+	startaddr = sg_dma_address(sglist) & PAGE_MASK;
+	npages    = sg_num_pages(dev, sglist, nelems);
+
+	__unmap_single(domain->priv, startaddr, npages << PAGE_SHIFT, dir);
 }
 
 /*
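Because map_sg placed every entry in one contiguous IOVA range, unmap_sg no longer walks the list unmapping entry by entry: rounding the first entry's DMA address down to a page boundary and recounting pages via sg_num_pages() recovers the whole range, so a single __unmap_single() call (and hence a single io-tlb flush) tears everything down. A toy illustration of that arithmetic with made-up numbers, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT 12UL
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	int main(void)
	{
		/* first entry's DMA address as map_sg would have produced it:
		 * range base 0x100000 plus the entry's offset within its page */
		unsigned long first_dma = 0x100000UL + 0x80UL;
		unsigned long npages    = 7; /* what sg_num_pages() would recount */
		unsigned long startaddr = first_dma & PAGE_MASK;

		printf("one ranged unmap: [%#lx, %#lx)\n",
		       startaddr, startaddr + (npages << PAGE_SHIFT));
		return 0;
	}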