swiotlb: Split size parameter to map/unmap APIs

This splits the size parameter of swiotlb_tbl_map_single() and
swiotlb_tbl_unmap_single() into a mapping_size and an alloc_size
parameter: mapping_size is the number of bytes actually bounced,
while alloc_size is the size of the bounce-buffer allocation, which
callers may round up to the IOMMU page size.
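
As a hedged illustration (not part of this patch), a follow-up IOMMU
bounce-buffer user is expected to pass the unpadded data length as
mapping_size and a padded length as alloc_size; VTD_PAGE_SIZE below
stands in for the IOMMU page size and is an assumption of this sketch:

	/*
	 * Bounce only the bytes the device actually maps, but reserve
	 * bounce slots padded to the IOMMU page size.
	 */
	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);

	tlb_addr = swiotlb_tbl_map_single(dev, tbl_dma_addr, paddr,
					  size, aligned_size, dir, attrs);
	...
	swiotlb_tbl_unmap_single(dev, tlb_addr, size, aligned_size,
				 dir, attrs);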

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 3fc1ca0065
parent 2c70010867
Lu Baolu, 2019-09-06 14:14:48 +08:00; committed by Joerg Roedel
4 files changed, 32 insertions(+), 20 deletions(-)

--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c

@@ -386,8 +386,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
-				     attrs);
+	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
+				     size, size, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Ensure that the address returned is DMA'ble
 	 */
 	if (unlikely(!dma_capable(dev, dev_addr, size))) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir,
+		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return DMA_MAPPING_ERROR;
 	}
@@ -433,7 +433,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr))
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
 }
 
 static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,

--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h

@@ -46,13 +46,17 @@ enum dma_sync_target {
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 					  dma_addr_t tbl_dma_addr,
-					  phys_addr_t phys, size_t size,
+					  phys_addr_t phys,
+					  size_t mapping_size,
+					  size_t alloc_size,
 					  enum dma_data_direction dir,
 					  unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
-				     size_t size, enum dma_data_direction dir,
+				     size_t mapping_size,
+				     size_t alloc_size,
+				     enum dma_data_direction dir,
 				     unsigned long attrs);
 
 extern void swiotlb_tbl_sync_single(struct device *hwdev,
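
Every call site converted in this patch follows the same mechanical
pattern: pass the old size for both new parameters, so that
mapping_size == alloc_size and behaviour is unchanged. For example:

	/* before */
	swiotlb_tbl_unmap_single(hwdev, tlb_addr, size, dir, attrs);
	/* after */
	swiotlb_tbl_unmap_single(hwdev, tlb_addr, size, size, dir, attrs);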

--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c

@@ -297,7 +297,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
 	if (unlikely(is_swiotlb_buffer(phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
 }
 EXPORT_SYMBOL(dma_direct_unmap_page);

--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c

@@ -444,7 +444,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 				   dma_addr_t tbl_dma_addr,
-				   phys_addr_t orig_addr, size_t size,
+				   phys_addr_t orig_addr,
+				   size_t mapping_size,
+				   size_t alloc_size,
 				   enum dma_data_direction dir,
 				   unsigned long attrs)
 {
@@ -464,6 +466,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
 			     sme_active() ? "SME" : "SEV");
 
+	if (mapping_size > alloc_size) {
+		dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
+			      mapping_size, alloc_size);
+		return (phys_addr_t)DMA_MAPPING_ERROR;
+	}
+
 	mask = dma_get_seg_boundary(hwdev);
 
 	tbl_dma_addr &= mask;
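
The new guard enforces the invariant mapping_size <= alloc_size: a
mapping can never bounce more bytes than were reserved for it. As an
illustration with hypothetical values:

	/* 4096 bytes of data cannot fit in a 2048-byte allocation: */
	map = swiotlb_tbl_map_single(dev, tbl_dma_addr, paddr,
				     4096, 2048, dir, attrs);
	/* map == (phys_addr_t)DMA_MAPPING_ERROR, after a one-time warning */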
@@ -471,8 +479,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
 	 */
 	max_slots = mask + 1
 		  ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
 		  : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -481,8 +489,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	 * For mappings greater than or equal to a page, we limit the stride
 	 * (and hence alignment) to a page size.
 	 */
-	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	if (size >= PAGE_SIZE)
+	nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	if (alloc_size >= PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
 		stride = 1;
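
Slot accounting is now driven by alloc_size rather than by the
bounced data length. With IO_TLB_SHIFT = 11 (2 KiB slabs) and a
4 KiB PAGE_SIZE, a worked example with assumed values:

	/* alloc_size = 3000: ALIGN(3000, 2048) = 4096, 4096 >> 11 = 2 */
	nslots = ALIGN(3000, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;	/* 2 slots */
	/* 3000 < PAGE_SIZE, so the search stride stays 1 */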
@@ -547,7 +555,7 @@ not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
 		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
-			 size, io_tlb_nslabs, tmp_io_tlb_used);
+			 alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
 	return (phys_addr_t)DMA_MAPPING_ERROR;
 found:
 	io_tlb_used += nslots;
@@ -562,7 +570,7 @@ found:
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
 
 	return tlb_addr;
 }
@@ -571,11 +579,11 @@ found:
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t size, enum dma_data_direction dir,
-			      unsigned long attrs)
+			      size_t mapping_size, size_t alloc_size,
+			      enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long flags;
-	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
@@ -585,7 +593,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	if (orig_addr != INVALID_PHYS_ADDR &&
 	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
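
On the unmap side the two sizes play distinct roles: mapping_size
bounds the copy back into the caller's buffer, while alloc_size (via
nslots) decides how many slots are returned to the free list. A
hypothetical call:

	/* copy back 1500 valid bytes, free both 2 KiB slots of the
	 * 4096-byte padded allocation */
	swiotlb_tbl_unmap_single(dev, tlb_addr, 1500, 4096,
				 DMA_FROM_DEVICE, 0);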
@@ -665,14 +673,14 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 	/* Oh well, have to allocate and map a bounce buffer. */
 	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
-			*phys, size, dir, attrs);
+			*phys, size, size, dir, attrs);
 	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
 		return false;
 
 	/* Ensure that the address returned is DMA'ble */
 	*dma_addr = __phys_to_dma(dev, *phys);
 	if (unlikely(!dma_capable(dev, *dma_addr, size))) {
-		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return false;
 	}