swiotlb: provide swiotlb_init variants that remap the buffer
To share more code between swiotlb and xen-swiotlb, offer a swiotlb_init_remap interface and add a remap callback to swiotlb_init_late that will allow Xen to remap the buffer without duplicating much of the logic.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
commit 7374153d29
parent 742519538e
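For context, a minimal sketch of how a caller is expected to use the new early-init hook (the names below are made up for illustration and are not part of this patch): the remap callback receives the freshly allocated buffer and its slab count; a negative return makes swiotlb free the buffer, halve nslabs (aligned to IO_TLB_SEGSIZE) and retry, panicking once it drops below IO_TLB_MIN_SLABS.

	/* Illustrative only, not part of this patch. */
	static int example_remap(void *tlb, unsigned long nslabs)
	{
		/*
		 * e.g. make [tlb, tlb + (nslabs << IO_TLB_SHIFT)) usable by
		 * the hypervisor; return < 0 to request a smaller buffer.
		 */
		return 0;
	}

	void __init example_mem_init(void)
	{
		/* Passing NULL keeps the plain swiotlb_init() behaviour. */
		swiotlb_init_remap(true, 0, example_remap);
	}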
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
 		int size = STA2X11_SWIOTLB_SIZE;
 		/* First instance: register your own swiotlb area */
 		dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
-		if (swiotlb_init_late(size, GFP_DMA))
+		if (swiotlb_init_late(size, GFP_DMA, NULL))
 			dev_emerg(&pdev->dev, "init swiotlb failed\n");
 	}
 	list_add(&instance->list, &sta2x11_instance_list);
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -36,8 +36,11 @@ struct scatterlist;
 
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags);
 unsigned long swiotlb_size_or_default(void);
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+	int (*remap)(void *tlb, unsigned long nslabs));
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+	int (*remap)(void *tlb, unsigned long nslabs));
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-int swiotlb_init_late(size_t size, gfp_t gfp_mask);
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -256,9 +256,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
-	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
+	unsigned long nslabs = default_nslabs;
+	size_t bytes;
 	void *tlb;
 
 	if (!addressing_limit && !swiotlb_force_bounce)
@@ -271,12 +273,23 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 	 * allow to pick a location everywhere for hypervisors with guest
 	 * memory encryption.
 	 */
+retry:
+	bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
 	if (flags & SWIOTLB_ANY)
 		tlb = memblock_alloc(bytes, PAGE_SIZE);
 	else
 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 	if (!tlb)
 		goto fail;
+	if (remap && remap(tlb, nslabs) < 0) {
+		memblock_free(tlb, PAGE_ALIGN(bytes));
+
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		if (nslabs < IO_TLB_MIN_SLABS)
+			panic("%s: Failed to remap %zu bytes\n",
+			      __func__, bytes);
+		goto retry;
+	}
 	if (swiotlb_init_with_tbl(tlb, default_nslabs, flags))
 		goto fail_free_mem;
 	return;
@@ -287,12 +300,18 @@ fail:
 	pr_warn("Cannot allocate buffer");
 }
 
+void __init swiotlb_init(bool addressing_limit, unsigned int flags)
+{
+	return swiotlb_init_remap(addressing_limit, flags, NULL);
+}
+
 /*
  * Systems with larger DMA zones (those that don't support ISA) can
  * initialize the swiotlb later using the slab allocator if needed.
  * This should be just like above, but with some error catching.
  */
-int swiotlb_init_late(size_t size, gfp_t gfp_mask)
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
 	unsigned long bytes;
@@ -303,6 +322,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 	if (swiotlb_force_disable)
 		return 0;
 
+retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
 	bytes = nslabs << IO_TLB_SHIFT;
@@ -323,6 +343,16 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask)
 			(PAGE_SIZE << order) >> 20);
 		nslabs = SLABS_PER_PAGE << order;
 	}
+	if (remap)
+		rc = remap(vstart, nslabs);
+	if (rc) {
+		free_pages((unsigned long)vstart, order);
+
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		if (nslabs < IO_TLB_MIN_SLABS)
+			return rc;
+		goto retry;
+	}
 	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
 	if (rc)
 		free_pages((unsigned long)vstart, order);
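The late path works the same way (again a sketch with made-up names, not taken from this patch): a nonzero return from the callback frees the pages, halves nslabs and retries, and the error is propagated once fewer than IO_TLB_MIN_SLABS remain; callers that need no remapping pass NULL, as the sta2x11 hunk above does.

	/* Illustrative only: a late-boot caller supplying a remap hook. */
	static int example_late_remap(void *tlb, unsigned long nslabs)
	{
		return 0;	/* nonzero asks swiotlb to retry with fewer slabs */
	}

	static int example_late_setup(void)
	{
		/* 64 MB bounce buffer from the page allocator. */
		return swiotlb_init_late(64 << 20, GFP_KERNEL, example_late_remap);
	}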