Mirror of https://github.com/torvalds/linux.git
4ce63fcd91
This patch adds a complete implementation of the DMA-mapping API for devices that have IOMMU support.

The implementation tries to optimize DMA address space usage by remapping all possible physical memory chunks into a single DMA address space chunk. The DMA address space is managed on top of a bitmap stored in the dma_iommu_mapping structure kept in device->archdata. Platform setup code has to initialize the parameters of the DMA address space (base address, size, allocation precision order) with the arm_iommu_create_mapping() function. To reduce the size of the bitmap, all allocations are aligned to the specified order of base 4 KiB pages.

The dma_alloc_* functions allocate physical memory in chunks, each with the alloc_pages() function, to avoid failing if physical memory gets fragmented. In the worst case the allocated buffer is composed of 4 KiB page chunks.

The dma_map_sg() function minimizes the total number of DMA address space chunks by merging physical memory chunks into one larger DMA address space chunk. If the boundaries of a requested chunk (scatterlist entry) match physical page boundaries, most dma_map_sg() calls will result in only a single chunk being created in the DMA address space.

dma_map_page() simply creates a mapping for the given page(s) in the DMA address space.

All DMA functions also perform the required cache operations, like their counterparts for the ARM linear physical memory mapping.

This patch contains code and fixes kindly provided by:
- Krishna Reddy <vdumpa@nvidia.com>,
- Andrzej Pietrasiewicz <andrzej.p@samsung.com>,
- Hiroshi DOYU <hdoyu@nvidia.com>

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
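For context, a minimal sketch of the platform setup step described above might look as follows. It assumes the interfaces this patch introduces, arm_iommu_create_mapping() taking (bus, base, size, order) plus arm_iommu_attach_device() and arm_iommu_release_mapping(); the function name, address range, size, and header choices are illustrative, and exact signatures may differ between kernel versions.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

/*
 * Illustrative sketch only: give a platform device a 128 MiB IOMMU-backed
 * DMA address space starting at 0x80000000, managed at order-0 (4 KiB)
 * allocation granularity.  Values and error handling are examples.
 */
static int example_setup_iommu_dma(struct device *dev)
{
        struct dma_iommu_mapping *mapping;
        int err;

        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           0x80000000, SZ_128M, 0);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        err = arm_iommu_attach_device(dev, mapping);
        if (err) {
                arm_iommu_release_mapping(mapping);
                return err;
        }

        /*
         * From here on, dma_alloc_coherent()/dma_map_sg() calls on this
         * device are served from the IOMMU-backed DMA address space.
         */
        return 0;
}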
33 lines · 829 B · C
#ifndef VMREGION_H
#define VMREGION_H

#include <linux/spinlock.h>
#include <linux/list.h>

struct page;

struct arm_vmregion_head {
        spinlock_t              vm_lock;
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
};

struct arm_vmregion {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
        void                    *priv;
        int                     vm_active;
        const void              *caller;
};

struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);

int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);

#endif
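As a hedged illustration of how the declarations above are typically used (modelled on the consistent-mapping allocator in arch/arm/mm; the static head, its address window, and the helper names here are hypothetical, not kernel code), a caller might reserve and release a virtual region like this:

#include <linux/gfp.h>
#include <linux/mm.h>
#include "vmregion.h"

/* Hypothetical head describing a 16 MiB virtual window to carve regions from. */
static struct arm_vmregion_head example_head = {
        .vm_lock        = __SPIN_LOCK_UNLOCKED(example_head.vm_lock),
        .vm_list        = LIST_HEAD_INIT(example_head.vm_list),
        .vm_start       = 0xf0000000UL,
        .vm_end         = 0xf1000000UL,
};

static void *example_reserve(size_t size)
{
        struct arm_vmregion *c;

        /* Reserve a page-aligned region of 'size' bytes from the window. */
        size = PAGE_ALIGN(size);
        c = arm_vmregion_alloc(&example_head, PAGE_SIZE, size,
                               GFP_KERNEL, __builtin_return_address(0));
        return c ? (void *)c->vm_start : NULL;
}

static void example_release(void *vaddr)
{
        struct arm_vmregion *c;

        /* Look up the region covering vaddr, unlink it, and free it. */
        c = arm_vmregion_find_remove(&example_head, (unsigned long)vaddr);
        if (c)
                arm_vmregion_free(&example_head, c);
}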