dma-mapping updates for Linux 5.18

Merge tag 'dma-mapping-5.18' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - do not zero the buffer in set_memory_decrypted() (Kirill A. Shutemov)

 - fix the return value of the dma-debug __setup handlers (Randy Dunlap)

 - swiotlb cleanups (Robin Murphy)

 - remove most remaining users of the deprecated pci-dma-compat.h API
   (Christophe JAILLET)

 - share the ABI header for the DMA map_benchmark with userspace (Tian Tao)

 - update the maintainer for DMA MAPPING BENCHMARK (Xiang Chen)

 - remove CONFIG_DMA_REMAP (Christoph Hellwig)

* tag 'dma-mapping-5.18' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: benchmark: extract a common header file for map_benchmark definition
  dma-debug: fix return value of __setup handlers
  dma-mapping: remove CONFIG_DMA_REMAP
  media: v4l2-pci-skeleton: Remove usage of the deprecated "pci-dma-compat.h" API
  rapidio/tsi721: Remove usage of the deprecated "pci-dma-compat.h" API
  sparc: Remove usage of the deprecated "pci-dma-compat.h" API
  agp/intel: Remove usage of the deprecated "pci-dma-compat.h" API
  alpha: Remove usage of the deprecated "pci-dma-compat.h" API
  MAINTAINERS: update maintainer list of DMA MAPPING BENCHMARK
  swiotlb: simplify array allocation
  swiotlb: tidy up includes
  swiotlb: simplify debugfs setup
  swiotlb: do not zero buffer in set_memory_decrypted()
commit 9ae2a14308
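Most of the churn below follows one mechanical pattern: each pci_* DMA helper from the deprecated pci-dma-compat.h becomes the corresponding dma_* call on &pdev->dev, and the PCI_DMA_* direction constants become the enum dma_data_direction values. A minimal before/after sketch of that conversion (hypothetical driver code, not taken from this pull; "buf" and "len" are placeholders):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_setup_dma(struct pci_dev *pdev, void *buf, size_t len,
                             dma_addr_t *handle)
{
        /* old: pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) */
        int ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

        if (ret)
                return ret;

        /* old: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
        *handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

        /* old: pci_dma_mapping_error(pdev, *handle) */
        if (dma_mapping_error(&pdev->dev, *handle))
                return -ENOMEM;
        return 0;
}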
MAINTAINERS

@@ -5880,7 +5880,7 @@ F: include/linux/dma-map-ops.h
 F: kernel/dma/
 
 DMA MAPPING BENCHMARK
-M: Barry Song <song.bao.hua@hisilicon.com>
+M: Xiang Chen <chenxiang66@hisilicon.com>
 L: iommu@lists.linux-foundation.org
 F: kernel/dma/map_benchmark.c
 F: tools/testing/selftests/dma/
arch/alpha/include/asm/floppy.h

@@ -43,17 +43,18 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
 	static int prev_dir;
 	int dir;
 
-	dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+	dir = (mode != DMA_MODE_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 
 	if (bus_addr
 	    && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
 		/* different from last time -- unmap prev */
-		pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir);
+		dma_unmap_single(&isa_bridge->dev, bus_addr, prev_size,
+				 prev_dir);
 		bus_addr = 0;
 	}
 
 	if (!bus_addr)	/* need to map it */
-		bus_addr = pci_map_single(isa_bridge, addr, size, dir);
+		bus_addr = dma_map_single(&isa_bridge->dev, addr, size, dir);
 
 	/* remember this one as prev */
 	prev_addr = addr;
arch/alpha/kernel/pci_iommu.c

@@ -333,7 +333,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	int dac_allowed;
 
-	BUG_ON(dir == PCI_DMA_NONE);
+	BUG_ON(dir == DMA_NONE);
 
 	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
@@ -356,7 +356,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	struct pci_iommu_arena *arena;
 	long dma_ofs, npages;
 
-	BUG_ON(dir == PCI_DMA_NONE);
+	BUG_ON(dir == DMA_NONE);
 
 	if (dma_addr >= __direct_map_base
 	    && dma_addr < __direct_map_base + __direct_map_size) {
@@ -460,7 +460,7 @@ static void alpha_pci_free_coherent(struct device *dev, size_t size,
 			      unsigned long attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
-	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 
 	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
@@ -639,7 +639,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t max_dma;
 	int dac_allowed;
 
-	BUG_ON(dir == PCI_DMA_NONE);
+	BUG_ON(dir == DMA_NONE);
 
 	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 
@@ -702,7 +702,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	/* Some allocation failed while mapping the scatterlist
 	   entries.  Unmap them now.  */
 	if (out > start)
-		pci_unmap_sg(pdev, start, out - start, dir);
+		dma_unmap_sg(&pdev->dev, start, out - start, dir);
 	return -ENOMEM;
 }
 
@@ -722,7 +722,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t max_dma;
 	dma_addr_t fbeg, fend;
 
-	BUG_ON(dir == PCI_DMA_NONE);
+	BUG_ON(dir == DMA_NONE);
 
 	if (! alpha_mv.mv_pci_tbi)
 		return;
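These constant and BUG_ON() swaps are safe because the compat layer was only ever a thin veneer: the PCI_DMA_* direction constants share values with enum dma_data_direction, and each pci_* helper forwarded straight to its dma_* equivalent. Roughly, paraphrased from the deprecated shim in older trees (constants historically in pci.h, wrappers in pci-dma-compat.h):

/* Values match enum dma_data_direction one-for-one. */
#define PCI_DMA_BIDIRECTIONAL	0	/* == DMA_BIDIRECTIONAL */
#define PCI_DMA_TODEVICE	1	/* == DMA_TO_DEVICE */
#define PCI_DMA_FROMDEVICE	2	/* == DMA_FROM_DEVICE */
#define PCI_DMA_NONE		3	/* == DMA_NONE */

/* Each wrapper just forwarded to the generic DMA API on &hwdev->dev. */
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	return dma_map_single(&hwdev->dev, ptr, size,
			      (enum dma_data_direction)direction);
}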
arch/arm/Kconfig

@@ -49,7 +49,7 @@ config ARM
 	select DMA_DECLARE_COHERENT
 	select DMA_GLOBAL_POOL if !MMU
 	select DMA_OPS
-	select DMA_REMAP if MMU
+	select DMA_NONCOHERENT_MMAP if MMU
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
 	select GENERIC_ALLOCATOR
arch/sparc/kernel/ioport.c

@@ -309,7 +309,7 @@ arch_initcall(sparc_register_ioport);
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
-	if (dir != PCI_DMA_TODEVICE &&
+	if (dir != DMA_TO_DEVICE &&
 	    sparc_cpu_model == sparc_leon &&
 	    !sparc_leon3_snooping_enabled())
 		leon_flush_dcache_all();
arch/xtensa/Kconfig

@@ -18,7 +18,7 @@ config XTENSA
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
-	select DMA_REMAP if MMU
+	select DMA_NONCOHERENT_MMAP if MMU
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_SHOW
 	select GENERIC_LIB_CMPDI2
drivers/char/agp/intel-gtt.c

@@ -111,8 +111,8 @@ static int intel_gtt_map_memory(struct page **pages,
 	for_each_sg(st->sgl, sg, num_entries, i)
 		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	if (!pci_map_sg(intel_private.pcidev,
-			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
+	if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents,
+			DMA_BIDIRECTIONAL))
 		goto err;
 
 	return 0;
@@ -127,8 +127,8 @@ static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
 
-	pci_unmap_sg(intel_private.pcidev, sg_list,
-		     num_sg, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg,
+		     DMA_BIDIRECTIONAL);
 
 	st.sgl = sg_list;
 	st.orig_nents = st.nents = num_sg;
@@ -303,9 +303,9 @@ static int intel_gtt_setup_scratch_page(void)
 	set_pages_uc(page, 1);
 
 	if (intel_private.needs_dmar) {
-		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
-					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
+		dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0,
+					PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
 			__free_page(page);
 			return -EINVAL;
 		}
@@ -552,9 +552,9 @@ static void intel_gtt_teardown_scratch_page(void)
 {
 	set_pages_wb(intel_private.scratch_page, 1);
 	if (intel_private.needs_dmar)
-		pci_unmap_page(intel_private.pcidev,
-			       intel_private.scratch_page_dma,
-			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		dma_unmap_page(&intel_private.pcidev->dev,
+			       intel_private.scratch_page_dma, PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
 	__free_page(intel_private.scratch_page);
 }
 
@@ -1412,13 +1412,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 
 	if (bridge) {
 		mask = intel_private.driver->dma_mask_size;
-		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+		if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
 			dev_err(&intel_private.pcidev->dev,
 				"set gfx device dma mask %d-bit failed!\n",
 				mask);
 		else
-			pci_set_consistent_dma_mask(intel_private.pcidev,
-						    DMA_BIT_MASK(mask));
+			dma_set_coherent_mask(&intel_private.pcidev->dev,
					      DMA_BIT_MASK(mask));
 	}
 
 	if (intel_gtt_init() != 0) {
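One behavioral detail worth noting in the scratch-page hunk above: dma_map_page() never returns a bare error code, so failures must be detected with dma_mapping_error() on the returned handle. A minimal sketch of the required idiom ("dev" and "page" are placeholders, not identifiers from intel-gtt.c):

#include <linux/dma-mapping.h>

static dma_addr_t example_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);

	/* The handle itself encodes failure; comparing against 0 is wrong. */
	if (dma_mapping_error(dev, addr))
		return DMA_MAPPING_ERROR;
	return addr;
}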
drivers/iommu/dma-iommu.c

@@ -856,7 +856,6 @@ out_unmap:
 	return NULL;
 }
 
-#ifdef CONFIG_DMA_REMAP
 static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
 		size_t size, enum dma_data_direction dir, gfp_t gfp,
 		unsigned long attrs)
@@ -886,7 +885,6 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 	sg_free_table(&sh->sgt);
 	kfree(sh);
 }
-#endif /* CONFIG_DMA_REMAP */
 
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
@@ -1280,7 +1278,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	    dma_free_from_pool(dev, cpu_addr, alloc_size))
 		return;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		/*
 		 * If it the address is remapped, then it's either non-coherent
 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
@@ -1322,7 +1320,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
+	if (!coherent || PageHighMem(page)) {
 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
@@ -1354,7 +1352,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 
 	gfp |= __GFP_ZERO;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
+	if (gfpflags_allow_blocking(gfp) &&
 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
 		return iommu_dma_alloc_remap(dev, size, handle, gfp,
 				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
@@ -1395,7 +1393,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
 		return -ENXIO;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages)
@@ -1417,7 +1415,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	struct page *page;
 	int ret;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages) {
@@ -1449,10 +1447,8 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.free			= iommu_dma_free,
 	.alloc_pages		= dma_common_alloc_pages,
 	.free_pages		= dma_common_free_pages,
-#ifdef CONFIG_DMA_REMAP
 	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
 	.free_noncontiguous	= iommu_dma_free_noncontiguous,
-#endif
 	.mmap			= iommu_dma_mmap,
 	.get_sgtable		= iommu_dma_get_sgtable,
 	.map_page		= iommu_dma_map_page,
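The CONFIG_DMA_REMAP removal works in both directions seen above: #ifdef blocks disappear outright, and "IS_ENABLED(CONFIG_DMA_REMAP) && x" conditions collapse to plain "x", because on every configuration that can reach this code the option was always enabled (remap support is now tied to CONFIG_MMU, per the kernel/dma/Makefile hunk further down). A toy illustration, not from the patch, of why the clauses could simply be dropped:

#include <linux/kconfig.h>
#include <linux/mm.h>

/*
 * IS_ENABLED(CONFIG_FOO) expands to a compile-time 1 when CONFIG_FOO=y
 * (or =m via the _MODULE variant) and 0 otherwise, so these two helpers
 * generate identical object code on any kernel with the option built in.
 */
static bool example_checked(const void *cpu_addr)
{
	return IS_ENABLED(CONFIG_MMU) && is_vmalloc_addr(cpu_addr);
}

static bool example_plain(const void *cpu_addr)
{
	return is_vmalloc_addr(cpu_addr);
}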
drivers/rapidio/devices/tsi721.c

@@ -2836,17 +2836,17 @@ static int tsi721_probe(struct pci_dev *pdev,
 	}
 
 	/* Configure DMA attributes. */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			tsi_err(&pdev->dev, "Unable to set DMA mask");
 			goto err_unmap_bars;
 		}
 
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 			tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (err)
 			tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
 	}
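The tsi721 conversion deliberately keeps the original two-step structure, but where a driver wants the same mask for both streaming and coherent mappings, the combined helper is the usual modern spelling. A sketch of equivalent fallback logic under that assumption (illustrative only, not how tsi721_probe is written):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_configure_dma(struct pci_dev *pdev)
{
	/* dma_set_mask_and_coherent() returns 0 on success. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;
	/* Fall back to 32-bit addressing. */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}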
include/linux/map_benchmark.h (new file, 31 lines)

@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 HiSilicon Limited.
+ */
+
+#ifndef _KERNEL_DMA_BENCHMARK_H
+#define _KERNEL_DMA_BENCHMARK_H
+
+#define DMA_MAP_BENCHMARK	_IOWR('d', 1, struct map_benchmark)
+#define DMA_MAP_MAX_THREADS	1024
+#define DMA_MAP_MAX_SECONDS	300
+#define DMA_MAP_MAX_TRANS_DELAY	(10 * NSEC_PER_MSEC)
+
+#define DMA_MAP_BIDIRECTIONAL	0
+#define DMA_MAP_TO_DEVICE	1
+#define DMA_MAP_FROM_DEVICE	2
+
+struct map_benchmark {
+	__u64 avg_map_100ns;	/* average map latency in 100ns */
+	__u64 map_stddev;	/* standard deviation of map latency */
+	__u64 avg_unmap_100ns;	/* as above */
+	__u64 unmap_stddev;
+	__u32 threads;		/* how many threads will do map/unmap in parallel */
+	__u32 seconds;		/* how long the test will last */
+	__s32 node;		/* which numa node this benchmark will run on */
+	__u32 dma_bits;		/* DMA addressing capability */
+	__u32 dma_dir;		/* DMA data direction */
+	__u32 dma_trans_ns;	/* time for DMA transmission in ns */
+	__u32 granule;		/* how many PAGE_SIZE will do map/unmap once a time */
+};
+#endif /* _KERNEL_DMA_BENCHMARK_H */
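With the header shared, userspace can drive the benchmark through the same ioctl definition the kernel uses. A minimal sketch of such a caller; the debugfs path matches what the in-tree selftest opens, but treat it as an assumption and check your kernel, and error handling is trimmed for brevity:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/map_benchmark.h>

int main(void)
{
	struct map_benchmark map = {
		.threads = 1,
		.seconds = 10,
		.node = -1,		/* any NUMA node */
		.dma_bits = 32,
		.dma_dir = DMA_MAP_BIDIRECTIONAL,
		.granule = 1,		/* map/unmap one page at a time */
	};
	int fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);

	if (fd < 0 || ioctl(fd, DMA_MAP_BENCHMARK, &map))
		return 1;
	printf("avg map latency: %llu00 ns\n",
	       (unsigned long long)map.avg_map_100ns);
	return 0;
}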
kernel/dma/Kconfig

@@ -110,15 +110,10 @@ config DMA_GLOBAL_POOL
 	select DMA_DECLARE_COHERENT
 	bool
 
-config DMA_REMAP
-	bool
-	depends on MMU
-	select DMA_NONCOHERENT_MMAP
-
 config DMA_DIRECT_REMAP
 	bool
-	select DMA_REMAP
 	select DMA_COHERENT_POOL
+	select DMA_NONCOHERENT_MMAP
 
 config DMA_CMA
 	bool "DMA Contiguous Memory Allocator"
kernel/dma/Makefile

@@ -8,5 +8,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT)	+= coherent.o
 obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o
 obj-$(CONFIG_SWIOTLB)			+= swiotlb.o
 obj-$(CONFIG_DMA_COHERENT_POOL)		+= pool.o
-obj-$(CONFIG_DMA_REMAP)			+= remap.o
+obj-$(CONFIG_MMU)			+= remap.o
 obj-$(CONFIG_DMA_MAP_BENCHMARK)		+= map_benchmark.o
kernel/dma/debug.c

@@ -927,7 +927,7 @@ static __init int dma_debug_cmdline(char *str)
 		global_disable = true;
 	}
 
-	return 0;
+	return 1;
 }
 
 static __init int dma_debug_entries_cmdline(char *str)
@@ -936,7 +936,7 @@ static __init int dma_debug_entries_cmdline(char *str)
 		return -EINVAL;
 	if (!get_option(&str, &nr_prealloc_entries))
 		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
-	return 0;
+	return 1;
 }
 
 __setup("dma_debug=", dma_debug_cmdline);
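The one-character fix above matters because of the __setup() calling convention: a handler returns 1 to say the argument was consumed, while returning 0 marks it unhandled, so the kernel would forward "dma_debug=..." to init as an unknown parameter. A toy handler showing the convention ("example_opt" and "example_setup" are illustrative names, not from the patch):

#include <linux/init.h>
#include <linux/types.h>

static bool example_flag __initdata;

static int __init example_setup(char *str)
{
	example_flag = true;
	return 1;	/* handled -- do not forward to init */
}
__setup("example_opt=", example_setup);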
kernel/dma/direct.c

@@ -265,17 +265,13 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 	if (!page)
 		return NULL;
+
+	/*
+	 * dma_alloc_contiguous can return highmem pages depending on a
+	 * combination the cma= arguments and per-arch setup. These need to be
+	 * remapped to return a kernel virtual address.
+	 */
 	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup,
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here, so
-		 * log an error and fail.
-		 */
-		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
-			dev_info(dev, "Rejecting highmem page from CMA.\n");
-			goto out_free_pages;
-		}
 		remap = true;
 		set_uncached = false;
 	}
@@ -349,7 +345,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		vunmap(cpu_addr);
 	} else {
 		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
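The free path above can distinguish the two cases cheaply because remapped buffers live in the vmalloc range while ordinary page allocations carry linear-map addresses. A rough sketch of the underlying mechanics, simplified from dma_common_contiguous_remap() (VM_MAP stands in for the kernel's internal vmap flag; not the exact implementation):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Give a (possibly highmem) physically contiguous allocation a kernel
 * virtual address by vmap()ing its pages. Freeing such a buffer then
 * needs vunmap() rather than free_pages(), which is exactly what
 * is_vmalloc_addr() selects between in dma_direct_free().
 */
static void *example_remap_contiguous(struct page *page, size_t size,
				      pgprot_t prot)
{
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	void *vaddr;

	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);
	vaddr = vmap(pages, count, VM_MAP, prot);
	kfree(pages);	/* vmap() does not keep the array in this mode */
	return vaddr;
}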
kernel/dma/map_benchmark.c

@@ -11,6 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
+#include <linux/map_benchmark.h>
 #include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -18,30 +19,6 @@
 #include <linux/slab.h>
 #include <linux/timekeeping.h>
 
-#define DMA_MAP_BENCHMARK	_IOWR('d', 1, struct map_benchmark)
-#define DMA_MAP_MAX_THREADS	1024
-#define DMA_MAP_MAX_SECONDS	300
-#define DMA_MAP_MAX_TRANS_DELAY	(10 * NSEC_PER_MSEC)
-
-#define DMA_MAP_BIDIRECTIONAL	0
-#define DMA_MAP_TO_DEVICE	1
-#define DMA_MAP_FROM_DEVICE	2
-
-struct map_benchmark {
-	__u64 avg_map_100ns;	/* average map latency in 100ns */
-	__u64 map_stddev;	/* standard deviation of map latency */
-	__u64 avg_unmap_100ns;	/* as above */
-	__u64 unmap_stddev;
-	__u32 threads;		/* how many threads will do map/unmap in parallel */
-	__u32 seconds;		/* how long the test will last */
-	__s32 node;		/* which numa node this benchmark will run on */
-	__u32 dma_bits;		/* DMA addressing capability */
-	__u32 dma_dir;		/* DMA data direction */
-	__u32 dma_trans_ns;	/* time for DMA transmission in ns */
-	__u32 granule;		/* how many PAGE_SIZE will do map/unmap once a time */
-	__u8 expansion[76];	/* For future use */
-};
-
 struct map_benchmark_data {
 	struct map_benchmark bparam;
 	struct device *dev;
kernel/dma/swiotlb.c

@@ -21,40 +21,33 @@
 #define pr_fmt(fmt) "software IO TLB: " fmt
 
 #include <linux/cache.h>
+#include <linux/cc_platform.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
-#include <linux/mm.h>
 #include <linux/export.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/iommu-helper.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/pfn.h>
+#include <linux/scatterlist.h>
+#include <linux/set_memory.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/swiotlb.h>
-#include <linux/pfn.h>
 #include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/scatterlist.h>
-#include <linux/cc_platform.h>
-#include <linux/set_memory.h>
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#endif
 #ifdef CONFIG_DMA_RESTRICTED_POOL
-#include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/slab.h>
 #endif
 
-#include <asm/io.h>
 #include <asm/dma.h>
 
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <linux/iommu-helper.h>
-
 #define CREATE_TRACE_POINTS
 #include <trace/events/swiotlb.h>
@@ -207,8 +200,6 @@ void __init swiotlb_update_mem_attributes(void)
 	mem->vaddr = swiotlb_mem_remap(mem, bytes);
 	if (!mem->vaddr)
 		mem->vaddr = vaddr;
-
-	memset(mem->vaddr, 0, bytes);
 }
 
 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
@@ -763,47 +754,29 @@ bool is_swiotlb_active(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(is_swiotlb_active);
 
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *debugfs_dir;
-
-static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem)
+static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
+					 const char *dirname)
 {
+	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
+	if (!mem->nslabs)
+		return;
+
 	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
 	debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
 }
 
-static int __init swiotlb_create_default_debugfs(void)
+static int __init __maybe_unused swiotlb_create_default_debugfs(void)
 {
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
-
-	debugfs_dir = debugfs_create_dir("swiotlb", NULL);
-	if (mem->nslabs) {
-		mem->debugfs = debugfs_dir;
-		swiotlb_create_debugfs_files(mem);
-	}
+	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
 	return 0;
 }
 
+#ifdef CONFIG_DEBUG_FS
 late_initcall(swiotlb_create_default_debugfs);
-
 #endif
 
 #ifdef CONFIG_DMA_RESTRICTED_POOL
 
-#ifdef CONFIG_DEBUG_FS
-static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
-{
-	struct io_tlb_mem *mem = rmem->priv;
-
-	mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir);
-	swiotlb_create_debugfs_files(mem);
-}
-#else
-static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
-{
-}
-#endif
-
 struct page *swiotlb_alloc(struct device *dev, size_t size)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
@@ -850,8 +823,7 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 	if (!mem)
 		return -ENOMEM;
 
-	mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs),
-			     GFP_KERNEL);
+	mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
 	if (!mem->slots) {
 		kfree(mem);
 		return -ENOMEM;
@@ -865,7 +837,7 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 
 		rmem->priv = mem;
 
-		rmem_swiotlb_debugfs_init(rmem);
+		swiotlb_create_debugfs_files(mem, rmem->name);
 	}
 
 	dev->dma_io_tlb_mem = mem;
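Two small points about the swiotlb hunks above. The debugfs simplification relies on a parenting trick: the default instance passes its own still-NULL io_tlb_default_mem.debugfs pointer, so "swiotlb" lands at the debugfs root, and restricted-pool directories created later nest beneath it. And the kcalloc conversion is purely cosmetic: both spellings overflow-check the nslabs * sizeof() product and return zeroed memory. A sketch of that equivalence ("example_slot" is an illustrative stand-in for struct io_tlb_slot):

#include <linux/slab.h>

struct example_slot {
	phys_addr_t orig_addr;
	size_t alloc_size;
};

static struct example_slot *example_alloc_slots(unsigned long nslabs)
{
	/*
	 * Same semantics as
	 * kzalloc(array_size(sizeof(struct example_slot), nslabs), ...):
	 * the multiplication is overflow-checked and the memory zeroed;
	 * kcalloc() is simply the canonical spelling for a zeroed array.
	 */
	return kcalloc(nslabs, sizeof(struct example_slot), GFP_KERNEL);
}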
samples/v4l/v4l2-pci-skeleton.c

@@ -766,7 +766,7 @@ static int skeleton_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ret = pci_enable_device(pdev);
 	if (ret)
 		return ret;
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret) {
 		dev_err(&pdev->dev, "no suitable DMA available.\n");
 		goto disable_pci;
tools/testing/selftests/dma/map_benchmark.c

@@ -10,40 +10,17 @@
 #include <unistd.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
+#include <linux/map_benchmark.h>
 #include <linux/types.h>
 
 #define NSEC_PER_MSEC	1000000L
 
-#define DMA_MAP_BENCHMARK	_IOWR('d', 1, struct map_benchmark)
-#define DMA_MAP_MAX_THREADS	1024
-#define DMA_MAP_MAX_SECONDS	300
-#define DMA_MAP_MAX_TRANS_DELAY	(10 * NSEC_PER_MSEC)
-
-#define DMA_MAP_BIDIRECTIONAL	0
-#define DMA_MAP_TO_DEVICE	1
-#define DMA_MAP_FROM_DEVICE	2
-
 static char *directions[] = {
 	"BIDIRECTIONAL",
 	"TO_DEVICE",
 	"FROM_DEVICE",
 };
 
-struct map_benchmark {
-	__u64 avg_map_100ns;	/* average map latency in 100ns */
-	__u64 map_stddev;	/* standard deviation of map latency */
-	__u64 avg_unmap_100ns;	/* as above */
-	__u64 unmap_stddev;
-	__u32 threads;		/* how many threads will do map/unmap in parallel */
-	__u32 seconds;		/* how long the test will last */
-	__s32 node;		/* which numa node this benchmark will run on */
-	__u32 dma_bits;		/* DMA addressing capability */
-	__u32 dma_dir;		/* DMA data direction */
-	__u32 dma_trans_ns;	/* time for DMA transmission in ns */
-	__u32 granule;		/* how many PAGE_SIZE will do map/unmap once a time */
-	__u8 expansion[76];	/* For future use */
-};
-
 int main(int argc, char **argv)
 {
 	struct map_benchmark map;
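With the duplicated definitions gone, the selftest compiles against the same <linux/map_benchmark.h> the kernel uses, so the two copies of the ABI can no longer drift apart silently. A typical invocation, assuming the option letters of the in-tree test (threads, seconds, direction, DMA mask bits, granule; verify against the test source for your tree):

./dma_map_benchmark -t 8 -s 30 -d 0 -b 32 -g 1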