IOMMU Updates for Linux v5.3

Merge tag 'iommu-updates-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - Make the dma-iommu code more generic so that it can be used outside
   of the ARM context with other IOMMU drivers. The goal is to make use
   of it on x86 too.

 - Generic IOMMU domain support for the Intel VT-d driver. The driver
   now makes more use of common IOMMU code to allocate default domains
   for the devices it handles.

 - An IOMMU fault reporting API to userspace. With that, IOMMU fault
   handling can be done in user space, for example to forward the
   faults to a VM (a registration sketch follows the commit list
   below).

 - Better handling for reserved regions requested by the firmware.
   These can be 'relaxed' now, meaning that they don't prevent a device
   from being attached to a VM.

 - Suspend/resume support for the Renesas IOMMU driver.

 - Support for dumping SVA-related fields of the DMAR table in the
   Intel VT-d driver via debugfs.

 - A pile of smaller fixes and cleanups.
* tag 'iommu-updates-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (90 commits)
  iommu/omap: No need to check return value of debugfs_create functions
  iommu/arm-smmu-v3: Invalidate ATC when detaching a device
  iommu/arm-smmu-v3: Fix compilation when CONFIG_CMA=n
  iommu/vt-d: Cleanup unused variable
  iommu/amd: Flush not present cache in iommu_map_page
  iommu/amd: Only free resources once on init error
  iommu/amd: Move gart fallback to amd_iommu_init
  iommu/amd: Make iommu_disable safer
  iommu/io-pgtable: Support non-coherent page tables
  iommu/io-pgtable: Replace IO_PGTABLE_QUIRK_NO_DMA with specific flag
  iommu/io-pgtable-arm: Add support to use system cache
  iommu/arm-smmu-v3: Increase maximum size of queues
  iommu/vt-d: Silence a variable set but not used
  iommu/vt-d: Remove an unused variable "length"
  iommu: Fix integer truncation
  iommu: Add padding to struct iommu_fault
  iommu/vt-d: Consolidate domain_init() to avoid duplication
  iommu/vt-d: Cleanup after delegating DMA domain to generic iommu
  iommu/vt-d: Fix suspicious RCU usage in probe_acpi_namespace_devices()
  iommu/vt-d: Allow DMA domain attaching to rmrr locked device
  ...
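The fault reporting entry above lands as a per-device registration interface. The sketch below is a minimal, hypothetical consumer and is not code from this merge: the demo_* names and the decision to merely log the fault are illustrative; it only assumes the iommu_register_device_fault_handler() / struct iommu_fault interface introduced this cycle.

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical consumer of the fault report API: a driver that wants to
 * hear about IOMMU faults for its device, e.g. to forward them to user
 * space or a VM. Only the registration pattern is shown.
 */
static int demo_iommu_fault_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;

	if (fault->type == IOMMU_FAULT_PAGE_REQ)
		dev_info(dev, "recoverable page request fault reported\n");
	else
		dev_warn(dev, "unrecoverable IOMMU fault reported\n");

	/* A real consumer would queue the fault for user space here. */
	return 0;
}

static int demo_enable_fault_reporting(struct device *dev)
{
	return iommu_register_device_fault_handler(dev,
						   demo_iommu_fault_handler,
						   dev);
}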
This commit is contained in commit 6b04014f3f.
@@ -24,3 +24,12 @@ Description:	/sys/kernel/iommu_groups/reserved_regions list IOVA
		region is described on a single line: the 1st field is
		the base IOVA, the second is the end IOVA and the third
		field describes the type of the region.

What:		/sys/kernel/iommu_groups/reserved_regions
Date:		June 2019
KernelVersion:	v5.3
Contact:	Eric Auger <eric.auger@redhat.com>
Description:	In case an RMRR is used only by graphics or USB devices
		it is now exposed as "direct-relaxable" instead of "direct".
		In device assignment use case, for instance, those RMRR
		are considered to be relaxable and safe.
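To make the format documented above concrete, here is a small user-space sketch. It is not part of this merge, the group number is an arbitrary example, and it relies only on the "base IOVA, end IOVA, type" line layout described in the entry, where the type can now also read "direct-relaxable".

#include <stdio.h>

/* Print every reserved region of IOMMU group 0 (example group number). */
int main(void)
{
	unsigned long long base, end;
	char type[32], line[128];
	FILE *f = fopen("/sys/kernel/iommu_groups/0/reserved_regions", "r");

	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		/* Each line: <base IOVA> <end IOVA> <type> */
		if (sscanf(line, "%llx %llx %31s", &base, &end, type) == 3)
			printf("[%#llx - %#llx] %s\n", base, end, type);
	}

	fclose(f);
	return 0;
}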
@ -1,24 +1,13 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* SWIOTLB-based DMA API implementation
|
||||
*
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
* Author: Catalin Marinas <catalin.marinas@arm.com>
|
||||
*/
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/swiotlb.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/dma-iommu.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
@ -47,37 +36,6 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
|
||||
__dma_flush_area(page_address(page), size);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IOMMU_DMA
|
||||
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
|
||||
struct page *page, size_t size)
|
||||
{
|
||||
int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
|
||||
|
||||
if (!ret)
|
||||
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
|
||||
unsigned long pfn, size_t size)
|
||||
{
|
||||
int ret = -ENXIO;
|
||||
unsigned long nr_vma_pages = vma_pages(vma);
|
||||
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
unsigned long off = vma->vm_pgoff;
|
||||
|
||||
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
|
||||
ret = remap_pfn_range(vma, vma->vm_start,
|
||||
pfn + off,
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif /* CONFIG_IOMMU_DMA */
|
||||
|
||||
static int __init arm64_dma_init(void)
|
||||
{
|
||||
return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
|
||||
@ -85,374 +43,11 @@ static int __init arm64_dma_init(void)
|
||||
arch_initcall(arm64_dma_init);
|
||||
|
||||
#ifdef CONFIG_IOMMU_DMA
|
||||
#include <linux/dma-iommu.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/amba/bus.h>
|
||||
|
||||
/* Thankfully, all cache ops are by VA so we can ignore phys here */
|
||||
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
|
||||
{
|
||||
__dma_flush_area(virt, PAGE_SIZE);
|
||||
}
|
||||
|
||||
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp,
|
||||
unsigned long attrs)
|
||||
{
|
||||
bool coherent = dev_is_dma_coherent(dev);
|
||||
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
|
||||
size_t iosize = size;
|
||||
void *addr;
|
||||
|
||||
if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
|
||||
return NULL;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
/*
|
||||
* Some drivers rely on this, and we probably don't want the
|
||||
* possibility of stale kernel data being read by devices anyway.
|
||||
*/
|
||||
gfp |= __GFP_ZERO;
|
||||
|
||||
if (!gfpflags_allow_blocking(gfp)) {
|
||||
struct page *page;
|
||||
/*
|
||||
* In atomic context we can't remap anything, so we'll only
|
||||
* get the virtually contiguous buffer we need by way of a
|
||||
* physically contiguous allocation.
|
||||
*/
|
||||
if (coherent) {
|
||||
page = alloc_pages(gfp, get_order(size));
|
||||
addr = page ? page_address(page) : NULL;
|
||||
} else {
|
||||
addr = dma_alloc_from_pool(size, &page, gfp);
|
||||
}
|
||||
if (!addr)
|
||||
return NULL;
|
||||
|
||||
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
|
||||
if (*handle == DMA_MAPPING_ERROR) {
|
||||
if (coherent)
|
||||
__free_pages(page, get_order(size));
|
||||
else
|
||||
dma_free_from_pool(addr, size);
|
||||
addr = NULL;
|
||||
}
|
||||
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
|
||||
pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
|
||||
struct page *page;
|
||||
|
||||
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
|
||||
get_order(size), gfp & __GFP_NOWARN);
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
|
||||
if (*handle == DMA_MAPPING_ERROR) {
|
||||
dma_release_from_contiguous(dev, page,
|
||||
size >> PAGE_SHIFT);
|
||||
return NULL;
|
||||
}
|
||||
addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
|
||||
prot,
|
||||
__builtin_return_address(0));
|
||||
if (addr) {
|
||||
if (!coherent)
|
||||
__dma_flush_area(page_to_virt(page), iosize);
|
||||
memset(addr, 0, size);
|
||||
} else {
|
||||
iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
|
||||
dma_release_from_contiguous(dev, page,
|
||||
size >> PAGE_SHIFT);
|
||||
}
|
||||
} else {
|
||||
pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
|
||||
struct page **pages;
|
||||
|
||||
pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
|
||||
handle, flush_page);
|
||||
if (!pages)
|
||||
return NULL;
|
||||
|
||||
addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
|
||||
__builtin_return_address(0));
|
||||
if (!addr)
|
||||
iommu_dma_free(dev, pages, iosize, handle);
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs)
|
||||
{
|
||||
size_t iosize = size;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
/*
|
||||
* @cpu_addr will be one of 4 things depending on how it was allocated:
|
||||
* - A remapped array of pages for contiguous allocations.
|
||||
* - A remapped array of pages from iommu_dma_alloc(), for all
|
||||
* non-atomic allocations.
|
||||
* - A non-cacheable alias from the atomic pool, for atomic
|
||||
* allocations by non-coherent devices.
|
||||
* - A normal lowmem address, for atomic allocations by
|
||||
* coherent devices.
|
||||
* Hence how dodgy the below logic looks...
|
||||
*/
|
||||
if (dma_in_atomic_pool(cpu_addr, size)) {
|
||||
iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
|
||||
dma_free_from_pool(cpu_addr, size);
|
||||
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
|
||||
struct page *page = vmalloc_to_page(cpu_addr);
|
||||
|
||||
iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
|
||||
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
|
||||
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
|
||||
} else if (is_vmalloc_addr(cpu_addr)){
|
||||
struct vm_struct *area = find_vm_area(cpu_addr);
|
||||
|
||||
if (WARN_ON(!area || !area->pages))
|
||||
return;
|
||||
iommu_dma_free(dev, area->pages, iosize, &handle);
|
||||
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
|
||||
} else {
|
||||
iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
|
||||
__free_pages(virt_to_page(cpu_addr), get_order(size));
|
||||
}
|
||||
}
|
||||
|
||||
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct vm_struct *area;
|
||||
int ret;
|
||||
|
||||
vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
|
||||
|
||||
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
|
||||
return ret;
|
||||
|
||||
if (!is_vmalloc_addr(cpu_addr)) {
|
||||
unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
|
||||
return __swiotlb_mmap_pfn(vma, pfn, size);
|
||||
}
|
||||
|
||||
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
|
||||
/*
|
||||
* DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
|
||||
* hence in the vmalloc space.
|
||||
*/
|
||||
unsigned long pfn = vmalloc_to_pfn(cpu_addr);
|
||||
return __swiotlb_mmap_pfn(vma, pfn, size);
|
||||
}
|
||||
|
||||
area = find_vm_area(cpu_addr);
|
||||
if (WARN_ON(!area || !area->pages))
|
||||
return -ENXIO;
|
||||
|
||||
return iommu_dma_mmap(area->pages, size, vma);
|
||||
}
|
||||
|
||||
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
void *cpu_addr, dma_addr_t dma_addr,
|
||||
size_t size, unsigned long attrs)
|
||||
{
|
||||
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
struct vm_struct *area = find_vm_area(cpu_addr);
|
||||
|
||||
if (!is_vmalloc_addr(cpu_addr)) {
|
||||
struct page *page = virt_to_page(cpu_addr);
|
||||
return __swiotlb_get_sgtable_page(sgt, page, size);
|
||||
}
|
||||
|
||||
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
|
||||
/*
|
||||
* DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
|
||||
* hence in the vmalloc space.
|
||||
*/
|
||||
struct page *page = vmalloc_to_page(cpu_addr);
|
||||
return __swiotlb_get_sgtable_page(sgt, page, size);
|
||||
}
|
||||
|
||||
if (WARN_ON(!area || !area->pages))
|
||||
return -ENXIO;
|
||||
|
||||
return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
|
||||
GFP_KERNEL);
|
||||
}
|
||||
|
||||
static void __iommu_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t dev_addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
phys_addr_t phys;
|
||||
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return;
|
||||
|
||||
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
|
||||
arch_sync_dma_for_cpu(dev, phys, size, dir);
|
||||
}
|
||||
|
||||
static void __iommu_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t dev_addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
phys_addr_t phys;
|
||||
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return;
|
||||
|
||||
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
|
||||
arch_sync_dma_for_device(dev, phys, size, dir);
|
||||
}
|
||||
|
||||
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
bool coherent = dev_is_dma_coherent(dev);
|
||||
int prot = dma_info_to_prot(dir, coherent, attrs);
|
||||
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
|
||||
|
||||
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
|
||||
dev_addr != DMA_MAPPING_ERROR)
|
||||
__dma_map_area(page_address(page) + offset, size, dir);
|
||||
|
||||
return dev_addr;
|
||||
}
|
||||
|
||||
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
|
||||
|
||||
iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
|
||||
}
|
||||
|
||||
static void __iommu_sync_sg_for_cpu(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return;
|
||||
|
||||
for_each_sg(sgl, sg, nelems, i)
|
||||
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
|
||||
}
|
||||
|
||||
static void __iommu_sync_sg_for_device(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return;
|
||||
|
||||
for_each_sg(sgl, sg, nelems, i)
|
||||
arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
|
||||
}
|
||||
|
||||
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
|
||||
int nelems, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
bool coherent = dev_is_dma_coherent(dev);
|
||||
|
||||
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
|
||||
|
||||
return iommu_dma_map_sg(dev, sgl, nelems,
|
||||
dma_info_to_prot(dir, coherent, attrs));
|
||||
}
|
||||
|
||||
static void __iommu_unmap_sg_attrs(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
|
||||
|
||||
iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
|
||||
}
|
||||
|
||||
static const struct dma_map_ops iommu_dma_ops = {
|
||||
.alloc = __iommu_alloc_attrs,
|
||||
.free = __iommu_free_attrs,
|
||||
.mmap = __iommu_mmap_attrs,
|
||||
.get_sgtable = __iommu_get_sgtable,
|
||||
.map_page = __iommu_map_page,
|
||||
.unmap_page = __iommu_unmap_page,
|
||||
.map_sg = __iommu_map_sg_attrs,
|
||||
.unmap_sg = __iommu_unmap_sg_attrs,
|
||||
.sync_single_for_cpu = __iommu_sync_single_for_cpu,
|
||||
.sync_single_for_device = __iommu_sync_single_for_device,
|
||||
.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = __iommu_sync_sg_for_device,
|
||||
.map_resource = iommu_dma_map_resource,
|
||||
.unmap_resource = iommu_dma_unmap_resource,
|
||||
};
|
||||
|
||||
static int __init __iommu_dma_init(void)
|
||||
{
|
||||
return iommu_dma_init();
|
||||
}
|
||||
arch_initcall(__iommu_dma_init);
|
||||
|
||||
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *ops)
|
||||
{
|
||||
struct iommu_domain *domain;
|
||||
|
||||
if (!ops)
|
||||
return;
|
||||
|
||||
/*
|
||||
* The IOMMU core code allocates the default DMA domain, which the
|
||||
* underlying IOMMU driver needs to support via the dma-iommu layer.
|
||||
*/
|
||||
domain = iommu_get_domain_for_dev(dev);
|
||||
|
||||
if (!domain)
|
||||
goto out_err;
|
||||
|
||||
if (domain->type == IOMMU_DOMAIN_DMA) {
|
||||
if (iommu_dma_init_domain(domain, dma_base, size, dev))
|
||||
goto out_err;
|
||||
|
||||
dev->dma_ops = &iommu_dma_ops;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
out_err:
|
||||
pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
|
||||
dev_name(dev));
|
||||
}
|
||||
|
||||
void arch_teardown_dma_ops(struct device *dev)
|
||||
{
|
||||
dev->dma_ops = NULL;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu)
|
||||
{ }
|
||||
|
||||
#endif /* CONFIG_IOMMU_DMA */
|
||||
#endif
|
||||
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
@@ -466,7 +61,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			   ARCH_DMA_MINALIGN, cls);

	dev->dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
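The hunk above is the core of the arm64 conversion: instead of the architecture's own __iommu_setup_dma_ops() (removed further up), the generic iommu_setup_dma_ops() from drivers/iommu/dma-iommu.c is called. As a rough sketch of what the hook could look like on another architecture (the commit message names x86 as the eventual goal), assuming the same entry point and a dev->dma_coherent field are available there:

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/iommu.h>

/*
 * Hypothetical arch glue, not taken from this merge: with the DMA ops
 * implemented generically, the architecture only records coherency and
 * hands the device's DMA window to the dma-iommu layer.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;	/* arch-specific field, assumed here */
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
}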
@ -619,9 +619,9 @@ retry:
|
||||
pasid = ((event[0] >> 16) & 0xFFFF)
|
||||
| ((event[1] << 6) & 0xF0000);
|
||||
tag = event[1] & 0x03FF;
|
||||
dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
|
||||
dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
|
||||
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
|
||||
pasid, address, flags);
|
||||
pasid, address, flags, tag);
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
|
||||
@ -1295,6 +1295,16 @@ static void domain_flush_complete(struct protection_domain *domain)
|
||||
}
|
||||
}
|
||||
|
||||
/* Flush the not present cache if it exists */
|
||||
static void domain_flush_np_cache(struct protection_domain *domain,
|
||||
dma_addr_t iova, size_t size)
|
||||
{
|
||||
if (unlikely(amd_iommu_np_cache)) {
|
||||
domain_flush_pages(domain, iova, size);
|
||||
domain_flush_complete(domain);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* This function flushes the DTEs for all devices in domain
|
||||
@ -2377,10 +2387,7 @@ static dma_addr_t __map_single(struct device *dev,
|
||||
}
|
||||
address += offset;
|
||||
|
||||
if (unlikely(amd_iommu_np_cache)) {
|
||||
domain_flush_pages(&dma_dom->domain, address, size);
|
||||
domain_flush_complete(&dma_dom->domain);
|
||||
}
|
||||
domain_flush_np_cache(&dma_dom->domain, address, size);
|
||||
|
||||
out:
|
||||
return address;
|
||||
@ -2559,6 +2566,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
|
||||
s->dma_length = s->length;
|
||||
}
|
||||
|
||||
if (s)
|
||||
domain_flush_np_cache(domain, s->dma_address, s->dma_length);
|
||||
|
||||
return nelems;
|
||||
|
||||
out_unmap:
|
||||
@ -2597,7 +2607,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
|
||||
struct protection_domain *domain;
|
||||
struct dma_ops_domain *dma_dom;
|
||||
unsigned long startaddr;
|
||||
int npages = 2;
|
||||
int npages;
|
||||
|
||||
domain = get_domain(dev);
|
||||
if (IS_ERR(domain))
|
||||
@ -3039,6 +3049,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
|
||||
ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
|
||||
mutex_unlock(&domain->api_lock);
|
||||
|
||||
domain_flush_np_cache(domain, iova, page_size);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -406,6 +406,9 @@ static void iommu_enable(struct amd_iommu *iommu)
|
||||
|
||||
static void iommu_disable(struct amd_iommu *iommu)
|
||||
{
|
||||
if (!iommu->mmio_base)
|
||||
return;
|
||||
|
||||
/* Disable command buffer */
|
||||
iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
|
||||
|
||||
@ -2325,15 +2328,6 @@ static void __init free_iommu_resources(void)
|
||||
amd_iommu_dev_table = NULL;
|
||||
|
||||
free_iommu_all();
|
||||
|
||||
#ifdef CONFIG_GART_IOMMU
|
||||
/*
|
||||
* We failed to initialize the AMD IOMMU - try fallback to GART
|
||||
* if possible.
|
||||
*/
|
||||
gart_iommu_init();
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
/* SB IOAPIC is always on this device in AMD systems */
|
||||
@ -2625,8 +2619,6 @@ static int __init state_next(void)
|
||||
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
|
||||
if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
|
||||
pr_info("AMD IOMMU disabled on kernel command-line\n");
|
||||
free_dma_resources();
|
||||
free_iommu_resources();
|
||||
init_state = IOMMU_CMDLINE_DISABLED;
|
||||
ret = -EINVAL;
|
||||
}
|
||||
@ -2667,6 +2659,19 @@ static int __init state_next(void)
|
||||
BUG();
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
free_dma_resources();
|
||||
if (!irq_remapping_enabled) {
|
||||
disable_iommus();
|
||||
free_iommu_resources();
|
||||
} else {
|
||||
struct amd_iommu *iommu;
|
||||
|
||||
uninit_device_table_dma();
|
||||
for_each_iommu(iommu)
|
||||
iommu_flush_all_caches(iommu);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2740,17 +2745,15 @@ static int __init amd_iommu_init(void)
|
||||
int ret;
|
||||
|
||||
ret = iommu_go_to_state(IOMMU_INITIALIZED);
|
||||
if (ret) {
|
||||
free_dma_resources();
|
||||
if (!irq_remapping_enabled) {
|
||||
disable_iommus();
|
||||
free_iommu_resources();
|
||||
} else {
|
||||
uninit_device_table_dma();
|
||||
for_each_iommu(iommu)
|
||||
iommu_flush_all_caches(iommu);
|
||||
}
|
||||
#ifdef CONFIG_GART_IOMMU
|
||||
if (ret && list_empty(&amd_iommu_list)) {
|
||||
/*
|
||||
* We failed to initialize the AMD IOMMU - try fallback
|
||||
* to GART if possible.
|
||||
*/
|
||||
gart_iommu_init();
|
||||
}
|
||||
#endif
|
||||
|
||||
for_each_iommu(iommu)
|
||||
amd_iommu_debugfs_setup(iommu);
|
||||
|
@@ -192,6 +192,13 @@
#define Q_BASE_ADDR_MASK	GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE		GENMASK(4, 0)

/* Ensure DMA allocations are naturally aligned */
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT		(PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
#define Q_MAX_SZ_SHIFT		(PAGE_SHIFT + MAX_ORDER - 1)
#endif

/*
 * Stream table.
 *
@ -289,8 +296,9 @@
|
||||
FIELD_GET(ARM64_TCR_##fld, tcr))
|
||||
|
||||
/* Command queue */
|
||||
#define CMDQ_ENT_DWORDS 2
|
||||
#define CMDQ_MAX_SZ_SHIFT 8
|
||||
#define CMDQ_ENT_SZ_SHIFT 4
|
||||
#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
|
||||
#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
|
||||
|
||||
#define CMDQ_CONS_ERR GENMASK(30, 24)
|
||||
#define CMDQ_ERR_CERROR_NONE_IDX 0
|
||||
@ -336,14 +344,16 @@
|
||||
#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
|
||||
|
||||
/* Event queue */
|
||||
#define EVTQ_ENT_DWORDS 4
|
||||
#define EVTQ_MAX_SZ_SHIFT 7
|
||||
#define EVTQ_ENT_SZ_SHIFT 5
|
||||
#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
|
||||
#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
|
||||
|
||||
#define EVTQ_0_ID GENMASK_ULL(7, 0)
|
||||
|
||||
/* PRI queue */
|
||||
#define PRIQ_ENT_DWORDS 2
|
||||
#define PRIQ_MAX_SZ_SHIFT 8
|
||||
#define PRIQ_ENT_SZ_SHIFT 4
|
||||
#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
|
||||
#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
|
||||
|
||||
#define PRIQ_0_SID GENMASK_ULL(31, 0)
|
||||
#define PRIQ_0_SSID GENMASK_ULL(51, 32)
|
||||
@ -798,7 +808,7 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
|
||||
/* High-level queue accessors */
|
||||
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
|
||||
{
|
||||
memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
|
||||
memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
|
||||
cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
|
||||
|
||||
switch (ent->opcode) {
|
||||
@ -1785,13 +1795,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
|
||||
.pgsize_bitmap = smmu->pgsize_bitmap,
|
||||
.ias = ias,
|
||||
.oas = oas,
|
||||
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
|
||||
.tlb = &arm_smmu_gather_ops,
|
||||
.iommu_dev = smmu->dev,
|
||||
};
|
||||
|
||||
if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
|
||||
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
|
||||
|
||||
if (smmu_domain->non_strict)
|
||||
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
|
||||
|
||||
@ -1884,9 +1892,13 @@ static int arm_smmu_enable_ats(struct arm_smmu_master *master)
|
||||
|
||||
static void arm_smmu_disable_ats(struct arm_smmu_master *master)
|
||||
{
|
||||
struct arm_smmu_cmdq_ent cmd;
|
||||
|
||||
if (!master->ats_enabled || !dev_is_pci(master->dev))
|
||||
return;
|
||||
|
||||
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
|
||||
arm_smmu_atc_inv_master(master, &cmd);
|
||||
pci_disable_ats(to_pci_dev(master->dev));
|
||||
master->ats_enabled = false;
|
||||
}
|
||||
@ -1906,7 +1918,6 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
|
||||
master->domain = NULL;
|
||||
arm_smmu_install_ste_for_dev(master);
|
||||
|
||||
/* Disabling ATS invalidates all ATC entries */
|
||||
arm_smmu_disable_ats(master);
|
||||
}
|
||||
|
||||
@ -2270,17 +2281,32 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_queue *q,
|
||||
unsigned long prod_off,
|
||||
unsigned long cons_off,
|
||||
size_t dwords)
|
||||
size_t dwords, const char *name)
|
||||
{
|
||||
size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
|
||||
size_t qsz;
|
||||
|
||||
do {
|
||||
qsz = ((1 << q->max_n_shift) * dwords) << 3;
|
||||
q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
|
||||
GFP_KERNEL);
|
||||
if (q->base || qsz < PAGE_SIZE)
|
||||
break;
|
||||
|
||||
q->max_n_shift--;
|
||||
} while (1);
|
||||
|
||||
q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
|
||||
if (!q->base) {
|
||||
dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
|
||||
qsz);
|
||||
dev_err(smmu->dev,
|
||||
"failed to allocate queue (0x%zx bytes) for %s\n",
|
||||
qsz, name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (!WARN_ON(q->base_dma & (qsz - 1))) {
|
||||
dev_info(smmu->dev, "allocated %u entries for %s\n",
|
||||
1 << q->max_n_shift, name);
|
||||
}
|
||||
|
||||
q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
|
||||
q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
|
||||
q->ent_dwords = dwords;
|
||||
@ -2300,13 +2326,15 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
|
||||
/* cmdq */
|
||||
spin_lock_init(&smmu->cmdq.lock);
|
||||
ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
|
||||
ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
|
||||
ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
|
||||
"cmdq");
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* evtq */
|
||||
ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
|
||||
ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
|
||||
ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
|
||||
"evtq");
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2315,7 +2343,8 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
|
||||
return 0;
|
||||
|
||||
return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
|
||||
ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
|
||||
ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
|
||||
"priq");
|
||||
}
|
||||
|
||||
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
|
||||
@@ -2879,7 +2908,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	/* Queue sizes, capped to ensure natural alignment */
	smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
					 FIELD_GET(IDR1_CMDQS, reg));
	if (!smmu->cmdq.q.max_n_shift) {
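As a worked example of the new cap (illustrative configuration: 4K pages, so PAGE_SHIFT = 12, and CONFIG_CMA_ALIGNMENT at its usual default of 8): Q_MAX_SZ_SHIFT becomes 20, i.e. up to 1 MiB per queue allocation. With 16-byte commands (CMDQ_ENT_SZ_SHIFT = 4) that allows 2^16 = 65536 command queue entries, and with 32-byte events (EVTQ_ENT_SZ_SHIFT = 5) 2^15 = 32768 event queue entries, where the old code capped every queue at 4 KiB (256 commands, 128 events). The hardware-reported IDR1 sizes still apply on top, and arm_smmu_init_one_queue() now falls back to smaller allocations if the full size cannot be obtained. The arithmetic as a compile-time check:

/* Illustrative values only; not kernel code from this merge. */
#define EX_PAGE_SHIFT		12
#define EX_CMA_ALIGNMENT	8
#define EX_Q_MAX_SZ_SHIFT	(EX_PAGE_SHIFT + EX_CMA_ALIGNMENT)	/* 20 -> 1 MiB */

#define EX_CMDQ_ENT_SZ_SHIFT	4	/* 16-byte commands */
#define EX_EVTQ_ENT_SZ_SHIFT	5	/* 32-byte events */

_Static_assert(EX_Q_MAX_SZ_SHIFT - EX_CMDQ_ENT_SZ_SHIFT == 16,
	       "65536 commands max, vs. 256 under the old 4 KiB cap");
_Static_assert(EX_Q_MAX_SZ_SHIFT - EX_EVTQ_ENT_SZ_SHIFT == 15,
	       "32768 events max, vs. 128 under the old 4 KiB cap");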
@@ -892,13 +892,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->tlb_ops,
		.iommu_dev	= smmu->dev,
	};

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

@ -10,7 +10,9 @@
|
||||
|
||||
#include <linux/acpi_iort.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-contiguous.h>
|
||||
#include <linux/dma-iommu.h>
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/huge_mm.h>
|
||||
#include <linux/iommu.h>
|
||||
@ -67,11 +69,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
|
||||
return cookie;
|
||||
}
|
||||
|
||||
int iommu_dma_init(void)
|
||||
{
|
||||
return iova_cache_get();
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_get_dma_cookie - Acquire DMA-API resources for a domain
|
||||
* @domain: IOMMU domain to prepare for DMA-API usage
|
||||
@ -229,8 +226,8 @@ resv_iova:
|
||||
start = window->res->end - window->offset + 1;
|
||||
/* If window is last entry */
|
||||
if (window->node.next == &bridge->dma_ranges &&
|
||||
end != ~(dma_addr_t)0) {
|
||||
end = ~(dma_addr_t)0;
|
||||
end != ~(phys_addr_t)0) {
|
||||
end = ~(phys_addr_t)0;
|
||||
goto resv_iova;
|
||||
}
|
||||
}
|
||||
@ -302,7 +299,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
|
||||
* to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
|
||||
* any change which could make prior IOVAs invalid will fail.
|
||||
*/
|
||||
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
||||
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
||||
u64 size, struct device *dev)
|
||||
{
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
@ -353,7 +350,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
||||
|
||||
return iova_reserve_iommu_regions(dev, domain);
|
||||
}
|
||||
EXPORT_SYMBOL(iommu_dma_init_domain);
|
||||
|
||||
/**
|
||||
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
|
||||
@ -364,7 +360,7 @@ EXPORT_SYMBOL(iommu_dma_init_domain);
|
||||
*
|
||||
* Return: corresponding IOMMU API page protection flags
|
||||
*/
|
||||
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
|
||||
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
|
||||
unsigned long attrs)
|
||||
{
|
||||
int prot = coherent ? IOMMU_CACHE : 0;
|
||||
@ -441,9 +437,10 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
|
||||
size >> iova_shift(iovad));
|
||||
}
|
||||
|
||||
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
|
||||
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
|
||||
size_t size)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_dma_domain(dev);
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
struct iova_domain *iovad = &cookie->iovad;
|
||||
size_t iova_off = iova_offset(iovad, dma_addr);
|
||||
@ -457,6 +454,30 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
|
||||
iommu_dma_free_iova(cookie, dma_addr, size);
|
||||
}
|
||||
|
||||
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
|
||||
size_t size, int prot)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_dma_domain(dev);
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
size_t iova_off = 0;
|
||||
dma_addr_t iova;
|
||||
|
||||
if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
|
||||
iova_off = iova_offset(&cookie->iovad, phys);
|
||||
size = iova_align(&cookie->iovad, size + iova_off);
|
||||
}
|
||||
|
||||
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
|
||||
if (!iova)
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
|
||||
iommu_dma_free_iova(cookie, iova, size);
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
return iova + iova_off;
|
||||
}
|
||||
|
||||
static void __iommu_dma_free_pages(struct page **pages, int count)
|
||||
{
|
||||
while (count--)
|
||||
@ -522,55 +543,45 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
|
||||
return pages;
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
|
||||
* @dev: Device which owns this buffer
|
||||
* @pages: Array of buffer pages as returned by iommu_dma_alloc()
|
||||
* @size: Size of buffer in bytes
|
||||
* @handle: DMA address of buffer
|
||||
*
|
||||
* Frees both the pages associated with the buffer, and the array
|
||||
* describing them
|
||||
*/
|
||||
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
|
||||
dma_addr_t *handle)
|
||||
static struct page **__iommu_dma_get_pages(void *cpu_addr)
|
||||
{
|
||||
__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
|
||||
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
|
||||
*handle = DMA_MAPPING_ERROR;
|
||||
struct vm_struct *area = find_vm_area(cpu_addr);
|
||||
|
||||
if (!area || !area->pages)
|
||||
return NULL;
|
||||
return area->pages;
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
|
||||
* iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
|
||||
* @dev: Device to allocate memory for. Must be a real device
|
||||
* attached to an iommu_dma_domain
|
||||
* @size: Size of buffer in bytes
|
||||
* @dma_handle: Out argument for allocated DMA handle
|
||||
* @gfp: Allocation flags
|
||||
* @attrs: DMA attributes for this allocation
|
||||
* @prot: IOMMU mapping flags
|
||||
* @handle: Out argument for allocated DMA handle
|
||||
* @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
|
||||
* given VA/PA are visible to the given non-coherent device.
|
||||
*
|
||||
* If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
|
||||
* but an IOMMU which supports smaller pages might not map the whole thing.
|
||||
*
|
||||
* Return: Array of struct page pointers describing the buffer,
|
||||
* or NULL on failure.
|
||||
* Return: Mapped virtual address, or NULL on failure.
|
||||
*/
|
||||
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
|
||||
unsigned long attrs, int prot, dma_addr_t *handle,
|
||||
void (*flush_page)(struct device *, const void *, phys_addr_t))
|
||||
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_dma_domain(dev);
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
struct iova_domain *iovad = &cookie->iovad;
|
||||
bool coherent = dev_is_dma_coherent(dev);
|
||||
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
|
||||
pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
|
||||
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
|
||||
struct page **pages;
|
||||
struct sg_table sgt;
|
||||
dma_addr_t iova;
|
||||
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
|
||||
void *vaddr;
|
||||
|
||||
*handle = DMA_MAPPING_ERROR;
|
||||
*dma_handle = DMA_MAPPING_ERROR;
|
||||
|
||||
min_size = alloc_sizes & -alloc_sizes;
|
||||
if (min_size < PAGE_SIZE) {
|
||||
@ -596,26 +607,29 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
|
||||
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
|
||||
goto out_free_iova;
|
||||
|
||||
if (!(prot & IOMMU_CACHE)) {
|
||||
struct sg_mapping_iter miter;
|
||||
/*
|
||||
* The CPU-centric flushing implied by SG_MITER_TO_SG isn't
|
||||
* sufficient here, so skip it by using the "wrong" direction.
|
||||
*/
|
||||
sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
|
||||
while (sg_miter_next(&miter))
|
||||
flush_page(dev, miter.addr, page_to_phys(miter.page));
|
||||
sg_miter_stop(&miter);
|
||||
if (!(ioprot & IOMMU_CACHE)) {
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
|
||||
arch_dma_prep_coherent(sg_page(sg), sg->length);
|
||||
}
|
||||
|
||||
if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
|
||||
if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
|
||||
< size)
|
||||
goto out_free_sg;
|
||||
|
||||
*handle = iova;
|
||||
sg_free_table(&sgt);
|
||||
return pages;
|
||||
vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
|
||||
__builtin_return_address(0));
|
||||
if (!vaddr)
|
||||
goto out_unmap;
|
||||
|
||||
*dma_handle = iova;
|
||||
sg_free_table(&sgt);
|
||||
return vaddr;
|
||||
|
||||
out_unmap:
|
||||
__iommu_dma_unmap(dev, iova, size);
|
||||
out_free_sg:
|
||||
sg_free_table(&sgt);
|
||||
out_free_iova:
|
||||
@ -626,54 +640,94 @@ out_free_pages:
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_dma_mmap - Map a buffer into provided user VMA
|
||||
* @pages: Array representing buffer from iommu_dma_alloc()
|
||||
* __iommu_dma_mmap - Map a buffer into provided user VMA
|
||||
* @pages: Array representing buffer from __iommu_dma_alloc()
|
||||
* @size: Size of buffer in bytes
|
||||
* @vma: VMA describing requested userspace mapping
|
||||
*
|
||||
* Maps the pages of the buffer in @pages into @vma. The caller is responsible
|
||||
* for verifying the correct size and protection of @vma beforehand.
|
||||
*/
|
||||
|
||||
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
|
||||
static int __iommu_dma_mmap(struct page **pages, size_t size,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
|
||||
size_t size, int prot, struct iommu_domain *domain)
|
||||
static void iommu_dma_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
size_t iova_off = 0;
|
||||
dma_addr_t iova;
|
||||
phys_addr_t phys;
|
||||
|
||||
if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
|
||||
iova_off = iova_offset(&cookie->iovad, phys);
|
||||
size = iova_align(&cookie->iovad, size + iova_off);
|
||||
}
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return;
|
||||
|
||||
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
|
||||
if (!iova)
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
|
||||
iommu_dma_free_iova(cookie, iova, size);
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
return iova + iova_off;
|
||||
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
|
||||
arch_sync_dma_for_cpu(dev, phys, size, dir);
|
||||
}
|
||||
|
||||
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, int prot)
|
||||
static void iommu_dma_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
|
||||
iommu_get_dma_domain(dev));
|
||||
phys_addr_t phys;
|
||||
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return;
|
||||
|
||||
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
|
||||
arch_sync_dma_for_device(dev, phys, size, dir);
|
||||
}
|
||||
|
||||
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return;
|
||||
|
||||
for_each_sg(sgl, sg, nelems, i)
|
||||
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
|
||||
}
|
||||
|
||||
static void iommu_dma_sync_sg_for_device(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
if (dev_is_dma_coherent(dev))
|
||||
return;
|
||||
|
||||
for_each_sg(sgl, sg, nelems, i)
|
||||
arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
|
||||
}
|
||||
|
||||
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
phys_addr_t phys = page_to_phys(page) + offset;
|
||||
bool coherent = dev_is_dma_coherent(dev);
|
||||
int prot = dma_info_to_prot(dir, coherent, attrs);
|
||||
dma_addr_t dma_handle;
|
||||
|
||||
dma_handle =__iommu_dma_map(dev, phys, size, prot);
|
||||
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
|
||||
dma_handle != DMA_MAPPING_ERROR)
|
||||
arch_sync_dma_for_device(dev, phys, size, dir);
|
||||
return dma_handle;
|
||||
}
|
||||
|
||||
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
|
||||
__iommu_dma_unmap(dev, dma_handle, size);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -758,18 +812,22 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
|
||||
* impedance-matching, to be able to hand off a suitably-aligned list,
|
||||
* but still preserve the original offsets and sizes for the caller.
|
||||
*/
|
||||
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, int prot)
|
||||
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_dma_domain(dev);
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
struct iova_domain *iovad = &cookie->iovad;
|
||||
struct scatterlist *s, *prev = NULL;
|
||||
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
|
||||
dma_addr_t iova;
|
||||
size_t iova_len = 0;
|
||||
unsigned long mask = dma_get_seg_boundary(dev);
|
||||
int i;
|
||||
|
||||
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
|
||||
|
||||
/*
|
||||
* Work out how much IOVA space we need, and align the segments to
|
||||
* IOVA granules for the IOMMU driver to handle. With some clever
|
||||
@ -829,12 +887,16 @@ out_restore_sg:
|
||||
return 0;
|
||||
}
|
||||
|
||||
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
dma_addr_t start, end;
|
||||
struct scatterlist *tmp;
|
||||
int i;
|
||||
|
||||
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
|
||||
|
||||
/*
|
||||
* The scatterlist segments are mapped into a single
|
||||
* contiguous IOVA allocation, so this is incredibly easy.
|
||||
@ -846,21 +908,231 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
sg = tmp;
|
||||
}
|
||||
end = sg_dma_address(sg) + sg_dma_len(sg);
|
||||
__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
|
||||
__iommu_dma_unmap(dev, start, end - start);
|
||||
}
|
||||
|
||||
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
|
||||
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
return __iommu_dma_map(dev, phys, size,
|
||||
dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
|
||||
iommu_get_dma_domain(dev));
|
||||
dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
|
||||
}
|
||||
|
||||
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
|
||||
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
|
||||
__iommu_dma_unmap(dev, handle, size);
|
||||
}
|
||||
|
||||
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
|
||||
{
|
||||
size_t alloc_size = PAGE_ALIGN(size);
|
||||
int count = alloc_size >> PAGE_SHIFT;
|
||||
struct page *page = NULL, **pages = NULL;
|
||||
|
||||
/* Non-coherent atomic allocation? Easy */
|
||||
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
|
||||
dma_free_from_pool(cpu_addr, alloc_size))
|
||||
return;
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
|
||||
/*
|
||||
* If it the address is remapped, then it's either non-coherent
|
||||
* or highmem CMA, or an iommu_dma_alloc_remap() construction.
|
||||
*/
|
||||
pages = __iommu_dma_get_pages(cpu_addr);
|
||||
if (!pages)
|
||||
page = vmalloc_to_page(cpu_addr);
|
||||
dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
|
||||
} else {
|
||||
/* Lowmem means a coherent atomic or CMA allocation */
|
||||
page = virt_to_page(cpu_addr);
|
||||
}
|
||||
|
||||
if (pages)
|
||||
__iommu_dma_free_pages(pages, count);
|
||||
if (page && !dma_release_from_contiguous(dev, page, count))
|
||||
__free_pages(page, get_order(alloc_size));
|
||||
}
|
||||
|
||||
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs)
|
||||
{
|
||||
__iommu_dma_unmap(dev, handle, size);
|
||||
__iommu_dma_free(dev, size, cpu_addr);
|
||||
}
|
||||
|
||||
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
|
||||
struct page **pagep, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
bool coherent = dev_is_dma_coherent(dev);
|
||||
size_t alloc_size = PAGE_ALIGN(size);
|
||||
struct page *page = NULL;
|
||||
void *cpu_addr;
|
||||
|
||||
if (gfpflags_allow_blocking(gfp))
|
||||
page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
|
||||
get_order(alloc_size),
|
||||
gfp & __GFP_NOWARN);
|
||||
if (!page)
|
||||
page = alloc_pages(gfp, get_order(alloc_size));
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
|
||||
pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
|
||||
|
||||
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
|
||||
VM_USERMAP, prot, __builtin_return_address(0));
|
||||
if (!cpu_addr)
|
||||
goto out_free_pages;
|
||||
|
||||
if (!coherent)
|
||||
arch_dma_prep_coherent(page, size);
|
||||
} else {
|
||||
cpu_addr = page_address(page);
|
||||
}
|
||||
|
||||
*pagep = page;
|
||||
memset(cpu_addr, 0, alloc_size);
|
||||
return cpu_addr;
|
||||
out_free_pages:
|
||||
if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
|
||||
__free_pages(page, get_order(alloc_size));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *iommu_dma_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
bool coherent = dev_is_dma_coherent(dev);
|
||||
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
|
||||
struct page *page = NULL;
|
||||
void *cpu_addr;
|
||||
|
||||
gfp |= __GFP_ZERO;
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
|
||||
!(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
|
||||
return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
|
||||
!gfpflags_allow_blocking(gfp) && !coherent)
|
||||
cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
|
||||
else
|
||||
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
|
||||
if (!cpu_addr)
|
||||
return NULL;
|
||||
|
||||
*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
|
||||
if (*handle == DMA_MAPPING_ERROR) {
|
||||
__iommu_dma_free(dev, size, cpu_addr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return cpu_addr;
|
||||
}
|
||||
|
||||
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
unsigned long pfn, off = vma->vm_pgoff;
|
||||
int ret;
|
||||
|
||||
vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
|
||||
|
||||
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
|
||||
return ret;
|
||||
|
||||
if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
|
||||
return -ENXIO;
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
|
||||
struct page **pages = __iommu_dma_get_pages(cpu_addr);
|
||||
|
||||
if (pages)
|
||||
return __iommu_dma_mmap(pages, size, vma);
|
||||
pfn = vmalloc_to_pfn(cpu_addr);
|
||||
} else {
|
||||
pfn = page_to_pfn(virt_to_page(cpu_addr));
|
||||
}
|
||||
|
||||
return remap_pfn_range(vma, vma->vm_start, pfn + off,
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot);
|
||||
}
|
||||
|
||||
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct page *page;
|
||||
int ret;
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
|
||||
struct page **pages = __iommu_dma_get_pages(cpu_addr);
|
||||
|
||||
if (pages) {
|
||||
return sg_alloc_table_from_pages(sgt, pages,
|
||||
PAGE_ALIGN(size) >> PAGE_SHIFT,
|
||||
0, size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
page = vmalloc_to_page(cpu_addr);
|
||||
} else {
|
||||
page = virt_to_page(cpu_addr);
|
||||
}
|
||||
|
||||
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
|
||||
if (!ret)
|
||||
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct dma_map_ops iommu_dma_ops = {
|
||||
.alloc = iommu_dma_alloc,
|
||||
.free = iommu_dma_free,
|
||||
.mmap = iommu_dma_mmap,
|
||||
.get_sgtable = iommu_dma_get_sgtable,
|
||||
.map_page = iommu_dma_map_page,
|
||||
.unmap_page = iommu_dma_unmap_page,
|
||||
.map_sg = iommu_dma_map_sg,
|
||||
.unmap_sg = iommu_dma_unmap_sg,
|
||||
.sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
|
||||
.sync_single_for_device = iommu_dma_sync_single_for_device,
|
||||
.sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = iommu_dma_sync_sg_for_device,
|
||||
.map_resource = iommu_dma_map_resource,
|
||||
.unmap_resource = iommu_dma_unmap_resource,
|
||||
};
|
||||
|
||||
/*
|
||||
* The IOMMU core code allocates the default DMA domain, which the underlying
|
||||
* IOMMU driver needs to support via the dma-iommu layer.
|
||||
*/
|
||||
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
|
||||
if (!domain)
|
||||
goto out_err;
|
||||
|
||||
/*
|
||||
* The IOMMU core code allocates the default DMA domain, which the
|
||||
* underlying IOMMU driver needs to support via the dma-iommu layer.
|
||||
*/
|
||||
if (domain->type == IOMMU_DOMAIN_DMA) {
|
||||
if (iommu_dma_init_domain(domain, dma_base, size, dev))
|
||||
goto out_err;
|
||||
dev->dma_ops = &iommu_dma_ops;
|
||||
}
|
||||
|
||||
return;
|
||||
out_err:
|
||||
pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
|
||||
dev_name(dev));
|
||||
}
|
||||
|
||||
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
|
||||
@ -881,7 +1153,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
|
||||
if (!msi_page)
|
||||
return NULL;
|
||||
|
||||
iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
|
||||
iova = __iommu_dma_map(dev, msi_addr, size, prot);
|
||||
if (iova == DMA_MAPPING_ERROR)
|
||||
goto out_free_page;
|
||||
|
||||
@ -943,3 +1215,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
|
||||
msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
|
||||
msg->address_lo += lower_32_bits(msi_page->iova);
|
||||
}
|
||||
|
||||
static int iommu_dma_init(void)
|
||||
{
|
||||
return iova_cache_get();
|
||||
}
|
||||
arch_initcall(iommu_dma_init);
|
||||
|
@ -14,6 +14,17 @@
|
||||
|
||||
#include <asm/irq_remapping.h>
|
||||
|
||||
#include "intel-pasid.h"
|
||||
|
||||
struct tbl_walk {
|
||||
u16 bus;
|
||||
u16 devfn;
|
||||
u32 pasid;
|
||||
struct root_entry *rt_entry;
|
||||
struct context_entry *ctx_entry;
|
||||
struct pasid_entry *pasid_tbl_entry;
|
||||
};
|
||||
|
||||
struct iommu_regset {
|
||||
int offset;
|
||||
const char *regs;
|
||||
@ -131,16 +142,86 @@ out:
|
||||
}
|
||||
DEFINE_SHOW_ATTRIBUTE(iommu_regset);
|
||||
|
||||
static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
|
||||
int bus)
|
||||
static inline void print_tbl_walk(struct seq_file *m)
|
||||
{
|
||||
struct tbl_walk *tbl_wlk = m->private;
|
||||
|
||||
seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
|
||||
tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
|
||||
PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
|
||||
tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
|
||||
tbl_wlk->ctx_entry->lo);
|
||||
|
||||
/*
|
||||
* A legacy mode DMAR doesn't support PASID, hence default it to -1
|
||||
* indicating that it's invalid. Also, default all PASID related fields
|
||||
* to 0.
|
||||
*/
|
||||
if (!tbl_wlk->pasid_tbl_entry)
|
||||
seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
|
||||
(u64)0, (u64)0, (u64)0);
|
||||
else
|
||||
seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
|
||||
tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0],
|
||||
tbl_wlk->pasid_tbl_entry->val[1],
|
||||
tbl_wlk->pasid_tbl_entry->val[2]);
|
||||
}
|
||||
|
||||
static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
|
||||
u16 dir_idx)
|
||||
{
|
||||
struct tbl_walk *tbl_wlk = m->private;
|
||||
u8 tbl_idx;
|
||||
|
||||
for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
|
||||
if (pasid_pte_is_present(tbl_entry)) {
|
||||
tbl_wlk->pasid_tbl_entry = tbl_entry;
|
||||
tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
|
||||
print_tbl_walk(m);
|
||||
}
|
||||
|
||||
tbl_entry++;
|
||||
}
|
||||
}
|
||||
|
||||
static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
|
||||
u16 pasid_dir_size)
|
||||
{
|
||||
struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
|
||||
struct pasid_entry *pasid_tbl;
|
||||
u16 dir_idx;
|
||||
|
||||
for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
|
||||
pasid_tbl = get_pasid_table_from_pde(dir_entry);
|
||||
if (pasid_tbl)
|
||||
pasid_tbl_walk(m, pasid_tbl, dir_idx);
|
||||
|
||||
dir_entry++;
|
||||
}
|
||||
}
|
||||
|
||||
static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
|
||||
{
|
||||
struct context_entry *context;
|
||||
int devfn;
|
||||
|
||||
seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
|
||||
seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n");
|
||||
u16 devfn, pasid_dir_size;
|
||||
u64 pasid_dir_ptr;
|
||||
|
||||
for (devfn = 0; devfn < 256; devfn++) {
|
||||
struct tbl_walk tbl_wlk = {0};
|
||||
|
||||
/*
|
||||
* Scalable mode root entry points to upper scalable mode
|
||||
* context table and lower scalable mode context table. Each
|
||||
* scalable mode context table has 128 context entries where as
|
||||
* legacy mode context table has 256 context entries. So in
|
||||
* scalable mode, the context entries for former 128 devices are
|
||||
* in the lower scalable mode context table, while the latter
|
||||
* 128 devices are in the upper scalable mode context table.
|
||||
* In scalable mode, when devfn > 127, iommu_context_addr()
|
||||
* automatically refers to upper scalable mode context table and
|
||||
* hence the caller doesn't have to worry about differences
|
||||
* between scalable mode and non scalable mode.
|
||||
*/
|
||||
context = iommu_context_addr(iommu, bus, devfn, 0);
|
||||
if (!context)
|
||||
return;
|
||||
@ -148,33 +229,41 @@ static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
|
||||
if (!context_present(context))
|
||||
continue;
|
||||
|
||||
seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
|
||||
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
|
||||
context[0].hi, context[0].lo);
|
||||
tbl_wlk.bus = bus;
|
||||
tbl_wlk.devfn = devfn;
|
||||
tbl_wlk.rt_entry = &iommu->root_entry[bus];
|
||||
tbl_wlk.ctx_entry = context;
|
||||
m->private = &tbl_wlk;
|
||||
|
||||
if (pasid_supported(iommu) && is_pasid_enabled(context)) {
|
||||
pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
|
||||
pasid_dir_size = get_pasid_dir_size(context);
|
||||
pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
|
||||
continue;
|
||||
}
|
||||
|
||||
print_tbl_walk(m);
|
||||
}
|
||||
}
|
||||
|
||||
static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
|
||||
static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
|
||||
{
|
||||
unsigned long flags;
|
||||
int bus;
|
||||
u16 bus;
|
||||
|
||||
spin_lock_irqsave(&iommu->lock, flags);
|
||||
seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
|
||||
seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
|
||||
(u64)virt_to_phys(iommu->root_entry));
|
||||
seq_puts(m, "Root Table Entries:\n");
|
||||
seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
|
||||
|
||||
for (bus = 0; bus < 256; bus++) {
|
||||
if (!(iommu->root_entry[bus].lo & 1))
|
||||
continue;
|
||||
/*
|
||||
* No need to check if the root entry is present or not because
|
||||
* iommu_context_addr() performs the same check before returning
|
||||
* context entry.
|
||||
*/
|
||||
for (bus = 0; bus < 256; bus++)
|
||||
ctx_tbl_walk(m, iommu, bus);
|
||||
|
||||
seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
|
||||
iommu->root_entry[bus].hi,
|
||||
iommu->root_entry[bus].lo);
|
||||
|
||||
ctx_tbl_entry_show(m, iommu, bus);
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
spin_unlock_irqrestore(&iommu->lock, flags);
|
||||
}
|
||||
|
||||
@ -185,7 +274,7 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_active_iommu(iommu, drhd) {
|
||||
root_tbl_entry_show(m, iommu);
|
||||
root_tbl_walk(m, iommu);
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
File diff suppressed because it is too large
@@ -169,23 +169,6 @@ attach_out:
	return 0;
}

/* Get PRESENT bit of a PASID directory entry. */
static inline bool
pasid_pde_is_present(struct pasid_dir_entry *pde)
{
	return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}

/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
	if (!pasid_pde_is_present(pde))
		return NULL;

	return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}

void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
@@ -18,6 +18,10 @@
#define PDE_PFN_MASK			PAGE_MASK
#define PASID_PDE_SHIFT			6
#define MAX_NR_PASID_BITS		20
#define PASID_TBL_ENTRIES		BIT(PASID_PDE_SHIFT)

#define is_pasid_enabled(entry)		(((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry)	(1 << ((((entry)->lo >> 9) & 0x7) + 7))

/*
 * Domain ID reserved for pasid entries programmed for first-level
@@ -49,6 +53,28 @@ struct pasid_table {
	struct list_head	dev;		/* device list */
};

/* Get PRESENT bit of a PASID directory entry. */
static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
{
	return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}

/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
	if (!pasid_pde_is_present(pde))
		return NULL;

	return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}

/* Get PRESENT bit of a PASID table entry. */
static inline bool pasid_pte_is_present(struct pasid_entry *pte)
{
	return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}

extern u32 intel_pasid_max_id;
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
void intel_pasid_free_id(int pasid);
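The PASID helpers above are what the new VT-d debugfs dump builds on. Below is a minimal walker sketch using them; the dump_one_pasid callback and the assumption that the caller already resolved the directory pointer and its size (e.g. via get_pasid_dir_size()) are illustrative, not taken from the patches.

/*
 * Sketch only: 'dump_one_pasid' is a hypothetical callback.  The helpers and
 * constants used here (pasid_pde_is_present(), get_pasid_table_from_pde(),
 * PASID_TBL_ENTRIES, PASID_PDE_SHIFT) are the ones declared above.
 */
static void walk_pasid_dir(struct pasid_dir_entry *dir, u16 dir_size,
			   void (*dump_one_pasid)(u32 pasid,
						  struct pasid_entry *pte))
{
	u16 dir_idx, tbl_idx;

	for (dir_idx = 0; dir_idx < dir_size; dir_idx++) {
		struct pasid_entry *tbl = get_pasid_table_from_pde(&dir[dir_idx]);

		if (!tbl)
			continue;

		for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
			if (!pasid_pte_is_present(&tbl[tbl_idx]))
				continue;

			dump_one_pasid((dir_idx << PASID_PDE_SHIFT) + tbl_idx,
				       &tbl[tbl_idx]);
		}
	}
}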
@@ -366,6 +366,21 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
		}

		list_add_tail(&svm->list, &global_svm_list);
	} else {
		/*
		 * Binding a new device with existing PASID, need to setup
		 * the PASID entry.
		 */
		spin_lock(&iommu->lock);
		ret = intel_pasid_setup_first_level(iommu, dev,
						mm ? mm->pgd : init_mm.pgd,
						svm->pasid, FLPT_DEFAULT_DID,
						mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
		spin_unlock(&iommu->lock);
		if (ret) {
			kfree(sdev);
			goto out;
		}
	}
	list_add_rcu(&sdev->list, &svm->devs);

@@ -101,7 +101,7 @@ static void init_ir_status(struct intel_iommu *iommu)
		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static int alloc_irte(struct intel_iommu *iommu, int irq,
static int alloc_irte(struct intel_iommu *iommu,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
@@ -1374,7 +1374,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
		goto out_free_parent;

	down_read(&dmar_global_lock);
	index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
	index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
	up_read(&dmar_global_lock);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");

@ -204,7 +204,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
|
||||
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
|
||||
goto out_free;
|
||||
}
|
||||
if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
|
||||
if (table && !cfg->coherent_walk) {
|
||||
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, dma))
|
||||
goto out_free;
|
||||
@ -238,7 +238,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
|
||||
struct device *dev = cfg->iommu_dev;
|
||||
size_t size = ARM_V7S_TABLE_SIZE(lvl);
|
||||
|
||||
if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
|
||||
if (!cfg->coherent_walk)
|
||||
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
|
||||
DMA_TO_DEVICE);
|
||||
if (lvl == 1)
|
||||
@ -250,7 +250,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
|
||||
static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
|
||||
struct io_pgtable_cfg *cfg)
|
||||
{
|
||||
if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
|
||||
if (cfg->coherent_walk)
|
||||
return;
|
||||
|
||||
dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
|
||||
@ -716,7 +716,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
|
||||
IO_PGTABLE_QUIRK_NO_PERMS |
|
||||
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
|
||||
IO_PGTABLE_QUIRK_ARM_MTK_4GB |
|
||||
IO_PGTABLE_QUIRK_NO_DMA |
|
||||
IO_PGTABLE_QUIRK_NON_STRICT))
|
||||
return NULL;
|
||||
|
||||
@ -779,8 +778,11 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
|
||||
/* TTBRs */
|
||||
cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
|
||||
ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
|
||||
ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
|
||||
ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
|
||||
(cfg->coherent_walk ?
|
||||
(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
|
||||
ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
|
||||
(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
|
||||
ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
|
||||
cfg->arm_v7s_cfg.ttbr[1] = 0;
|
||||
return &data->iop;
|
||||
|
||||
@ -835,7 +837,8 @@ static int __init arm_v7s_do_selftests(void)
|
||||
.tlb = &dummy_tlb_ops,
|
||||
.oas = 32,
|
||||
.ias = 32,
|
||||
.quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA,
|
||||
.coherent_walk = true,
|
||||
.quirks = IO_PGTABLE_QUIRK_ARM_NS,
|
||||
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
|
||||
};
|
||||
unsigned int iova, size, iova_start;
|
||||
|
@ -156,10 +156,12 @@
|
||||
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
|
||||
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
|
||||
#define ARM_LPAE_MAIR_ATTR_NC 0x44
|
||||
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA 0xf4
|
||||
#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
|
||||
#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
|
||||
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
|
||||
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
|
||||
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE 3
|
||||
|
||||
#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
|
||||
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
|
||||
@ -239,7 +241,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
|
||||
return NULL;
|
||||
|
||||
pages = page_address(p);
|
||||
if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
|
||||
if (!cfg->coherent_walk) {
|
||||
dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, dma))
|
||||
goto out_free;
|
||||
@ -265,7 +267,7 @@ out_free:
|
||||
static void __arm_lpae_free_pages(void *pages, size_t size,
|
||||
struct io_pgtable_cfg *cfg)
|
||||
{
|
||||
if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
|
||||
if (!cfg->coherent_walk)
|
||||
dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
|
||||
size, DMA_TO_DEVICE);
|
||||
free_pages((unsigned long)pages, get_order(size));
|
||||
@ -283,7 +285,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
|
||||
{
|
||||
*ptep = pte;
|
||||
|
||||
if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
|
||||
if (!cfg->coherent_walk)
|
||||
__arm_lpae_sync_pte(ptep, cfg);
|
||||
}
|
||||
|
||||
@ -361,8 +363,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
|
||||
|
||||
old = cmpxchg64_relaxed(ptep, curr, new);
|
||||
|
||||
if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
|
||||
(old & ARM_LPAE_PTE_SW_SYNC))
|
||||
if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
|
||||
return old;
|
||||
|
||||
/* Even if it's not ours, there's no point waiting; just kick it */
|
||||
@ -403,8 +404,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
|
||||
pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
|
||||
if (pte)
|
||||
__arm_lpae_free_pages(cptep, tblsz, cfg);
|
||||
} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
|
||||
!(pte & ARM_LPAE_PTE_SW_SYNC)) {
|
||||
} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
|
||||
__arm_lpae_sync_pte(ptep, cfg);
|
||||
}
|
||||
|
||||
@ -459,6 +459,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
|
||||
else if (prot & IOMMU_CACHE)
|
||||
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
|
||||
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
|
||||
else if (prot & IOMMU_QCOM_SYS_CACHE)
|
||||
pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
|
||||
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
|
||||
}
|
||||
|
||||
if (prot & IOMMU_NOEXEC)
|
||||
@ -783,7 +786,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
|
||||
u64 reg;
|
||||
struct arm_lpae_io_pgtable *data;
|
||||
|
||||
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
|
||||
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
|
||||
IO_PGTABLE_QUIRK_NON_STRICT))
|
||||
return NULL;
|
||||
|
||||
@ -792,9 +795,15 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
|
||||
return NULL;
|
||||
|
||||
/* TCR */
|
||||
reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
|
||||
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
|
||||
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
|
||||
if (cfg->coherent_walk) {
|
||||
reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
|
||||
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
|
||||
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
|
||||
} else {
|
||||
reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
|
||||
(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
|
||||
(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
|
||||
}
|
||||
|
||||
switch (ARM_LPAE_GRANULE(data)) {
|
||||
case SZ_4K:
|
||||
@ -846,7 +855,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
|
||||
(ARM_LPAE_MAIR_ATTR_WBRWA
|
||||
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
|
||||
(ARM_LPAE_MAIR_ATTR_DEVICE
|
||||
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
|
||||
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
|
||||
(ARM_LPAE_MAIR_ATTR_INC_OWBRWA
|
||||
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
|
||||
|
||||
cfg->arm_lpae_s1_cfg.mair[0] = reg;
|
||||
cfg->arm_lpae_s1_cfg.mair[1] = 0;
|
||||
@ -876,8 +887,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
|
||||
struct arm_lpae_io_pgtable *data;
|
||||
|
||||
/* The NS quirk doesn't apply at stage 2 */
|
||||
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
|
||||
IO_PGTABLE_QUIRK_NON_STRICT))
|
||||
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
|
||||
return NULL;
|
||||
|
||||
data = arm_lpae_alloc_pgtable(cfg);
|
||||
@ -1212,7 +1222,7 @@ static int __init arm_lpae_do_selftests(void)
|
||||
struct io_pgtable_cfg cfg = {
|
||||
.tlb = &dummy_tlb_ops,
|
||||
.oas = 48,
|
||||
.quirks = IO_PGTABLE_QUIRK_NO_DMA,
|
||||
.coherent_walk = true,
|
||||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
|
||||
|
@ -61,10 +61,11 @@ struct iommu_group_attribute {
|
||||
};
|
||||
|
||||
static const char * const iommu_group_resv_type_string[] = {
|
||||
[IOMMU_RESV_DIRECT] = "direct",
|
||||
[IOMMU_RESV_RESERVED] = "reserved",
|
||||
[IOMMU_RESV_MSI] = "msi",
|
||||
[IOMMU_RESV_SW_MSI] = "msi",
|
||||
[IOMMU_RESV_DIRECT] = "direct",
|
||||
[IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
|
||||
[IOMMU_RESV_RESERVED] = "reserved",
|
||||
[IOMMU_RESV_MSI] = "msi",
|
||||
[IOMMU_RESV_SW_MSI] = "msi",
|
||||
};
|
||||
|
||||
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
|
||||
@ -95,15 +96,43 @@ void iommu_device_unregister(struct iommu_device *iommu)
|
||||
spin_unlock(&iommu_device_lock);
|
||||
}
|
||||
|
||||
static struct iommu_param *iommu_get_dev_param(struct device *dev)
|
||||
{
|
||||
struct iommu_param *param = dev->iommu_param;
|
||||
|
||||
if (param)
|
||||
return param;
|
||||
|
||||
param = kzalloc(sizeof(*param), GFP_KERNEL);
|
||||
if (!param)
|
||||
return NULL;
|
||||
|
||||
mutex_init(¶m->lock);
|
||||
dev->iommu_param = param;
|
||||
return param;
|
||||
}
|
||||
|
||||
static void iommu_free_dev_param(struct device *dev)
|
||||
{
|
||||
kfree(dev->iommu_param);
|
||||
dev->iommu_param = NULL;
|
||||
}
|
||||
|
||||
int iommu_probe_device(struct device *dev)
|
||||
{
|
||||
const struct iommu_ops *ops = dev->bus->iommu_ops;
|
||||
int ret = -EINVAL;
|
||||
int ret;
|
||||
|
||||
WARN_ON(dev->iommu_group);
|
||||
if (!ops)
|
||||
return -EINVAL;
|
||||
|
||||
if (ops)
|
||||
ret = ops->add_device(dev);
|
||||
if (!iommu_get_dev_param(dev))
|
||||
return -ENOMEM;
|
||||
|
||||
ret = ops->add_device(dev);
|
||||
if (ret)
|
||||
iommu_free_dev_param(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -114,6 +143,8 @@ void iommu_release_device(struct device *dev)
|
||||
|
||||
if (dev->iommu_group)
|
||||
ops->remove_device(dev);
|
||||
|
||||
iommu_free_dev_param(dev);
|
||||
}
|
||||
|
||||
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
|
||||
@ -225,18 +256,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
|
||||
pos = pos->next;
|
||||
} else if ((start >= a) && (end <= b)) {
|
||||
if (new->type == type)
|
||||
goto done;
|
||||
return 0;
|
||||
else
|
||||
pos = pos->next;
|
||||
} else {
|
||||
if (new->type == type) {
|
||||
phys_addr_t new_start = min(a, start);
|
||||
phys_addr_t new_end = max(b, end);
|
||||
int ret;
|
||||
|
||||
list_del(&entry->list);
|
||||
entry->start = new_start;
|
||||
entry->length = new_end - new_start + 1;
|
||||
iommu_insert_resv_region(entry, regions);
|
||||
ret = iommu_insert_resv_region(entry, regions);
|
||||
kfree(entry);
|
||||
return ret;
|
||||
} else {
|
||||
pos = pos->next;
|
||||
}
|
||||
@ -249,7 +283,6 @@ insert:
|
||||
return -ENOMEM;
|
||||
|
||||
list_add_tail(®ion->list, pos);
|
||||
done:
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -561,7 +594,8 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
|
||||
start = ALIGN(entry->start, pg_size);
|
||||
end = ALIGN(entry->start + entry->length, pg_size);
|
||||
|
||||
if (entry->type != IOMMU_RESV_DIRECT)
|
||||
if (entry->type != IOMMU_RESV_DIRECT &&
|
||||
entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
|
||||
continue;
|
||||
|
||||
for (addr = start; addr < end; addr += pg_size) {
|
||||
@ -842,6 +876,206 @@ int iommu_group_unregister_notifier(struct iommu_group *group,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
|
||||
|
||||
/**
|
||||
* iommu_register_device_fault_handler() - Register a device fault handler
|
||||
* @dev: the device
|
||||
* @handler: the fault handler
|
||||
* @data: private data passed as argument to the handler
|
||||
*
|
||||
* When an IOMMU fault event is received, this handler gets called with the
|
||||
* fault event and data as argument. The handler should return 0 on success. If
|
||||
* the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
|
||||
* complete the fault by calling iommu_page_response() with one of the following
|
||||
* response code:
|
||||
* - IOMMU_PAGE_RESP_SUCCESS: retry the translation
|
||||
* - IOMMU_PAGE_RESP_INVALID: terminate the fault
|
||||
* - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
|
||||
* page faults if possible.
|
||||
*
|
||||
* Return 0 if the fault handler was installed successfully, or an error.
|
||||
*/
|
||||
int iommu_register_device_fault_handler(struct device *dev,
|
||||
iommu_dev_fault_handler_t handler,
|
||||
void *data)
|
||||
{
|
||||
struct iommu_param *param = dev->iommu_param;
|
||||
int ret = 0;
|
||||
|
||||
if (!param)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(¶m->lock);
|
||||
/* Only allow one fault handler registered for each device */
|
||||
if (param->fault_param) {
|
||||
ret = -EBUSY;
|
||||
goto done_unlock;
|
||||
}
|
||||
|
||||
get_device(dev);
|
||||
param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
|
||||
if (!param->fault_param) {
|
||||
put_device(dev);
|
||||
ret = -ENOMEM;
|
||||
goto done_unlock;
|
||||
}
|
||||
param->fault_param->handler = handler;
|
||||
param->fault_param->data = data;
|
||||
mutex_init(¶m->fault_param->lock);
|
||||
INIT_LIST_HEAD(¶m->fault_param->faults);
|
||||
|
||||
done_unlock:
|
||||
mutex_unlock(¶m->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
|
||||
|
||||
/**
|
||||
* iommu_unregister_device_fault_handler() - Unregister the device fault handler
|
||||
* @dev: the device
|
||||
*
|
||||
* Remove the device fault handler installed with
|
||||
* iommu_register_device_fault_handler().
|
||||
*
|
||||
* Return 0 on success, or an error.
|
||||
*/
|
||||
int iommu_unregister_device_fault_handler(struct device *dev)
|
||||
{
|
||||
struct iommu_param *param = dev->iommu_param;
|
||||
int ret = 0;
|
||||
|
||||
if (!param)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(¶m->lock);
|
||||
|
||||
if (!param->fault_param)
|
||||
goto unlock;
|
||||
|
||||
/* we cannot unregister handler if there are pending faults */
|
||||
if (!list_empty(¶m->fault_param->faults)) {
|
||||
ret = -EBUSY;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
kfree(param->fault_param);
|
||||
param->fault_param = NULL;
|
||||
put_device(dev);
|
||||
unlock:
|
||||
mutex_unlock(¶m->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
|
||||
|
||||
/**
|
||||
* iommu_report_device_fault() - Report fault event to device driver
|
||||
* @dev: the device
|
||||
* @evt: fault event data
|
||||
*
|
||||
* Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
|
||||
* handler. When this function fails and the fault is recoverable, it is the
|
||||
* caller's responsibility to complete the fault.
|
||||
*
|
||||
* Return 0 on success, or an error.
|
||||
*/
|
||||
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
|
||||
{
|
||||
struct iommu_param *param = dev->iommu_param;
|
||||
struct iommu_fault_event *evt_pending = NULL;
|
||||
struct iommu_fault_param *fparam;
|
||||
int ret = 0;
|
||||
|
||||
if (!param || !evt)
|
||||
return -EINVAL;
|
||||
|
||||
/* we only report device fault if there is a handler registered */
|
||||
mutex_lock(¶m->lock);
|
||||
fparam = param->fault_param;
|
||||
if (!fparam || !fparam->handler) {
|
||||
ret = -EINVAL;
|
||||
goto done_unlock;
|
||||
}
|
||||
|
||||
if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
|
||||
(evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
|
||||
evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
|
||||
GFP_KERNEL);
|
||||
if (!evt_pending) {
|
||||
ret = -ENOMEM;
|
||||
goto done_unlock;
|
||||
}
|
||||
mutex_lock(&fparam->lock);
|
||||
list_add_tail(&evt_pending->list, &fparam->faults);
|
||||
mutex_unlock(&fparam->lock);
|
||||
}
|
||||
|
||||
ret = fparam->handler(&evt->fault, fparam->data);
|
||||
if (ret && evt_pending) {
|
||||
mutex_lock(&fparam->lock);
|
||||
list_del(&evt_pending->list);
|
||||
mutex_unlock(&fparam->lock);
|
||||
kfree(evt_pending);
|
||||
}
|
||||
done_unlock:
|
||||
mutex_unlock(¶m->lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iommu_report_device_fault);
|
||||
|
||||
int iommu_page_response(struct device *dev,
|
||||
struct iommu_page_response *msg)
|
||||
{
|
||||
bool pasid_valid;
|
||||
int ret = -EINVAL;
|
||||
struct iommu_fault_event *evt;
|
||||
struct iommu_fault_page_request *prm;
|
||||
struct iommu_param *param = dev->iommu_param;
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
|
||||
if (!domain || !domain->ops->page_response)
|
||||
return -ENODEV;
|
||||
|
||||
if (!param || !param->fault_param)
|
||||
return -EINVAL;
|
||||
|
||||
if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
|
||||
msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
|
||||
return -EINVAL;
|
||||
|
||||
/* Only send response if there is a fault report pending */
|
||||
mutex_lock(¶m->fault_param->lock);
|
||||
if (list_empty(¶m->fault_param->faults)) {
|
||||
dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
|
||||
goto done_unlock;
|
||||
}
|
||||
/*
|
||||
* Check if we have a matching page request pending to respond,
|
||||
* otherwise return -EINVAL
|
||||
*/
|
||||
list_for_each_entry(evt, ¶m->fault_param->faults, list) {
|
||||
prm = &evt->fault.prm;
|
||||
pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
|
||||
|
||||
if ((pasid_valid && prm->pasid != msg->pasid) ||
|
||||
prm->grpid != msg->grpid)
|
||||
continue;
|
||||
|
||||
/* Sanitize the reply */
|
||||
msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
|
||||
|
||||
ret = domain->ops->page_response(dev, evt, msg);
|
||||
list_del(&evt->list);
|
||||
kfree(evt);
|
||||
break;
|
||||
}
|
||||
|
||||
done_unlock:
|
||||
mutex_unlock(¶m->fault_param->lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iommu_page_response);
|
||||
|
||||
/**
|
||||
* iommu_group_id - Return ID for a group
|
||||
* @group: the group to ID
|
||||
@ -1895,24 +2129,23 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
|
||||
return region;
|
||||
}
|
||||
|
||||
/* Request that a device is direct mapped by the IOMMU */
|
||||
int iommu_request_dm_for_dev(struct device *dev)
|
||||
static int
|
||||
request_default_domain_for_dev(struct device *dev, unsigned long type)
|
||||
{
|
||||
struct iommu_domain *dm_domain;
|
||||
struct iommu_domain *domain;
|
||||
struct iommu_group *group;
|
||||
int ret;
|
||||
|
||||
/* Device must already be in a group before calling this function */
|
||||
group = iommu_group_get_for_dev(dev);
|
||||
if (IS_ERR(group))
|
||||
return PTR_ERR(group);
|
||||
group = iommu_group_get(dev);
|
||||
if (!group)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&group->mutex);
|
||||
|
||||
/* Check if the default domain is already direct mapped */
|
||||
ret = 0;
|
||||
if (group->default_domain &&
|
||||
group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
|
||||
if (group->default_domain && group->default_domain->type == type)
|
||||
goto out;
|
||||
|
||||
/* Don't change mappings of existing devices */
|
||||
@ -1922,23 +2155,26 @@ int iommu_request_dm_for_dev(struct device *dev)
|
||||
|
||||
/* Allocate a direct mapped domain */
|
||||
ret = -ENOMEM;
|
||||
dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
|
||||
if (!dm_domain)
|
||||
domain = __iommu_domain_alloc(dev->bus, type);
|
||||
if (!domain)
|
||||
goto out;
|
||||
|
||||
/* Attach the device to the domain */
|
||||
ret = __iommu_attach_group(dm_domain, group);
|
||||
ret = __iommu_attach_group(domain, group);
|
||||
if (ret) {
|
||||
iommu_domain_free(dm_domain);
|
||||
iommu_domain_free(domain);
|
||||
goto out;
|
||||
}
|
||||
|
||||
iommu_group_create_direct_mappings(group, dev);
|
||||
|
||||
/* Make the direct mapped domain the default for this group */
|
||||
if (group->default_domain)
|
||||
iommu_domain_free(group->default_domain);
|
||||
group->default_domain = dm_domain;
|
||||
group->default_domain = domain;
|
||||
|
||||
dev_info(dev, "Using iommu direct mapping\n");
|
||||
dev_info(dev, "Using iommu %s mapping\n",
|
||||
type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
@ -1948,6 +2184,18 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Request that a device is direct mapped by the IOMMU */
|
||||
int iommu_request_dm_for_dev(struct device *dev)
|
||||
{
|
||||
return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
|
||||
}
|
||||
|
||||
/* Request that a device can't be direct mapped by the IOMMU */
|
||||
int iommu_request_dma_domain_for_dev(struct device *dev)
|
||||
{
|
||||
return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
|
||||
}
|
||||
|
||||
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
|
||||
{
|
||||
const struct iommu_ops *ops = NULL;
|
||||
|
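To illustrate the device fault reporting API added in this file (iommu_register_device_fault_handler(), iommu_report_device_fault(), iommu_page_response()), here is a minimal consumer sketch. The my_ctx/my_queue_fault names are hypothetical, and the 'code' member of struct iommu_page_response is assumed from the response codes listed in the kernel-doc above; the handler signature and the other fields follow what this diff shows.

/* Sketch only: a driver/VFIO-like consumer of the new per-device fault API. */
static int my_iommu_fault_handler(struct iommu_fault *fault, void *data)
{
	struct my_ctx *ctx = data;		/* hypothetical consumer state */

	/* Forward the fault (e.g. to userspace or a VM); complete it later. */
	return my_queue_fault(ctx, fault);	/* hypothetical helper */
}

static int my_enable_fault_reporting(struct device *dev, struct my_ctx *ctx)
{
	return iommu_register_device_fault_handler(dev, my_iommu_fault_handler,
						   ctx);
}

/* Later, once a recoverable page request has been serviced: */
static int my_complete_page_request(struct device *dev,
				    const struct iommu_fault_page_request *prm,
				    bool success)
{
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.grpid	 = prm->grpid,
		.pasid	 = prm->pasid,
		.flags	 = (prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) ?
			   IOMMU_PAGE_RESP_PASID_VALID : 0,
		.code	 = success ? IOMMU_PAGE_RESP_SUCCESS :
				     IOMMU_PAGE_RESP_INVALID,	/* 'code' field assumed */
	};

	return iommu_page_response(dev, &resp);
}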
@ -36,12 +36,16 @@
|
||||
#define arm_iommu_detach_device(...) do {} while (0)
|
||||
#endif
|
||||
|
||||
#define IPMMU_CTX_MAX 8
|
||||
#define IPMMU_CTX_MAX 8U
|
||||
#define IPMMU_CTX_INVALID -1
|
||||
|
||||
#define IPMMU_UTLB_MAX 48U
|
||||
|
||||
struct ipmmu_features {
|
||||
bool use_ns_alias_offset;
|
||||
bool has_cache_leaf_nodes;
|
||||
unsigned int number_of_contexts;
|
||||
unsigned int num_utlbs;
|
||||
bool setup_imbuscr;
|
||||
bool twobit_imttbcr_sl0;
|
||||
bool reserved_context;
|
||||
@ -53,11 +57,11 @@ struct ipmmu_vmsa_device {
|
||||
struct iommu_device iommu;
|
||||
struct ipmmu_vmsa_device *root;
|
||||
const struct ipmmu_features *features;
|
||||
unsigned int num_utlbs;
|
||||
unsigned int num_ctx;
|
||||
spinlock_t lock; /* Protects ctx and domains[] */
|
||||
DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
|
||||
struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
|
||||
s8 utlb_ctx[IPMMU_UTLB_MAX];
|
||||
|
||||
struct iommu_group *group;
|
||||
struct dma_iommu_mapping *mapping;
|
||||
@ -186,7 +190,8 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
|
||||
#define IMMAIR_ATTR_IDX_WBRWA 1
|
||||
#define IMMAIR_ATTR_IDX_DEV 2
|
||||
|
||||
#define IMEAR 0x0030
|
||||
#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */
|
||||
#define IMEUAR 0x0034 /* R-Car Gen3 only */
|
||||
|
||||
#define IMPCTR 0x0200
|
||||
#define IMPSTR 0x0208
|
||||
@ -334,6 +339,7 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
|
||||
ipmmu_write(mmu, IMUCTR(utlb),
|
||||
IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
|
||||
IMUCTR_MMUEN);
|
||||
mmu->utlb_ctx[utlb] = domain->context_id;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -345,6 +351,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
|
||||
struct ipmmu_vmsa_device *mmu = domain->mmu;
|
||||
|
||||
ipmmu_write(mmu, IMUCTR(utlb), 0);
|
||||
mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
|
||||
}
|
||||
|
||||
static void ipmmu_tlb_flush_all(void *cookie)
|
||||
@ -403,52 +410,10 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
|
||||
spin_unlock_irqrestore(&mmu->lock, flags);
|
||||
}
|
||||
|
||||
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
|
||||
static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
|
||||
{
|
||||
u64 ttbr;
|
||||
u32 tmp;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate the page table operations.
|
||||
*
|
||||
* VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
|
||||
* access, Long-descriptor format" that the NStable bit being set in a
|
||||
* table descriptor will result in the NStable and NS bits of all child
|
||||
* entries being ignored and considered as being set. The IPMMU seems
|
||||
* not to comply with this, as it generates a secure access page fault
|
||||
* if any of the NStable and NS bits isn't set when running in
|
||||
* non-secure mode.
|
||||
*/
|
||||
domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
|
||||
domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
|
||||
domain->cfg.ias = 32;
|
||||
domain->cfg.oas = 40;
|
||||
domain->cfg.tlb = &ipmmu_gather_ops;
|
||||
domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
|
||||
domain->io_domain.geometry.force_aperture = true;
|
||||
/*
|
||||
* TODO: Add support for coherent walk through CCI with DVM and remove
|
||||
* cache handling. For now, delegate it to the io-pgtable code.
|
||||
*/
|
||||
domain->cfg.iommu_dev = domain->mmu->root->dev;
|
||||
|
||||
/*
|
||||
* Find an unused context.
|
||||
*/
|
||||
ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
domain->context_id = ret;
|
||||
|
||||
domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
|
||||
domain);
|
||||
if (!domain->iop) {
|
||||
ipmmu_domain_free_context(domain->mmu->root,
|
||||
domain->context_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* TTBR0 */
|
||||
ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
|
||||
@ -494,7 +459,55 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
|
||||
*/
|
||||
ipmmu_ctx_write_all(domain, IMCTR,
|
||||
IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
|
||||
}
|
||||
|
||||
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate the page table operations.
|
||||
*
|
||||
* VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
|
||||
* access, Long-descriptor format" that the NStable bit being set in a
|
||||
* table descriptor will result in the NStable and NS bits of all child
|
||||
* entries being ignored and considered as being set. The IPMMU seems
|
||||
* not to comply with this, as it generates a secure access page fault
|
||||
* if any of the NStable and NS bits isn't set when running in
|
||||
* non-secure mode.
|
||||
*/
|
||||
domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
|
||||
domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
|
||||
domain->cfg.ias = 32;
|
||||
domain->cfg.oas = 40;
|
||||
domain->cfg.tlb = &ipmmu_gather_ops;
|
||||
domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
|
||||
domain->io_domain.geometry.force_aperture = true;
|
||||
/*
|
||||
* TODO: Add support for coherent walk through CCI with DVM and remove
|
||||
* cache handling. For now, delegate it to the io-pgtable code.
|
||||
*/
|
||||
domain->cfg.coherent_walk = false;
|
||||
domain->cfg.iommu_dev = domain->mmu->root->dev;
|
||||
|
||||
/*
|
||||
* Find an unused context.
|
||||
*/
|
||||
ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
domain->context_id = ret;
|
||||
|
||||
domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
|
||||
domain);
|
||||
if (!domain->iop) {
|
||||
ipmmu_domain_free_context(domain->mmu->root,
|
||||
domain->context_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ipmmu_domain_setup_context(domain);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -522,14 +535,16 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
|
||||
{
|
||||
const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
|
||||
struct ipmmu_vmsa_device *mmu = domain->mmu;
|
||||
unsigned long iova;
|
||||
u32 status;
|
||||
u32 iova;
|
||||
|
||||
status = ipmmu_ctx_read_root(domain, IMSTR);
|
||||
if (!(status & err_mask))
|
||||
return IRQ_NONE;
|
||||
|
||||
iova = ipmmu_ctx_read_root(domain, IMEAR);
|
||||
iova = ipmmu_ctx_read_root(domain, IMELAR);
|
||||
if (IS_ENABLED(CONFIG_64BIT))
|
||||
iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
|
||||
|
||||
/*
|
||||
* Clear the error status flags. Unlike traditional interrupt flag
|
||||
@ -541,10 +556,10 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
|
||||
|
||||
/* Log fatal errors. */
|
||||
if (status & IMSTR_MHIT)
|
||||
dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
|
||||
dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
|
||||
iova);
|
||||
if (status & IMSTR_ABORT)
|
||||
dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
|
||||
dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
|
||||
iova);
|
||||
|
||||
if (!(status & (IMSTR_PF | IMSTR_TF)))
|
||||
@ -560,7 +575,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
|
||||
return IRQ_HANDLED;
|
||||
|
||||
dev_err_ratelimited(mmu->dev,
|
||||
"Unhandled fault: status 0x%08x iova 0x%08x\n",
|
||||
"Unhandled fault: status 0x%08x iova 0x%lx\n",
|
||||
status, iova);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@ -885,27 +900,37 @@ error:
|
||||
|
||||
static int ipmmu_add_device(struct device *dev)
|
||||
{
|
||||
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
|
||||
struct iommu_group *group;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Only let through devices that have been verified in xlate()
|
||||
*/
|
||||
if (!to_ipmmu(dev))
|
||||
if (!mmu)
|
||||
return -ENODEV;
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
|
||||
return ipmmu_init_arm_mapping(dev);
|
||||
if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
|
||||
ret = ipmmu_init_arm_mapping(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
group = iommu_group_get_for_dev(dev);
|
||||
if (IS_ERR(group))
|
||||
return PTR_ERR(group);
|
||||
|
||||
group = iommu_group_get_for_dev(dev);
|
||||
if (IS_ERR(group))
|
||||
return PTR_ERR(group);
|
||||
iommu_group_put(group);
|
||||
}
|
||||
|
||||
iommu_group_put(group);
|
||||
iommu_device_link(&mmu->iommu, dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ipmmu_remove_device(struct device *dev)
|
||||
{
|
||||
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
|
||||
|
||||
iommu_device_unlink(&mmu->iommu, dev);
|
||||
arm_iommu_detach_device(dev);
|
||||
iommu_group_remove_device(dev);
|
||||
}
|
||||
@ -959,6 +984,7 @@ static const struct ipmmu_features ipmmu_features_default = {
|
||||
.use_ns_alias_offset = true,
|
||||
.has_cache_leaf_nodes = false,
|
||||
.number_of_contexts = 1, /* software only tested with one context */
|
||||
.num_utlbs = 32,
|
||||
.setup_imbuscr = true,
|
||||
.twobit_imttbcr_sl0 = false,
|
||||
.reserved_context = false,
|
||||
@ -968,6 +994,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
|
||||
.use_ns_alias_offset = false,
|
||||
.has_cache_leaf_nodes = true,
|
||||
.number_of_contexts = 8,
|
||||
.num_utlbs = 48,
|
||||
.setup_imbuscr = false,
|
||||
.twobit_imttbcr_sl0 = true,
|
||||
.reserved_context = true,
|
||||
@ -1020,10 +1047,10 @@ static int ipmmu_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
mmu->dev = &pdev->dev;
|
||||
mmu->num_utlbs = 48;
|
||||
spin_lock_init(&mmu->lock);
|
||||
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
|
||||
mmu->features = of_device_get_match_data(&pdev->dev);
|
||||
memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
|
||||
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
|
||||
|
||||
/* Map I/O memory and request IRQ. */
|
||||
@ -1047,8 +1074,7 @@ static int ipmmu_probe(struct platform_device *pdev)
|
||||
if (mmu->features->use_ns_alias_offset)
|
||||
mmu->base += IM_NS_ALIAS_OFFSET;
|
||||
|
||||
mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
|
||||
mmu->features->number_of_contexts);
|
||||
mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
|
||||
@ -1140,10 +1166,48 @@ static int ipmmu_remove(struct platform_device *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int ipmmu_resume_noirq(struct device *dev)
|
||||
{
|
||||
struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
|
||||
unsigned int i;
|
||||
|
||||
/* Reset root MMU and restore contexts */
|
||||
if (ipmmu_is_root(mmu)) {
|
||||
ipmmu_device_reset(mmu);
|
||||
|
||||
for (i = 0; i < mmu->num_ctx; i++) {
|
||||
if (!mmu->domains[i])
|
||||
continue;
|
||||
|
||||
ipmmu_domain_setup_context(mmu->domains[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/* Re-enable active micro-TLBs */
|
||||
for (i = 0; i < mmu->features->num_utlbs; i++) {
|
||||
if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
|
||||
continue;
|
||||
|
||||
ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops ipmmu_pm = {
|
||||
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
|
||||
};
|
||||
#define DEV_PM_OPS &ipmmu_pm
|
||||
#else
|
||||
#define DEV_PM_OPS NULL
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
static struct platform_driver ipmmu_driver = {
|
||||
.driver = {
|
||||
.name = "ipmmu-vmsa",
|
||||
.of_match_table = of_match_ptr(ipmmu_of_ids),
|
||||
.pm = DEV_PM_OPS,
|
||||
},
|
||||
.probe = ipmmu_probe,
|
||||
.remove = ipmmu_remove,
|
||||
|
@ -236,17 +236,6 @@ DEBUG_FOPS_RO(regs);
|
||||
DEFINE_SHOW_ATTRIBUTE(tlb);
|
||||
DEFINE_SHOW_ATTRIBUTE(pagetable);
|
||||
|
||||
#define __DEBUG_ADD_FILE(attr, mode) \
|
||||
{ \
|
||||
struct dentry *dent; \
|
||||
dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
|
||||
obj, &attr##_fops); \
|
||||
if (!dent) \
|
||||
goto err; \
|
||||
}
|
||||
|
||||
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
|
||||
|
||||
void omap_iommu_debugfs_add(struct omap_iommu *obj)
|
||||
{
|
||||
struct dentry *d;
|
||||
@ -254,23 +243,13 @@ void omap_iommu_debugfs_add(struct omap_iommu *obj)
|
||||
if (!iommu_debug_root)
|
||||
return;
|
||||
|
||||
obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
|
||||
if (!obj->debug_dir)
|
||||
return;
|
||||
d = debugfs_create_dir(obj->name, iommu_debug_root);
|
||||
obj->debug_dir = d;
|
||||
|
||||
d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir,
|
||||
&obj->nr_tlb_entries);
|
||||
if (!d)
|
||||
return;
|
||||
|
||||
DEBUG_ADD_FILE_RO(regs);
|
||||
DEBUG_ADD_FILE_RO(tlb);
|
||||
DEBUG_ADD_FILE_RO(pagetable);
|
||||
|
||||
return;
|
||||
|
||||
err:
|
||||
debugfs_remove_recursive(obj->debug_dir);
|
||||
debugfs_create_u32("nr_tlb_entries", 0400, d, &obj->nr_tlb_entries);
|
||||
debugfs_create_file("regs", 0400, d, obj, ®s_fops);
|
||||
debugfs_create_file("tlb", 0400, d, obj, &tlb_fops);
|
||||
debugfs_create_file("pagetable", 0400, d, obj, &pagetable_fops);
|
||||
}
|
||||
|
||||
void omap_iommu_debugfs_remove(struct omap_iommu *obj)
|
||||
@ -284,8 +263,6 @@ void omap_iommu_debugfs_remove(struct omap_iommu *obj)
|
||||
void __init omap_iommu_debugfs_init(void)
|
||||
{
|
||||
iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
|
||||
if (!iommu_debug_root)
|
||||
pr_err("can't create debugfs dir\n");
|
||||
}
|
||||
|
||||
void __exit omap_iommu_debugfs_exit(void)
|
||||
|
@@ -35,8 +35,7 @@

static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev) \
	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

@@ -42,6 +42,7 @@ struct iommu_ops;
struct iommu_group;
struct iommu_fwspec;
struct dev_pin_info;
struct iommu_param;

struct bus_attribute {
	struct attribute	attr;
@@ -961,6 +962,7 @@ struct dev_links_info {
 *		device (i.e. the bus driver that discovered the device).
 * @iommu_group: IOMMU group the device belongs to.
 * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
 * @iommu_param: Per device generic IOMMU runtime data
 *
 * @offline_disabled: If set, the device is permanently online.
 * @offline:	Set after successful invocation of bus type's .offline().
@@ -1054,6 +1056,7 @@ struct device {
	void	(*release)(struct device *dev);
	struct iommu_group	*iommu_group;
	struct iommu_fwspec	*iommu_fwspec;
	struct iommu_param	*iommu_param;

	bool			offline_disabled:1;
	bool			offline:1;

@@ -5,59 +5,21 @@
#ifndef __DMA_IOMMU_H
#define __DMA_IOMMU_H

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/errno.h>

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/msi.h>

int iommu_dma_init(void);

/* Domain management interface for IOMMU drivers */
int iommu_get_dma_cookie(struct iommu_domain *domain);
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain);

/* Setup call for arch DMA mapping code */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev);

/* General helpers for DMA-API <-> IOMMU-API interaction */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs);

/*
 * These implement the bulk of the relevant DMA mapping callbacks, but require
 * the arch code to take care of attributes and cache maintenance
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle);

int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot);
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot);

/*
 * Arch code with no special attribute handling may use these
 * directly as DMA mapping callbacks for simplicity
 */
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);

/* The DMA API isn't _quite_ the whole story, though... */
/*
@@ -75,16 +37,16 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,

void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

#else
#else /* CONFIG_IOMMU_DMA */

struct iommu_domain;
struct msi_desc;
struct msi_msg;
struct device;

static inline int iommu_dma_init(void)
static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size)
{
	return 0;
}

static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
@@ -117,5 +79,4 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
}

#endif	/* CONFIG_IOMMU_DMA */
#endif	/* __KERNEL__ */
#endif	/* __DMA_IOMMU_H */

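The slimmed-down interface above leaves iommu_get_dma_cookie()/iommu_put_dma_cookie() as the per-domain hooks an IOMMU driver provides for the dma-iommu layer. A minimal sketch of the usual pattern in a driver's domain alloc/free callbacks follows; the my_domain type is hypothetical and not taken from these patches.

/* Sketch only: 'my_domain' is a hypothetical driver-private domain type. */
struct my_domain {
	struct iommu_domain	domain;
	/* driver-specific page table state would live here */
};

static struct iommu_domain *my_domain_alloc(unsigned type)
{
	struct my_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	/* DMA domains carry an IOVA allocator cookie for the dma-iommu layer. */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}

	return &dom->domain;
}

static void my_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);	/* harmless for domains without a cookie */
	kfree(container_of(domain, struct my_domain, domain));
}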
@@ -435,6 +435,12 @@ enum {
#define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)

extern int intel_iommu_sm;

#define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_pasid((iommu)->ecap))

struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;
@@ -642,7 +648,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern int dmar_ir_support(void);

struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);

@@ -49,7 +49,7 @@ struct svm_dev_ops {

/**
 * intel_svm_bind_mm() - Bind the current process to a PASID
 * @dev:	Device to be granted acccess
 * @dev:	Device to be granted access
 * @pasid:	Address for allocated PASID
 * @flags:	Flags. Later for requesting supervisor mode, etc.
 * @ops:	Callbacks to device driver
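As the kernel-doc above describes, intel_svm_bind_mm() is the entry point a device driver uses to attach the current process's address space and obtain a PASID. A minimal, hedged usage sketch (default flags, no svm_dev_ops callbacks); the wrapper name is illustrative only.

/* Sketch only: bind current->mm to 'dev' and return the PASID via *out_pasid. */
static int my_bind_current_mm(struct device *dev, int *out_pasid)
{
	/* No per-device callbacks and no special flags in this simple case. */
	return intel_svm_bind_mm(dev, out_pasid, 0, NULL);
}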
@@ -44,6 +44,8 @@ struct iommu_gather_ops {
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @coherent_walk  A flag to indicate whether or not page table walks made
 *                 by the IOMMU are coherent with the CPU caches.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
@@ -68,11 +70,6 @@ struct io_pgtable_cfg {
 *	when the SoC is in "4GB mode" and they can only access the high
 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
 *
 * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
 *	be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
 *	software-emulated IOMMU), such that pagetable updates need not
 *	be treated as explicit DMA data.
 *
 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
 *	on unmap, for DMA domains using the flush queue mechanism for
 *	delayed invalidation.
@@ -81,12 +78,12 @@ struct io_pgtable_cfg {
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
	#define IO_PGTABLE_QUIRK_NO_DMA		BIT(4)
	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(5)
	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	bool				coherent_walk;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

|
||||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/of.h>
|
||||
#include <uapi/linux/iommu.h>
|
||||
|
||||
#define IOMMU_READ (1 << 0)
|
||||
#define IOMMU_WRITE (1 << 1)
|
||||
@ -29,6 +30,12 @@
|
||||
* if the IOMMU page table format is equivalent.
|
||||
*/
|
||||
#define IOMMU_PRIV (1 << 5)
|
||||
/*
|
||||
* Non-coherent masters on few Qualcomm SoCs can use this page protection flag
|
||||
* to set correct cacheability attributes to use an outer level of cache -
|
||||
* last level cache, aka system cache.
|
||||
*/
|
||||
#define IOMMU_QCOM_SYS_CACHE (1 << 6)
|
||||
|
||||
struct iommu_ops;
|
||||
struct iommu_group;
|
||||
@ -37,6 +44,7 @@ struct device;
|
||||
struct iommu_domain;
|
||||
struct notifier_block;
|
||||
struct iommu_sva;
|
||||
struct iommu_fault_event;
|
||||
|
||||
/* iommu fault flags */
|
||||
#define IOMMU_FAULT_READ 0x0
|
||||
@ -46,6 +54,7 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
|
||||
struct device *, unsigned long, int, void *);
|
||||
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
|
||||
void *);
|
||||
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
|
||||
|
||||
struct iommu_domain_geometry {
|
||||
dma_addr_t aperture_start; /* First address that can be mapped */
|
||||
@ -123,6 +132,12 @@ enum iommu_attr {
|
||||
enum iommu_resv_type {
|
||||
/* Memory regions which must be mapped 1:1 at all times */
|
||||
IOMMU_RESV_DIRECT,
|
||||
/*
|
||||
* Memory regions which are advertised to be 1:1 but are
|
||||
* commonly considered relaxable in some conditions,
|
||||
* for instance in device assignment use case (USB, Graphics)
|
||||
*/
|
||||
IOMMU_RESV_DIRECT_RELAXABLE,
|
||||
/* Arbitrary "never map this or give it to a device" address ranges */
|
||||
IOMMU_RESV_RESERVED,
|
||||
/* Hardware MSI region (untranslated) */
|
||||
@ -212,6 +227,7 @@ struct iommu_sva_ops {
|
||||
* @sva_bind: Bind process address space to device
|
||||
* @sva_unbind: Unbind process address space from device
|
||||
* @sva_get_pasid: Get PASID associated to a SVA handle
|
||||
* @page_response: handle page request response
|
||||
* @pgsize_bitmap: bitmap of all possible supported page sizes
|
||||
*/
|
||||
struct iommu_ops {
|
||||
@ -272,6 +288,10 @@ struct iommu_ops {
|
||||
void (*sva_unbind)(struct iommu_sva *handle);
|
||||
int (*sva_get_pasid)(struct iommu_sva *handle);
|
||||
|
||||
int (*page_response)(struct device *dev,
|
||||
struct iommu_fault_event *evt,
|
||||
struct iommu_page_response *msg);
|
||||
|
||||
unsigned long pgsize_bitmap;
|
||||
};
|
||||
|
||||
@ -289,6 +309,48 @@ struct iommu_device {
|
||||
struct device *dev;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iommu_fault_event - Generic fault event
|
||||
*
|
||||
* Can represent recoverable faults such as a page requests or
|
||||
* unrecoverable faults such as DMA or IRQ remapping faults.
|
||||
*
|
||||
* @fault: fault descriptor
|
||||
* @list: pending fault event list, used for tracking responses
|
||||
*/
|
||||
struct iommu_fault_event {
|
||||
struct iommu_fault fault;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iommu_fault_param - per-device IOMMU fault data
|
||||
* @handler: Callback function to handle IOMMU faults at device level
|
||||
* @data: handler private data
|
||||
* @faults: holds the pending faults which needs response
|
||||
* @lock: protect pending faults list
|
||||
*/
|
||||
struct iommu_fault_param {
|
||||
iommu_dev_fault_handler_t handler;
|
||||
void *data;
|
||||
struct list_head faults;
|
||||
struct mutex lock;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iommu_param - collection of per-device IOMMU data
|
||||
*
|
||||
* @fault_param: IOMMU detected device fault reporting data
|
||||
*
|
||||
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
|
||||
* struct iommu_group *iommu_group;
|
||||
* struct iommu_fwspec *iommu_fwspec;
|
||||
*/
|
||||
struct iommu_param {
|
||||
struct mutex lock;
|
||||
struct iommu_fault_param *fault_param;
|
||||
};
|
||||
|
||||
int iommu_device_register(struct iommu_device *iommu);
|
||||
void iommu_device_unregister(struct iommu_device *iommu);
|
||||
int iommu_device_sysfs_add(struct iommu_device *iommu,
|
||||
@ -350,6 +412,7 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
|
||||
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
|
||||
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
|
||||
extern int iommu_request_dm_for_dev(struct device *dev);
|
||||
extern int iommu_request_dma_domain_for_dev(struct device *dev);
|
||||
extern struct iommu_resv_region *
|
||||
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
|
||||
enum iommu_resv_type type);
|
||||
@ -378,6 +441,17 @@ extern int iommu_group_register_notifier(struct iommu_group *group,
|
||||
struct notifier_block *nb);
|
||||
extern int iommu_group_unregister_notifier(struct iommu_group *group,
|
||||
struct notifier_block *nb);
|
||||
extern int iommu_register_device_fault_handler(struct device *dev,
|
||||
iommu_dev_fault_handler_t handler,
|
||||
void *data);
|
||||
|
||||
extern int iommu_unregister_device_fault_handler(struct device *dev);
|
||||
|
||||
extern int iommu_report_device_fault(struct device *dev,
|
||||
struct iommu_fault_event *evt);
|
||||
extern int iommu_page_response(struct device *dev,
|
||||
struct iommu_page_response *msg);
|
||||
|
||||
extern int iommu_group_id(struct iommu_group *group);
|
||||
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
|
||||
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
@@ -492,6 +566,7 @@ struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};

static inline bool iommu_present(struct bus_type *bus)
{
@@ -614,6 +689,11 @@ static inline int iommu_request_dm_for_dev(struct device *dev)
	return -ENODEV;
}

static inline int iommu_request_dma_domain_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
@@ -685,6 +765,31 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group,
	return 0;
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
include/uapi/linux/iommu.h (new file, 155 lines)

@@ -0,0 +1,155 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * IOMMU user API definitions
 */

#ifndef _UAPI_IOMMU_H
#define _UAPI_IOMMU_H

#include <linux/types.h>

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types, can be expanded for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_DMA_UNRECOV = 1,	/* unrecoverable fault */
	IOMMU_FAULT_PAGE_REQ,		/* page request fault */
};

enum iommu_fault_reason {
	IOMMU_FAULT_REASON_UNKNOWN = 0,

	/* Could not access the PASID table (fetch caused external abort) */
	IOMMU_FAULT_REASON_PASID_FETCH,

	/* PASID entry is invalid or has configuration errors */
	IOMMU_FAULT_REASON_BAD_PASID_ENTRY,

	/*
	 * PASID is out of range (e.g. exceeds the maximum PASID
	 * supported by the IOMMU) or disabled.
	 */
	IOMMU_FAULT_REASON_PASID_INVALID,

	/*
	 * An external abort occurred fetching (or updating) a translation
	 * table descriptor
	 */
	IOMMU_FAULT_REASON_WALK_EABT,

	/*
	 * Could not access the page table entry (Bad address),
	 * actual translation fault
	 */
	IOMMU_FAULT_REASON_PTE_FETCH,

	/* Protection flag check failed */
	IOMMU_FAULT_REASON_PERMISSION,

	/* access flag check failed */
	IOMMU_FAULT_REASON_ACCESS,

	/* Output address of a translation stage caused Address Size fault */
	IOMMU_FAULT_REASON_OOR_ADDRESS,
};

/**
 * struct iommu_fault_unrecoverable - Unrecoverable fault data
 * @reason: reason of the fault, from &enum iommu_fault_reason
 * @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values)
 * @pasid: Process Address Space ID
 * @perm: permission access requested by the incoming transaction
 *        (IOMMU_FAULT_PERM_* values)
 * @addr: offending page address
 * @fetch_addr: address that caused a fetch abort, if any
 */
struct iommu_fault_unrecoverable {
	__u32	reason;
#define IOMMU_FAULT_UNRECOV_PASID_VALID		(1 << 0)
#define IOMMU_FAULT_UNRECOV_ADDR_VALID		(1 << 1)
#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID	(1 << 2)
	__u32	flags;
	__u32	pasid;
	__u32	perm;
	__u64	addr;
	__u64	fetch_addr;
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values)
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA	(1 << 2)
	__u32	flags;
	__u32	pasid;
	__u32	grpid;
	__u32	perm;
	__u64	addr;
	__u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @padding: reserved for future use (should be zero)
 * @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 * @padding2: sets the fault size to allow for future extensions
 */
struct iommu_fault {
	__u32	type;
	__u32	padding;
	union {
		struct iommu_fault_unrecoverable event;
		struct iommu_fault_page_request prm;
		__u8 padding2[56];
	};
};
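A registered handler (see the registration sketch after the include/linux/iommu.h declarations above) dispatches on @type and reads the matching union member, checking the *_VALID flag bits before trusting optional fields. The decode below is illustrative only; the handler name and logging policy are hypothetical, while the structures and constants are the ones defined in this header.

static int my_dev_fault_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;

	switch (fault->type) {
	case IOMMU_FAULT_DMA_UNRECOV:
		/* @addr is only meaningful when the flag says so. */
		if (fault->event.flags & IOMMU_FAULT_UNRECOV_ADDR_VALID)
			dev_err(dev, "unrecoverable fault at 0x%llx, reason %u\n",
				fault->event.addr, fault->event.reason);
		else
			dev_err(dev, "unrecoverable fault, reason %u\n",
				fault->event.reason);
		return 0;
	case IOMMU_FAULT_PAGE_REQ:
		/* Recoverable: note the group and answer later through
		 * iommu_page_response() (see struct iommu_page_response below).
		 */
		dev_dbg(dev, "page request, group %u, addr 0x%llx\n",
			fault->prm.grpid, fault->prm.addr);
		return 0;
	default:
		return -EINVAL;
	}
}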
/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @version: API version of this structure
 * @flags: encodes whether the corresponding fields are valid
 *         (IOMMU_PAGE_RESP_* values)
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
#define IOMMU_PAGE_RESP_VERSION_1	1
	__u32	version;
#define IOMMU_PAGE_RESP_PASID_VALID	(1 << 0)
	__u32	flags;
	__u32	pasid;
	__u32	grpid;
	__u32	code;
};
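For a recoverable fault (%IOMMU_FAULT_PAGE_REQ) the consumer completes the page request group by filling one of these records and passing it to iommu_page_response(), echoing the PASID and group index of the originating struct iommu_fault_page_request. A sketch of that completion path follows; the helper name is hypothetical and its arguments are assumed to come from the fault reported earlier.

static int my_complete_page_request(struct device *dev, bool pasid_valid,
				    u32 pasid, u32 grpid, bool success)
{
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.flags   = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0,
		.pasid   = pasid,
		.grpid   = grpid,
		.code    = success ? IOMMU_PAGE_RESP_SUCCESS
				   : IOMMU_PAGE_RESP_INVALID,
	};

	/* The core matches this against the pending fault events tracked in
	 * struct iommu_fault_param and forwards it to the IOMMU driver.
	 */
	return iommu_page_response(dev, &resp);
}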

#endif /* _UAPI_IOMMU_H */