IOMMU Updates for Linux v6.1:

Including:
 
 	- Removal of the bus_set_iommu() interface which became
 	  unnecessary because of IOMMU per-device probing
 
 	- Make the dma-iommu.h header private
 
 	- Intel VT-d changes from Lu Baolu:
 	  - Decouple PASID and PRI from SVA
 	  - Add ESRTPS & ESIRTPS capability check
 	  - Cleanups
 
 	- Apple DART support for the M1 Pro/Max SoCs
 
 	- Support for AMD IOMMUv2 page-tables for the DMA-API layer. The
 	  v2 page-tables are compatible with the x86 CPU page-tables.
 	  Using them for DMA-API prepares support for hardware-assisted
 	  IOMMU virtualization
 
 	- Support for MT6795 Helio X10 M4Us in the Mediatek IOMMU driver
 
 	- Some smaller fixes and cleanups
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmNEC5oACgkQK/BELZcB
 GuNcOQ/6A5SXmcvDRLYZW1ENM5Z6xsZ1LabSZkjhYSpmbJyu8Uny/Z2aRWqxPMLJ
 hJeHTsWSLhrTq1VfjFhELHB3kgT2DRr7H3LXXaMNC6qz690EcavX1wKX2AxH0m22
 8YrktkyAmFQ3BG6rsQLdlMMasLph/x06ix/xO9opQZVFdj/fV0Jx7ekX1JK+U3hx
 MI96i5W3G5PBVHBypAvjxSlmA4saj9Fhk7l3IZL7py9AOKz7NypuwWRs+86PMBiO
 EzLt5aF4g8pmKChF/c9BsoIbjBYvTG/s3NbycIng0ACc2SOvf+EvtoVZQclWifbT
 lwti9PLdsoVUnPOZHLYOTx4xSf/UyoLVzaLxJ52aoXnNYe2qaX5DANXhT2mWIY/Y
 z1mzOkShmK7WF7a8arRyqJeLJ4SvDx8GrbvLiom3DAzmqVHzzFGadHtt5fvGYN4F
 Jet/JIN3HjECQbamqtPBpWquBFhLmgusPksIiyMFscRvYdZqkaVkTkElcF3WqAMm
 QkeecfoTQ9Vdtdz44ZVLRjKpS77yRZmHshp1r/rfSI+9Ok8uRI+xmmcyrAI6ElqH
 DH14tLHPzw694rTHF+bTCd+pPMGOoFLi0xAfUXAeGWm1uzC1JIRrVu5JeQNOUOSD
 5SQDXB7dPrhXngaws5Fx2u3amCO3688mslcGgM7q54kC+LyVo0E=
 =h0sT
 -----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - remove the bus_set_iommu() interface, which became unnecessary because
   of IOMMU per-device probing (a short registration sketch follows this
   list)

 - make the dma-iommu.h header private

 - Intel VT-d changes from Lu Baolu:
	  - Decouple PASID and PRI from SVA
	  - Add ESRTPS & ESIRTPS capability check
	  - Cleanups

 - Apple DART support for the M1 Pro/Max SoCs

 - support for AMD IOMMUv2 page-tables for the DMA-API layer.

   The v2 page-tables are compatible with the x86 CPU page-tables. Using
   them for DMA-API prepares support for hardware-assisted IOMMU
   virtualization

 - support for MT6795 Helio X10 M4Us in the Mediatek IOMMU driver

 - some smaller fixes and cleanups
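
   As a rough illustration of what the bus_set_iommu() removal means for a
   driver (a minimal sketch with hypothetical example_* names, modelled on
   the apple-dart and arm-smmu probe paths in the diff below; error handling
   trimmed), an IOMMU driver now publishes its ops only against its own
   device at probe time and lets the core handle per-device probing:

	#include <linux/iommu.h>
	#include <linux/platform_device.h>

	/* Hypothetical driver state; only the embedded iommu_device matters here. */
	struct example_iommu {
		struct iommu_device iommu;
	};

	/* The driver's iommu_ops, filled in elsewhere and unchanged by this series. */
	static const struct iommu_ops example_iommu_ops;

	static int example_iommu_probe(struct platform_device *pdev)
	{
		struct example_iommu *ei;
		int ret;

		ei = devm_kzalloc(&pdev->dev, sizeof(*ei), GFP_KERNEL);
		if (!ei)
			return -ENOMEM;

		/* Expose the IOMMU instance in sysfs, as before. */
		ret = iommu_device_sysfs_add(&ei->iommu, &pdev->dev, NULL,
					     "example-iommu.%s", dev_name(&pdev->dev));
		if (ret)
			return ret;

		/*
		 * Registering the instance is now enough; there is no
		 * bus_set_iommu(&platform_bus_type, ...) step any more.
		 */
		ret = iommu_device_register(&ei->iommu, &example_iommu_ops, &pdev->dev);
		if (ret)
			iommu_device_sysfs_remove(&ei->iommu);

		return ret;
	}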

* tag 'iommu-updates-v6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (59 commits)
  iommu/vt-d: Avoid unnecessary global DMA cache invalidation
  iommu/vt-d: Avoid unnecessary global IRTE cache invalidation
  iommu/vt-d: Rename cap_5lp_support to cap_fl5lp_support
  iommu/vt-d: Remove pasid_set_eafe()
  iommu/vt-d: Decouple PASID & PRI enabling from SVA
  iommu/vt-d: Remove unnecessary SVA data accesses in page fault path
  dt-bindings: iommu: arm,smmu-v3: Relax order of interrupt names
  iommu: dart: Support t6000 variant
  iommu/io-pgtable-dart: Add DART PTE support for t6000
  iommu/io-pgtable: Add DART subpage protection support
  iommu/io-pgtable: Move Apple DART support to its own file
  iommu/mediatek: Add support for MT6795 Helio X10 M4Us
  iommu/mediatek: Introduce new flag TF_PORT_TO_ADDR_MT8173
  dt-bindings: mediatek: Add bindings for MT6795 M4U
  iommu/iova: Fix module config properly
  iommu/amd: Fix sparse warning
  iommu/amd: Remove outdated comment
  iommu/amd: Free domain ID after domain_flush_pages
  iommu/amd: Free domain id in error path
  iommu/virtio: Fix compile error with viommu_capable()
  ...
Linus Torvalds 2022-10-10 13:20:53 -07:00
commit f23cdfcd04
60 changed files with 1501 additions and 902 deletions

@@ -321,6 +321,8 @@
 			force_enable - Force enable the IOMMU on platforms known
 			               to be buggy with IOMMU enabled. Use this
 			               option with care.
+			pgtbl_v1     - Use v1 page table for DMA-API (Default).
+			pgtbl_v2     - Use v2 page table for DMA-API.
 
 	amd_iommu_dump=	[HW,X86-64]
 			Enable AMD IOMMU driver option to dump the ACPI table
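
   For example, with the option added above, the v2 page table can be
   selected on the kernel command line, either alone or combined with other
   amd_iommu= options (the reworked option parser walks comma-separated
   tokens):

	amd_iommu=pgtbl_v2
	amd_iommu=force_isolation,pgtbl_v2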

@ -39,16 +39,11 @@ properties:
any others. any others.
- minItems: 1 - minItems: 1
items: items:
- enum: enum:
- eventq # Event Queue not empty - eventq # Event Queue not empty
- gerror # Global Error activated - gerror # Global Error activated
- const: gerror - cmdq-sync # CMD_SYNC complete
- enum: - priq # PRI Queue not empty
- cmdq-sync # CMD_SYNC complete
- priq # PRI Queue not empty
- enum:
- cmdq-sync
- priq
'#iommu-cells': '#iommu-cells':
const: 1 const: 1

@ -73,6 +73,7 @@ properties:
- mediatek,mt2701-m4u # generation one - mediatek,mt2701-m4u # generation one
- mediatek,mt2712-m4u # generation two - mediatek,mt2712-m4u # generation two
- mediatek,mt6779-m4u # generation two - mediatek,mt6779-m4u # generation two
- mediatek,mt6795-m4u # generation two
- mediatek,mt8167-m4u # generation two - mediatek,mt8167-m4u # generation two
- mediatek,mt8173-m4u # generation two - mediatek,mt8173-m4u # generation two
- mediatek,mt8183-m4u # generation two - mediatek,mt8183-m4u # generation two
@ -124,6 +125,7 @@ properties:
dt-binding/memory/mt2701-larb-port.h for mt2701 and mt7623, dt-binding/memory/mt2701-larb-port.h for mt2701 and mt7623,
dt-binding/memory/mt2712-larb-port.h for mt2712, dt-binding/memory/mt2712-larb-port.h for mt2712,
dt-binding/memory/mt6779-larb-port.h for mt6779, dt-binding/memory/mt6779-larb-port.h for mt6779,
dt-binding/memory/mt6795-larb-port.h for mt6795,
dt-binding/memory/mt8167-larb-port.h for mt8167, dt-binding/memory/mt8167-larb-port.h for mt8167,
dt-binding/memory/mt8173-larb-port.h for mt8173, dt-binding/memory/mt8173-larb-port.h for mt8173,
dt-binding/memory/mt8183-larb-port.h for mt8183, dt-binding/memory/mt8183-larb-port.h for mt8183,
@ -148,6 +150,7 @@ allOf:
enum: enum:
- mediatek,mt2701-m4u - mediatek,mt2701-m4u
- mediatek,mt2712-m4u - mediatek,mt2712-m4u
- mediatek,mt6795-m4u
- mediatek,mt8173-m4u - mediatek,mt8173-m4u
- mediatek,mt8186-iommu-mm - mediatek,mt8186-iommu-mm
- mediatek,mt8192-m4u - mediatek,mt8192-m4u
@ -177,6 +180,7 @@ allOf:
contains: contains:
enum: enum:
- mediatek,mt2712-m4u - mediatek,mt2712-m4u
- mediatek,mt6795-m4u
- mediatek,mt8173-m4u - mediatek,mt8173-m4u
then: then:

@@ -1915,6 +1915,7 @@ F:	drivers/dma/apple-admac.c
 F:	drivers/i2c/busses/i2c-pasemi-core.c
 F:	drivers/i2c/busses/i2c-pasemi-platform.c
 F:	drivers/iommu/apple-dart.c
+F:	drivers/iommu/io-pgtable-dart.c
 F:	drivers/irqchip/irq-apple-aic.c
 F:	drivers/mailbox/apple-mailbox.c
 F:	drivers/nvme/host/apple.c
@@ -10689,8 +10690,8 @@ L:	iommu@lists.linux.dev
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
 F:	drivers/iommu/dma-iommu.c
+F:	drivers/iommu/dma-iommu.h
 F:	drivers/iommu/iova.c
-F:	include/linux/dma-iommu.h
 F:	include/linux/iova.h
 
 IOMMU SUBSYSTEM

@@ -209,7 +209,6 @@ config ARM64
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_GENERIC_VDSO
-	select IOMMU_DMA if IOMMU_SUPPORT
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN

@@ -7,7 +7,7 @@
 #include <linux/gfp.h>
 #include <linux/cache.h>
 #include <linux/dma-map-ops.h>
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
 #include <xen/xen.h>
 
 #include <asm/cacheflush.h>

@@ -19,7 +19,6 @@
 #define pr_fmt(fmt) "ACPI: VIOT: " fmt
 
 #include <linux/acpi_viot.h>
-#include <linux/dma-iommu.h>
 #include <linux/fwnode.h>
 #include <linux/iommu.h>
 #include <linux/list.h>

@@ -4,7 +4,6 @@
 // Author: Inki Dae <inki.dae@samsung.com>
 // Author: Andrzej Hajda <a.hajda@samsung.com>
 
-#include <linux/dma-iommu.h>
 #include <linux/dma-map-ops.h>
 #include <linux/iommu.h>
 #include <linux/platform_device.h>

@@ -67,6 +67,17 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
 
 	  If unsure, say N here.
 
+config IOMMU_IO_PGTABLE_DART
+	bool "Apple DART Formats"
+	select IOMMU_IO_PGTABLE
+	depends on ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+	help
+	  Enable support for the Apple DART pagetable formats. These include
+	  the t8020 and t6000/t8110 DART formats used in Apple M1/M2 family
+	  SoCs.
+
+	  If unsure, say N here.
+
 endmenu
 
 config IOMMU_DEBUGFS
@@ -137,7 +148,7 @@ config OF_IOMMU
 
 # IOMMU-agnostic DMA-mapping layer
 config IOMMU_DMA
-	bool
+	def_bool ARM64 || IA64 || X86
 	select DMA_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
@@ -294,7 +305,7 @@
 	tristate "Apple DART IOMMU Support"
 	depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64)
 	select IOMMU_API
-	select IOMMU_IO_PGTABLE_LPAE
+	select IOMMU_IO_PGTABLE_DART
 	default ARCH_APPLE
 	help
 	  Support for Apple DART (Device Address Resolution Table) IOMMUs
@@ -476,7 +487,6 @@ config VIRTIO_IOMMU
 	depends on VIRTIO
 	depends on (ARM64 || X86)
 	select IOMMU_API
-	select IOMMU_DMA
 	select INTERVAL_TREE
 	select ACPI_VIOT if ACPI
 	help

@@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o
 obj-$(CONFIG_IOASID) += ioasid.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o

@@ -9,7 +9,6 @@ config AMD_IOMMU
 	select PCI_PASID
 	select IOMMU_API
 	select IOMMU_IOVA
-	select IOMMU_DMA
 	select IOMMU_IO_PGTABLE
 	depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
 	help

@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o

@@ -18,7 +18,6 @@ extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
 extern int amd_iommu_init_devices(void);
 extern void amd_iommu_uninit_devices(void);
 extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
 extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS

@ -94,6 +94,7 @@
#define FEATURE_HE (1ULL<<8) #define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9) #define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21) #define FEATURE_GAM_VAPIC (1ULL<<21)
#define FEATURE_GIOSUP (1ULL<<48)
#define FEATURE_EPHSUP (1ULL<<50) #define FEATURE_EPHSUP (1ULL<<50)
#define FEATURE_SNP (1ULL<<63) #define FEATURE_SNP (1ULL<<63)
@ -276,6 +277,8 @@
* 512GB Pages are not supported due to a hardware bug * 512GB Pages are not supported due to a hardware bug
*/ */
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38)) #define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
/* 4K, 2MB, 1G page sizes are supported */
#define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
/* Bit value definition for dte irq remapping fields*/ /* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) #define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
@ -376,6 +379,7 @@
#define DTE_FLAG_IW (1ULL << 62) #define DTE_FLAG_IW (1ULL << 62)
#define DTE_FLAG_IOTLB (1ULL << 32) #define DTE_FLAG_IOTLB (1ULL << 32)
#define DTE_FLAG_GIOV (1ULL << 54)
#define DTE_FLAG_GV (1ULL << 55) #define DTE_FLAG_GV (1ULL << 55)
#define DTE_FLAG_MASK (0x3ffULL << 32) #define DTE_FLAG_MASK (0x3ffULL << 32)
#define DTE_GLX_SHIFT (56) #define DTE_GLX_SHIFT (56)
@ -434,6 +438,7 @@
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page #define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
translation */ translation */
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */ #define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
#define PD_GIOV_MASK (1UL << 4) /* domain enable GIOV support */
extern bool amd_iommu_dump; extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \ #define DUMP_printk(format, arg...) \
@ -456,6 +461,8 @@ struct irq_remap_table {
/* Interrupt remapping feature used? */ /* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap; extern bool amd_iommu_irq_remap;
extern const struct iommu_ops amd_iommu_ops;
/* IVRS indicates that pre-boot remapping was enabled */ /* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support; extern bool amdr_ivrs_remap_support;
@ -526,7 +533,8 @@ struct amd_io_pgtable {
struct io_pgtable iop; struct io_pgtable iop;
int mode; int mode;
u64 *root; u64 *root;
atomic64_t pt_root; /* pgtable root and pgtable mode */ atomic64_t pt_root; /* pgtable root and pgtable mode */
u64 *pgd; /* v2 pgtable pgd pointer */
}; };
/* /*

@ -95,8 +95,6 @@
* out of it. * out of it.
*/ */
extern const struct iommu_ops amd_iommu_ops;
/* /*
* structure describing one IOMMU in the ACPI table. Typically followed by one * structure describing one IOMMU in the ACPI table. Typically followed by one
* or more ivhd_entrys. * or more ivhd_entrys.
@ -2068,6 +2066,17 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
init_iommu_perf_ctr(iommu); init_iommu_perf_ctr(iommu);
if (amd_iommu_pgtable == AMD_IOMMU_V2) {
if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
!iommu_feature(iommu, FEATURE_GT)) {
pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
amd_iommu_pgtable = AMD_IOMMU_V1;
} else if (iommu_default_passthrough()) {
pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
amd_iommu_pgtable = AMD_IOMMU_V1;
}
}
if (is_rd890_iommu(iommu->dev)) { if (is_rd890_iommu(iommu->dev)) {
int i, j; int i, j;
@ -2146,6 +2155,8 @@ static void print_iommu_info(void)
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n"); pr_info("X2APIC enabled\n");
} }
if (amd_iommu_pgtable == AMD_IOMMU_V2)
pr_info("V2 page table enabled\n");
} }
static int __init amd_iommu_init_pci(void) static int __init amd_iommu_init_pci(void)
@ -2168,20 +2179,13 @@ static int __init amd_iommu_init_pci(void)
/* /*
* Order is important here to make sure any unity map requirements are * Order is important here to make sure any unity map requirements are
* fulfilled. The unity mappings are created and written to the device * fulfilled. The unity mappings are created and written to the device
* table during the amd_iommu_init_api() call. * table during the iommu_init_pci() call.
* *
* After that we call init_device_table_dma() to make sure any * After that we call init_device_table_dma() to make sure any
* uninitialized DTE will block DMA, and in the end we flush the caches * uninitialized DTE will block DMA, and in the end we flush the caches
* of all IOMMUs to make sure the changes to the device table are * of all IOMMUs to make sure the changes to the device table are
* active. * active.
*/ */
ret = amd_iommu_init_api();
if (ret) {
pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
ret);
goto out;
}
for_each_pci_segment(pci_seg) for_each_pci_segment(pci_seg)
init_device_table_dma(pci_seg); init_device_table_dma(pci_seg);
@ -3366,17 +3370,30 @@ static int __init parse_amd_iommu_intr(char *str)
static int __init parse_amd_iommu_options(char *str) static int __init parse_amd_iommu_options(char *str)
{ {
for (; *str; ++str) { if (!str)
return -EINVAL;
while (*str) {
if (strncmp(str, "fullflush", 9) == 0) { if (strncmp(str, "fullflush", 9) == 0) {
pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
iommu_set_dma_strict(); iommu_set_dma_strict();
} } else if (strncmp(str, "force_enable", 12) == 0) {
if (strncmp(str, "force_enable", 12) == 0)
amd_iommu_force_enable = true; amd_iommu_force_enable = true;
if (strncmp(str, "off", 3) == 0) } else if (strncmp(str, "off", 3) == 0) {
amd_iommu_disabled = true; amd_iommu_disabled = true;
if (strncmp(str, "force_isolation", 15) == 0) } else if (strncmp(str, "force_isolation", 15) == 0) {
amd_iommu_force_isolation = true; amd_iommu_force_isolation = true;
} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
amd_iommu_pgtable = AMD_IOMMU_V1;
} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
amd_iommu_pgtable = AMD_IOMMU_V2;
} else {
pr_notice("Unknown option - '%s'\n", str);
}
str += strcspn(str, ",");
while (*str == ',')
str++;
} }
return 1; return 1;

@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
* supporting all features of AMD IOMMU page tables like level skipping * supporting all features of AMD IOMMU page tables like level skipping
* and full 64 bit address spaces. * and full 64 bit address spaces.
*/ */
static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova, static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp) phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{ {
struct protection_domain *dom = io_pgtable_ops_to_domain(ops); struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
LIST_HEAD(freelist); LIST_HEAD(freelist);
@ -369,39 +370,47 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
u64 __pte, *pte; u64 __pte, *pte;
int ret, i, count; int ret, i, count;
BUG_ON(!IS_ALIGNED(iova, size)); BUG_ON(!IS_ALIGNED(iova, pgsize));
BUG_ON(!IS_ALIGNED(paddr, size)); BUG_ON(!IS_ALIGNED(paddr, pgsize));
ret = -EINVAL; ret = -EINVAL;
if (!(prot & IOMMU_PROT_MASK)) if (!(prot & IOMMU_PROT_MASK))
goto out; goto out;
count = PAGE_SIZE_PTE_COUNT(size); while (pgcount > 0) {
pte = alloc_pte(dom, iova, size, NULL, gfp, &updated); count = PAGE_SIZE_PTE_COUNT(pgsize);
pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
ret = -ENOMEM; ret = -ENOMEM;
if (!pte) if (!pte)
goto out; goto out;
for (i = 0; i < count; ++i) for (i = 0; i < count; ++i)
free_clear_pte(&pte[i], pte[i], &freelist); free_clear_pte(&pte[i], pte[i], &freelist);
if (!list_empty(&freelist)) if (!list_empty(&freelist))
updated = true; updated = true;
if (count > 1) { if (count > 1) {
__pte = PAGE_SIZE_PTE(__sme_set(paddr), size); __pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC; __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
} else } else
__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC; __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
if (prot & IOMMU_PROT_IR) if (prot & IOMMU_PROT_IR)
__pte |= IOMMU_PTE_IR; __pte |= IOMMU_PTE_IR;
if (prot & IOMMU_PROT_IW) if (prot & IOMMU_PROT_IW)
__pte |= IOMMU_PTE_IW; __pte |= IOMMU_PTE_IW;
for (i = 0; i < count; ++i) for (i = 0; i < count; ++i)
pte[i] = __pte; pte[i] = __pte;
iova += pgsize;
paddr += pgsize;
pgcount--;
if (mapped)
*mapped += pgsize;
}
ret = 0; ret = 0;
@ -426,17 +435,18 @@ out:
return ret; return ret;
} }
static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops, static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
unsigned long iova, unsigned long iova,
size_t size, size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather) struct iommu_iotlb_gather *gather)
{ {
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long long unmapped; unsigned long long unmapped;
unsigned long unmap_size; unsigned long unmap_size;
u64 *pte; u64 *pte;
size_t size = pgcount << __ffs(pgsize);
BUG_ON(!is_power_of_2(size)); BUG_ON(!is_power_of_2(pgsize));
unmapped = 0; unmapped = 0;
@ -448,14 +458,14 @@ static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
count = PAGE_SIZE_PTE_COUNT(unmap_size); count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
pte[i] = 0ULL; pte[i] = 0ULL;
} else {
return unmapped;
} }
iova = (iova & ~(unmap_size - 1)) + unmap_size; iova = (iova & ~(unmap_size - 1)) + unmap_size;
unmapped += unmap_size; unmapped += unmap_size;
} }
BUG_ON(unmapped && !is_power_of_2(unmapped));
return unmapped; return unmapped;
} }
@ -514,8 +524,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE, cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v1_flush_ops; cfg->tlb = &v1_flush_ops;
pgtable->iop.ops.map = iommu_v1_map_page; pgtable->iop.ops.map_pages = iommu_v1_map_pages;
pgtable->iop.ops.unmap = iommu_v1_unmap_page; pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys; pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
return &pgtable->iop; return &pgtable->iop;

@ -0,0 +1,415 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* CPU-agnostic AMD IO page table v2 allocator.
*
* Copyright (C) 2022 Advanced Micro Devices, Inc.
* Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
* Author: Vasant Hegde <vasant.hegde@amd.com>
*/
#define pr_fmt(fmt) "AMD-Vi: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <asm/barrier.h>
#include "amd_iommu_types.h"
#include "amd_iommu.h"
#define IOMMU_PAGE_PRESENT BIT_ULL(0) /* Is present */
#define IOMMU_PAGE_RW BIT_ULL(1) /* Writeable */
#define IOMMU_PAGE_USER BIT_ULL(2) /* Userspace addressable */
#define IOMMU_PAGE_PWT BIT_ULL(3) /* Page write through */
#define IOMMU_PAGE_PCD BIT_ULL(4) /* Page cache disabled */
#define IOMMU_PAGE_ACCESS BIT_ULL(5) /* Was accessed (updated by IOMMU) */
#define IOMMU_PAGE_DIRTY BIT_ULL(6) /* Was written to (updated by IOMMU) */
#define IOMMU_PAGE_PSE BIT_ULL(7) /* Page Size Extensions */
#define IOMMU_PAGE_NX BIT_ULL(63) /* No execute */
#define MAX_PTRS_PER_PAGE 512
#define IOMMU_PAGE_SIZE_2M BIT_ULL(21)
#define IOMMU_PAGE_SIZE_1G BIT_ULL(30)
static inline int get_pgtable_level(void)
{
/* 5 level page table is not supported */
return PAGE_MODE_4_LEVEL;
}
static inline bool is_large_pte(u64 pte)
{
return (pte & IOMMU_PAGE_PSE);
}
static inline void *alloc_pgtable_page(void)
{
return (void *)get_zeroed_page(GFP_KERNEL);
}
static inline u64 set_pgtable_attr(u64 *page)
{
u64 prot;
prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
return (iommu_virt_to_phys(page) | prot);
}
static inline void *get_pgtable_pte(u64 pte)
{
return iommu_phys_to_virt(pte & PM_ADDR_MASK);
}
static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
u64 pte;
pte = __sme_set(paddr & PM_ADDR_MASK);
pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
if (prot & IOMMU_PROT_IW)
pte |= IOMMU_PAGE_RW;
/* Large page */
if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
pte |= IOMMU_PAGE_PSE;
return pte;
}
static inline u64 get_alloc_page_size(u64 size)
{
if (size >= IOMMU_PAGE_SIZE_1G)
return IOMMU_PAGE_SIZE_1G;
if (size >= IOMMU_PAGE_SIZE_2M)
return IOMMU_PAGE_SIZE_2M;
return PAGE_SIZE;
}
static inline int page_size_to_level(u64 pg_size)
{
if (pg_size == IOMMU_PAGE_SIZE_1G)
return PAGE_MODE_3_LEVEL;
if (pg_size == IOMMU_PAGE_SIZE_2M)
return PAGE_MODE_2_LEVEL;
return PAGE_MODE_1_LEVEL;
}
static inline void free_pgtable_page(u64 *pt)
{
free_page((unsigned long)pt);
}
static void free_pgtable(u64 *pt, int level)
{
u64 *p;
int i;
for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
/* PTE present? */
if (!IOMMU_PTE_PRESENT(pt[i]))
continue;
if (is_large_pte(pt[i]))
continue;
/*
* Free the next level. No need to look at l1 tables here since
* they can only contain leaf PTEs; just free them directly.
*/
p = get_pgtable_pte(pt[i]);
if (level > 2)
free_pgtable(p, level - 1);
else
free_pgtable_page(p);
}
free_pgtable_page(pt);
}
/* Allocate page table */
static u64 *v2_alloc_pte(u64 *pgd, unsigned long iova,
unsigned long pg_size, bool *updated)
{
u64 *pte, *page;
int level, end_level;
level = get_pgtable_level() - 1;
end_level = page_size_to_level(pg_size);
pte = &pgd[PM_LEVEL_INDEX(level, iova)];
iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
while (level >= end_level) {
u64 __pte, __npte;
__pte = *pte;
if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
/* Unmap large pte */
cmpxchg64(pte, *pte, 0ULL);
*updated = true;
continue;
}
if (!IOMMU_PTE_PRESENT(__pte)) {
page = alloc_pgtable_page();
if (!page)
return NULL;
__npte = set_pgtable_attr(page);
/* pte could have been changed somewhere. */
if (cmpxchg64(pte, __pte, __npte) != __pte)
free_pgtable_page(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
continue;
}
level -= 1;
pte = get_pgtable_pte(__pte);
pte = &pte[PM_LEVEL_INDEX(level, iova)];
}
/* Tear down existing pte entries */
if (IOMMU_PTE_PRESENT(*pte)) {
u64 *__pte;
*updated = true;
__pte = get_pgtable_pte(*pte);
cmpxchg64(pte, *pte, 0ULL);
if (pg_size == IOMMU_PAGE_SIZE_1G)
free_pgtable(__pte, end_level - 1);
else if (pg_size == IOMMU_PAGE_SIZE_2M)
free_pgtable_page(__pte);
}
return pte;
}
/*
* This function checks if there is a PTE for a given dma address.
* If there is one, it returns the pointer to it.
*/
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long iova, unsigned long *page_size)
{
u64 *pte;
int level;
level = get_pgtable_level() - 1;
pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
/* Default page size is 4K */
*page_size = PAGE_SIZE;
while (level) {
/* Not present */
if (!IOMMU_PTE_PRESENT(*pte))
return NULL;
/* Walk to the next level */
pte = get_pgtable_pte(*pte);
pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];
/* Large page */
if (is_large_pte(*pte)) {
if (level == PAGE_MODE_3_LEVEL)
*page_size = IOMMU_PAGE_SIZE_1G;
else if (level == PAGE_MODE_2_LEVEL)
*page_size = IOMMU_PAGE_SIZE_2M;
else
return NULL; /* Wrongly set PSE bit in PTE */
break;
}
level -= 1;
}
return pte;
}
static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
u64 *pte;
unsigned long map_size;
unsigned long mapped_size = 0;
unsigned long o_iova = iova;
size_t size = pgcount << __ffs(pgsize);
int count = 0;
int ret = 0;
bool updated = false;
if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
return -EINVAL;
if (!(prot & IOMMU_PROT_MASK))
return -EINVAL;
while (mapped_size < size) {
map_size = get_alloc_page_size(pgsize);
pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated);
if (!pte) {
ret = -EINVAL;
goto out;
}
*pte = set_pte_attr(paddr, map_size, prot);
count++;
iova += map_size;
paddr += map_size;
mapped_size += map_size;
}
out:
if (updated) {
if (count > 1)
amd_iommu_flush_tlb(&pdom->domain, 0);
else
amd_iommu_flush_page(&pdom->domain, 0, o_iova);
}
if (mapped)
*mapped += mapped_size;
return ret;
}
static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
unsigned long unmap_size;
unsigned long unmapped = 0;
size_t size = pgcount << __ffs(pgsize);
u64 *pte;
if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
return 0;
while (unmapped < size) {
pte = fetch_pte(pgtable, iova, &unmap_size);
if (!pte)
return unmapped;
*pte = 0ULL;
iova = (iova & ~(unmap_size - 1)) + unmap_size;
unmapped += unmap_size;
}
return unmapped;
}
static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long offset_mask, pte_pgsize;
u64 *pte, __pte;
pte = fetch_pte(pgtable, iova, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
return 0;
offset_mask = pte_pgsize - 1;
__pte = __sme_clr(*pte & PM_ADDR_MASK);
return (__pte & ~offset_mask) | (iova & offset_mask);
}
/*
* ----------------------------------------------------
*/
static void v2_tlb_flush_all(void *cookie)
{
}
static void v2_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
}
static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
{
}
static const struct iommu_flush_ops v2_flush_ops = {
.tlb_flush_all = v2_tlb_flush_all,
.tlb_flush_walk = v2_tlb_flush_walk,
.tlb_add_page = v2_tlb_add_page,
};
static void v2_free_pgtable(struct io_pgtable *iop)
{
struct protection_domain *pdom;
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
pdom = container_of(pgtable, struct protection_domain, iop);
if (!(pdom->flags & PD_IOMMUV2_MASK))
return;
/*
* Make changes visible to IOMMUs. No need to clear gcr3 entry
* as gcr3 table is already freed.
*/
amd_iommu_domain_update(pdom);
/* Free page table */
free_pgtable(pgtable->pgd, get_pgtable_level());
}
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
struct protection_domain *pdom = (struct protection_domain *)cookie;
int ret;
pgtable->pgd = alloc_pgtable_page();
if (!pgtable->pgd)
return NULL;
ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
if (ret)
goto err_free_pgd;
pgtable->iop.ops.map_pages = iommu_v2_map_pages;
pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2,
cfg->ias = IOMMU_IN_ADDR_BIT_SIZE,
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v2_flush_ops;
return &pgtable->iop;
err_free_pgd:
free_pgtable_page(pgtable->pgd);
return NULL;
}
struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
.alloc = v2_alloc_pgtable,
.free = v2_free_pgtable,
};

@ -11,8 +11,6 @@
#include <linux/ratelimit.h> #include <linux/ratelimit.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/pci-ats.h> #include <linux/pci-ats.h>
#include <linux/bitmap.h> #include <linux/bitmap.h>
#include <linux/slab.h> #include <linux/slab.h>
@ -20,7 +18,6 @@
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/dma-map-ops.h> #include <linux/dma-map-ops.h>
#include <linux/dma-direct.h> #include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h> #include <linux/iommu-helper.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/amd-iommu.h> #include <linux/amd-iommu.h>
@ -42,6 +39,7 @@
#include <asm/dma.h> #include <asm/dma.h>
#include "amd_iommu.h" #include "amd_iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h" #include "../irq_remapping.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@ -66,10 +64,6 @@ LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map); LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map); LIST_HEAD(acpihid_map);
/*
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
*/
const struct iommu_ops amd_iommu_ops; const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier); static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@ -85,6 +79,7 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache; struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev); static void detach_device(struct device *dev);
static int domain_enable_v2(struct protection_domain *domain, int pasids);
/**************************************************************************** /****************************************************************************
* *
@ -1597,6 +1592,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C; tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
flags |= tmp; flags |= tmp;
if (domain->flags & PD_GIOV_MASK)
pte_root |= DTE_FLAG_GIOV;
} }
flags &= ~DEV_DOMID_MASK; flags &= ~DEV_DOMID_MASK;
@ -1650,6 +1648,10 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1; domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1; domain->dev_cnt += 1;
/* Override supported page sizes */
if (domain->flags & PD_GIOV_MASK)
domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
/* Update device table */ /* Update device table */
set_dte_entry(iommu, dev_data->devid, domain, set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2); ats, dev_data->iommu_v2);
@ -1694,7 +1696,7 @@ static void pdev_iommuv2_disable(struct pci_dev *pdev)
pci_disable_pasid(pdev); pci_disable_pasid(pdev);
} }
static int pdev_iommuv2_enable(struct pci_dev *pdev) static int pdev_pri_ats_enable(struct pci_dev *pdev)
{ {
int ret; int ret;
@ -1757,11 +1759,19 @@ static int attach_device(struct device *dev,
struct iommu_domain *def_domain = iommu_get_dma_domain(dev); struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
ret = -EINVAL; ret = -EINVAL;
if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
/*
* In case of using AMD_IOMMU_V1 page table mode and the device
* is enabling for PPR/ATS support (using v2 table),
* we need to make sure that the domain type is identity map.
*/
if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
def_domain->type != IOMMU_DOMAIN_IDENTITY) {
goto out; goto out;
}
if (dev_data->iommu_v2) { if (dev_data->iommu_v2) {
if (pdev_iommuv2_enable(pdev) != 0) if (pdev_pri_ats_enable(pdev) != 0)
goto out; goto out;
dev_data->ats.enabled = true; dev_data->ats.enabled = true;
@ -1852,6 +1862,10 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
if (!iommu) if (!iommu)
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
/* Not registered yet? */
if (!iommu->iommu.ops)
return ERR_PTR(-ENODEV);
if (dev_iommu_priv_get(dev)) if (dev_iommu_priv_get(dev))
return &iommu->iommu; return &iommu->iommu;
@ -1938,25 +1952,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_complete(domain); amd_iommu_domain_flush_complete(domain);
} }
int __init amd_iommu_init_api(void)
{
int err;
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
return err;
#ifdef CONFIG_ARM_AMBA
err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
if (err)
return err;
#endif
err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
if (err)
return err;
return 0;
}
/***************************************************************************** /*****************************************************************************
* *
* The following functions belong to the exported interface of AMD IOMMU * The following functions belong to the exported interface of AMD IOMMU
@ -1989,12 +1984,12 @@ static void protection_domain_free(struct protection_domain *domain)
if (!domain) if (!domain)
return; return;
if (domain->id)
domain_id_free(domain->id);
if (domain->iop.pgtbl_cfg.tlb) if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops); free_io_pgtable_ops(&domain->iop.iop.ops);
if (domain->id)
domain_id_free(domain->id);
kfree(domain); kfree(domain);
} }
@ -2012,8 +2007,10 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
if (mode != PAGE_MODE_NONE) { if (mode != PAGE_MODE_NONE) {
pt_root = (void *)get_zeroed_page(GFP_KERNEL); pt_root = (void *)get_zeroed_page(GFP_KERNEL);
if (!pt_root) if (!pt_root) {
domain_id_free(domain->id);
return -ENOMEM; return -ENOMEM;
}
} }
amd_iommu_domain_set_pgtable(domain, pt_root, mode); amd_iommu_domain_set_pgtable(domain, pt_root, mode);
@ -2021,6 +2018,24 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
return 0; return 0;
} }
static int protection_domain_init_v2(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
domain->id = domain_id_alloc();
if (!domain->id)
return -ENOMEM;
INIT_LIST_HEAD(&domain->dev_list);
domain->flags |= PD_GIOV_MASK;
if (domain_enable_v2(domain, 1)) {
domain_id_free(domain->id);
return -ENOMEM;
}
return 0;
}
static struct protection_domain *protection_domain_alloc(unsigned int type) static struct protection_domain *protection_domain_alloc(unsigned int type)
{ {
struct io_pgtable_ops *pgtbl_ops; struct io_pgtable_ops *pgtbl_ops;
@ -2048,6 +2063,9 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
case AMD_IOMMU_V1: case AMD_IOMMU_V1:
ret = protection_domain_init_v1(domain, mode); ret = protection_domain_init_v1(domain, mode);
break; break;
case AMD_IOMMU_V2:
ret = protection_domain_init_v2(domain);
break;
default: default:
ret = -EINVAL; ret = -EINVAL;
} }
@ -2056,8 +2074,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
goto out_err; goto out_err;
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain); pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
if (!pgtbl_ops) if (!pgtbl_ops) {
domain_id_free(domain->id);
goto out_err; goto out_err;
}
return domain; return domain;
out_err: out_err:
@ -2175,13 +2195,13 @@ static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
struct protection_domain *domain = to_pdomain(dom); struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops; struct io_pgtable_ops *ops = &domain->iop.iop.ops;
if (ops->map) if (ops->map_pages)
domain_flush_np_cache(domain, iova, size); domain_flush_np_cache(domain, iova, size);
} }
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
phys_addr_t paddr, size_t page_size, int iommu_prot, phys_addr_t paddr, size_t pgsize, size_t pgcount,
gfp_t gfp) int iommu_prot, gfp_t gfp, size_t *mapped)
{ {
struct protection_domain *domain = to_pdomain(dom); struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops; struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@ -2197,8 +2217,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE) if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW; prot |= IOMMU_PROT_IW;
if (ops->map) if (ops->map_pages) {
ret = ops->map(ops, iova, paddr, page_size, prot, gfp); ret = ops->map_pages(ops, iova, paddr, pgsize,
pgcount, prot, gfp, mapped);
}
return ret; return ret;
} }
@ -2224,9 +2246,9 @@ static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
iommu_iotlb_gather_add_range(gather, iova, size); iommu_iotlb_gather_add_range(gather, iova, size);
} }
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
size_t page_size, size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather) struct iommu_iotlb_gather *gather)
{ {
struct protection_domain *domain = to_pdomain(dom); struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops; struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@ -2236,9 +2258,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
(domain->iop.mode == PAGE_MODE_NONE)) (domain->iop.mode == PAGE_MODE_NONE))
return 0; return 0;
r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0; r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size); if (r)
amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
return r; return r;
} }
@ -2252,7 +2275,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return ops->iova_to_phys(ops, iova); return ops->iova_to_phys(ops, iova);
} }
static bool amd_iommu_capable(enum iommu_cap cap) static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
{ {
switch (cap) { switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY: case IOMMU_CAP_CACHE_COHERENCY:
@ -2400,8 +2423,8 @@ const struct iommu_ops amd_iommu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) { .default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device, .attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device, .detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map, .map_pages = amd_iommu_map_pages,
.unmap = amd_iommu_unmap, .unmap_pages = amd_iommu_unmap_pages,
.iotlb_sync_map = amd_iommu_iotlb_sync_map, .iotlb_sync_map = amd_iommu_iotlb_sync_map,
.iova_to_phys = amd_iommu_iova_to_phys, .iova_to_phys = amd_iommu_iova_to_phys,
.flush_iotlb_all = amd_iommu_flush_iotlb_all, .flush_iotlb_all = amd_iommu_flush_iotlb_all,
@ -2448,11 +2471,10 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
} }
EXPORT_SYMBOL(amd_iommu_domain_direct_map); EXPORT_SYMBOL(amd_iommu_domain_direct_map);
int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids) /* Note: This function expects iommu_domain->lock to be held prior calling the function. */
static int domain_enable_v2(struct protection_domain *domain, int pasids)
{ {
struct protection_domain *domain = to_pdomain(dom); int levels;
unsigned long flags;
int levels, ret;
/* Number of GCR3 table levels required */ /* Number of GCR3 table levels required */
for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9) for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
@ -2461,7 +2483,25 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
if (levels > amd_iommu_max_glx_val) if (levels > amd_iommu_max_glx_val)
return -EINVAL; return -EINVAL;
spin_lock_irqsave(&domain->lock, flags); domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
if (domain->gcr3_tbl == NULL)
return -ENOMEM;
domain->glx = levels;
domain->flags |= PD_IOMMUV2_MASK;
amd_iommu_domain_update(domain);
return 0;
}
int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
{
struct protection_domain *pdom = to_pdomain(dom);
unsigned long flags;
int ret;
spin_lock_irqsave(&pdom->lock, flags);
/* /*
* Save us all sanity checks whether devices already in the * Save us all sanity checks whether devices already in the
@ -2469,24 +2509,14 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
* devices attached when it is switched into IOMMUv2 mode. * devices attached when it is switched into IOMMUv2 mode.
*/ */
ret = -EBUSY; ret = -EBUSY;
if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
goto out; goto out;
ret = -ENOMEM; if (!pdom->gcr3_tbl)
domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); ret = domain_enable_v2(pdom, pasids);
if (domain->gcr3_tbl == NULL)
goto out;
domain->glx = levels;
domain->flags |= PD_IOMMUV2_MASK;
amd_iommu_domain_update(domain);
ret = 0;
out: out:
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&pdom->lock, flags);
return ret; return ret;
} }
EXPORT_SYMBOL(amd_iommu_domain_enable_v2); EXPORT_SYMBOL(amd_iommu_domain_enable_v2);

@ -15,7 +15,6 @@
#include <linux/bitfield.h> #include <linux/bitfield.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/dev_printk.h> #include <linux/dev_printk.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
@ -33,6 +32,8 @@
#include <linux/swab.h> #include <linux/swab.h>
#include <linux/types.h> #include <linux/types.h>
#include "dma-iommu.h"
#define DART_MAX_STREAMS 16 #define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4 #define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2 #define MAX_DARTS_PER_DEVICE 2
@ -81,10 +82,16 @@
#define DART_TTBR_VALID BIT(31) #define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12 #define DART_TTBR_SHIFT 12
struct apple_dart_hw {
u32 oas;
enum io_pgtable_fmt fmt;
};
/* /*
* Private structure associated with each DART device. * Private structure associated with each DART device.
* *
* @dev: device struct * @dev: device struct
* @hw: SoC-specific hardware data
* @regs: mapped MMIO region * @regs: mapped MMIO region
* @irq: interrupt number, can be shared with other DARTs * @irq: interrupt number, can be shared with other DARTs
* @clks: clocks associated with this DART * @clks: clocks associated with this DART
@ -98,6 +105,7 @@
*/ */
struct apple_dart { struct apple_dart {
struct device *dev; struct device *dev;
const struct apple_dart_hw *hw;
void __iomem *regs; void __iomem *regs;
@ -421,13 +429,13 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
pgtbl_cfg = (struct io_pgtable_cfg){ pgtbl_cfg = (struct io_pgtable_cfg){
.pgsize_bitmap = dart->pgsize, .pgsize_bitmap = dart->pgsize,
.ias = 32, .ias = 32,
.oas = 36, .oas = dart->hw->oas,
.coherent_walk = 1, .coherent_walk = 1,
.iommu_dev = dart->dev, .iommu_dev = dart->dev,
}; };
dart_domain->pgtbl_ops = dart_domain->pgtbl_ops =
alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain); alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
if (!dart_domain->pgtbl_ops) { if (!dart_domain->pgtbl_ops) {
ret = -ENOMEM; ret = -ENOMEM;
goto done; goto done;
@ -820,27 +828,6 @@ static irqreturn_t apple_dart_irq(int irq, void *dev)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static int apple_dart_set_bus_ops(const struct iommu_ops *ops)
{
int ret;
if (!iommu_present(&platform_bus_type)) {
ret = bus_set_iommu(&platform_bus_type, ops);
if (ret)
return ret;
}
#ifdef CONFIG_PCI
if (!iommu_present(&pci_bus_type)) {
ret = bus_set_iommu(&pci_bus_type, ops);
if (ret) {
bus_set_iommu(&platform_bus_type, NULL);
return ret;
}
}
#endif
return 0;
}
static int apple_dart_probe(struct platform_device *pdev) static int apple_dart_probe(struct platform_device *pdev)
{ {
int ret; int ret;
@ -854,6 +841,7 @@ static int apple_dart_probe(struct platform_device *pdev)
return -ENOMEM; return -ENOMEM;
dart->dev = dev; dart->dev = dev;
dart->hw = of_device_get_match_data(dev);
spin_lock_init(&dart->lock); spin_lock_init(&dart->lock);
dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@ -895,14 +883,10 @@ static int apple_dart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dart); platform_set_drvdata(pdev, dart);
ret = apple_dart_set_bus_ops(&apple_dart_iommu_ops);
if (ret)
goto err_free_irq;
ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s", ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s",
dev_name(&pdev->dev)); dev_name(&pdev->dev));
if (ret) if (ret)
goto err_remove_bus_ops; goto err_free_irq;
ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev); ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev);
if (ret) if (ret)
@ -916,8 +900,6 @@ static int apple_dart_probe(struct platform_device *pdev)
err_sysfs_remove: err_sysfs_remove:
iommu_device_sysfs_remove(&dart->iommu); iommu_device_sysfs_remove(&dart->iommu);
err_remove_bus_ops:
apple_dart_set_bus_ops(NULL);
err_free_irq: err_free_irq:
free_irq(dart->irq, dart); free_irq(dart->irq, dart);
err_clk_disable: err_clk_disable:
@ -932,7 +914,6 @@ static int apple_dart_remove(struct platform_device *pdev)
apple_dart_hw_reset(dart); apple_dart_hw_reset(dart);
free_irq(dart->irq, dart); free_irq(dart->irq, dart);
apple_dart_set_bus_ops(NULL);
iommu_device_unregister(&dart->iommu); iommu_device_unregister(&dart->iommu);
iommu_device_sysfs_remove(&dart->iommu); iommu_device_sysfs_remove(&dart->iommu);
@ -942,8 +923,18 @@ static int apple_dart_remove(struct platform_device *pdev)
return 0; return 0;
} }
static const struct apple_dart_hw apple_dart_hw_t8103 = {
.oas = 36,
.fmt = APPLE_DART,
};
static const struct apple_dart_hw apple_dart_hw_t6000 = {
.oas = 42,
.fmt = APPLE_DART2,
};
static const struct of_device_id apple_dart_of_match[] = { static const struct of_device_id apple_dart_of_match[] = {
{ .compatible = "apple,t8103-dart", .data = NULL }, { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
{ .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, apple_dart_of_match); MODULE_DEVICE_TABLE(of, apple_dart_of_match);

@ -14,7 +14,6 @@
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/crash_dump.h> #include <linux/crash_dump.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/io-pgtable.h> #include <linux/io-pgtable.h>
@ -28,9 +27,8 @@
#include <linux/pci-ats.h> #include <linux/pci-ats.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include "arm-smmu-v3.h" #include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
#include "../../iommu-sva-lib.h" #include "../../iommu-sva-lib.h"
static bool disable_bypass = true; static bool disable_bypass = true;
@ -1992,11 +1990,14 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
}; };
/* IOMMU API */ /* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap) static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{ {
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
switch (cap) { switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY: case IOMMU_CAP_CACHE_COHERENCY:
return true; /* Assume that a coherent TCU implies coherent TBUs */
return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
case IOMMU_CAP_NOEXEC: case IOMMU_CAP_NOEXEC:
return true; return true;
default: default:
@ -3694,43 +3695,6 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
return SZ_128K; return SZ_128K;
} }
static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
{
int err;
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != ops) {
err = bus_set_iommu(&pci_bus_type, ops);
if (err)
return err;
}
#endif
#ifdef CONFIG_ARM_AMBA
if (amba_bustype.iommu_ops != ops) {
err = bus_set_iommu(&amba_bustype, ops);
if (err)
goto err_reset_pci_ops;
}
#endif
if (platform_bus_type.iommu_ops != ops) {
err = bus_set_iommu(&platform_bus_type, ops);
if (err)
goto err_reset_amba_ops;
}
return 0;
err_reset_amba_ops:
#ifdef CONFIG_ARM_AMBA
bus_set_iommu(&amba_bustype, NULL);
#endif
err_reset_pci_ops: __maybe_unused;
#ifdef CONFIG_PCI
bus_set_iommu(&pci_bus_type, NULL);
#endif
return err;
}
static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start, static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
resource_size_t size) resource_size_t size)
{ {
@ -3869,27 +3833,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) { if (ret) {
dev_err(dev, "Failed to register iommu\n"); dev_err(dev, "Failed to register iommu\n");
goto err_sysfs_remove; iommu_device_sysfs_remove(&smmu->iommu);
return ret;
} }
ret = arm_smmu_set_bus_ops(&arm_smmu_ops);
if (ret)
goto err_unregister_device;
return 0; return 0;
err_unregister_device:
iommu_device_unregister(&smmu->iommu);
err_sysfs_remove:
iommu_device_sysfs_remove(&smmu->iommu);
return ret;
} }
static int arm_smmu_device_remove(struct platform_device *pdev) static int arm_smmu_device_remove(struct platform_device *pdev)
{ {
struct arm_smmu_device *smmu = platform_get_drvdata(pdev); struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
arm_smmu_set_bus_ops(NULL);
iommu_device_unregister(&smmu->iommu); iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu); iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu); arm_smmu_device_disable(smmu);

@ -21,7 +21,6 @@
#include <linux/acpi_iort.h> #include <linux/acpi_iort.h>
#include <linux/bitfield.h> #include <linux/bitfield.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
@ -37,10 +36,10 @@
#include <linux/ratelimit.h> #include <linux/ratelimit.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/amba/bus.h>
#include <linux/fsl/mc.h> #include <linux/fsl/mc.h>
#include "arm-smmu.h" #include "arm-smmu.h"
#include "../../dma-iommu.h"
/* /*
* Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
@ -93,8 +92,6 @@ static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops; static struct iommu_ops arm_smmu_ops;
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS #ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static int arm_smmu_bus_init(struct iommu_ops *ops);
static struct device_node *dev_get_dev_node(struct device *dev) static struct device_node *dev_get_dev_node(struct device *dev)
{ {
if (dev_is_pci(dev)) { if (dev_is_pci(dev)) {
@ -180,20 +177,6 @@ static int arm_smmu_register_legacy_master(struct device *dev,
kfree(sids); kfree(sids);
return err; return err;
} }
/*
* With the legacy DT binding in play, we have no guarantees about
* probe order, but then we're also not doing default domains, so we can
* delay setting bus ops until we're sure every possible SMMU is ready,
* and that way ensure that no probe_device() calls get missed.
*/
static int arm_smmu_legacy_bus_init(void)
{
if (using_legacy_binding)
return arm_smmu_bus_init(&arm_smmu_ops);
return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);
#else #else
static int arm_smmu_register_legacy_master(struct device *dev, static int arm_smmu_register_legacy_master(struct device *dev,
struct arm_smmu_device **smmu) struct arm_smmu_device **smmu)
@ -1330,15 +1313,14 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
return ops->iova_to_phys(ops, iova); return ops->iova_to_phys(ops, iova);
} }
static bool arm_smmu_capable(enum iommu_cap cap) static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{ {
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
switch (cap) { switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY: case IOMMU_CAP_CACHE_COHERENCY:
/* /* Assume that a coherent TCU implies coherent TBUs */
* Return true here as the SMMU can always send out coherent return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
* requests.
*/
return true;
case IOMMU_CAP_NOEXEC: case IOMMU_CAP_NOEXEC:
return true; return true;
default: default:
@ -2016,52 +1998,6 @@ static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
return 0; return 0;
} }
static int arm_smmu_bus_init(struct iommu_ops *ops)
{
int err;
/* Oh, for a proper bus abstraction */
if (!iommu_present(&platform_bus_type)) {
err = bus_set_iommu(&platform_bus_type, ops);
if (err)
return err;
}
#ifdef CONFIG_ARM_AMBA
if (!iommu_present(&amba_bustype)) {
err = bus_set_iommu(&amba_bustype, ops);
if (err)
goto err_reset_platform_ops;
}
#endif
#ifdef CONFIG_PCI
if (!iommu_present(&pci_bus_type)) {
err = bus_set_iommu(&pci_bus_type, ops);
if (err)
goto err_reset_amba_ops;
}
#endif
#ifdef CONFIG_FSL_MC_BUS
if (!iommu_present(&fsl_mc_bus_type)) {
err = bus_set_iommu(&fsl_mc_bus_type, ops);
if (err)
goto err_reset_pci_ops;
}
#endif
return 0;
err_reset_pci_ops: __maybe_unused;
#ifdef CONFIG_PCI
bus_set_iommu(&pci_bus_type, NULL);
#endif
err_reset_amba_ops: __maybe_unused;
#ifdef CONFIG_ARM_AMBA
bus_set_iommu(&amba_bustype, NULL);
#endif
err_reset_platform_ops: __maybe_unused;
bus_set_iommu(&platform_bus_type, NULL);
return err;
}
static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu) static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
{ {
struct list_head rmr_list; struct list_head rmr_list;
@ -2226,7 +2162,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (err) { if (err) {
dev_err(dev, "Failed to register iommu\n"); dev_err(dev, "Failed to register iommu\n");
-		goto err_sysfs_remove;
+		iommu_device_sysfs_remove(&smmu->iommu);
+		return err;
} }
platform_set_drvdata(pdev, smmu); platform_set_drvdata(pdev, smmu);
@ -2248,24 +2185,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
pm_runtime_enable(dev); pm_runtime_enable(dev);
} }
/*
* For ACPI and generic DT bindings, an SMMU will be probed before
* any device which might need it, so we want the bus ops in place
* ready to handle default domain setup as soon as any SMMU exists.
*/
if (!using_legacy_binding) {
err = arm_smmu_bus_init(&arm_smmu_ops);
if (err)
goto err_unregister_device;
}
return 0; return 0;
err_unregister_device:
iommu_device_unregister(&smmu->iommu);
err_sysfs_remove:
iommu_device_sysfs_remove(&smmu->iommu);
return err;
} }
static int arm_smmu_device_remove(struct platform_device *pdev) static int arm_smmu_device_remove(struct platform_device *pdev)
@ -2278,7 +2198,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
dev_notice(&pdev->dev, "disabling translation\n"); dev_notice(&pdev->dev, "disabling translation\n");
arm_smmu_bus_init(NULL);
iommu_device_unregister(&smmu->iommu); iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu); iommu_device_sysfs_remove(&smmu->iommu);


@ -493,7 +493,7 @@ static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
return ret; return ret;
} }
static bool qcom_iommu_capable(enum iommu_cap cap) static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
{ {
switch (cap) { switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY: case IOMMU_CAP_CACHE_COHERENCY:
@ -837,8 +837,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
goto err_pm_disable; goto err_pm_disable;
} }
bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
if (qcom_iommu->local_base) { if (qcom_iommu->local_base) {
pm_runtime_get_sync(dev); pm_runtime_get_sync(dev);
writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS); writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
@ -856,8 +854,6 @@ static int qcom_iommu_device_remove(struct platform_device *pdev)
{ {
struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev); struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
bus_set_iommu(&platform_bus_type, NULL);
pm_runtime_force_suspend(&pdev->dev); pm_runtime_force_suspend(&pdev->dev);
platform_set_drvdata(pdev, NULL); platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&qcom_iommu->iommu); iommu_device_sysfs_remove(&qcom_iommu->iommu);


@ -13,7 +13,6 @@
#include <linux/crash_dump.h> #include <linux/crash_dump.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/dma-direct.h> #include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h> #include <linux/dma-map-ops.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/huge_mm.h> #include <linux/huge_mm.h>
@ -30,6 +29,8 @@
#include <linux/swiotlb.h> #include <linux/swiotlb.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include "dma-iommu.h"
struct iommu_dma_msi_page { struct iommu_dma_msi_page {
struct list_head list; struct list_head list;
dma_addr_t iova; dma_addr_t iova;
@ -1633,6 +1634,13 @@ out_free_page:
return NULL; return NULL;
} }
/**
* iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
* @desc: MSI descriptor, will store the MSI page
* @msi_addr: MSI target address to be mapped
*
* Return: 0 on success or negative error code if the mapping failed.
*/
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{ {
struct device *dev = msi_desc_to_dev(desc); struct device *dev = msi_desc_to_dev(desc);
@ -1661,8 +1669,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
return 0; return 0;
} }
-void iommu_dma_compose_msi_msg(struct msi_desc *desc,
-			       struct msi_msg *msg)
+/**
+ * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
+ * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
+ * @msg: MSI message containing target physical address
+ */
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{ {
struct device *dev = msi_desc_to_dev(desc); struct device *dev = msi_desc_to_dev(desc);
const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

drivers/iommu/dma-iommu.h (new file, 42 lines)

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014-2015 ARM Ltd.
*/
#ifndef __DMA_IOMMU_H
#define __DMA_IOMMU_H
#include <linux/iommu.h>
#ifdef CONFIG_IOMMU_DMA
int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);
int iommu_dma_init_fq(struct iommu_domain *domain);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
extern bool iommu_dma_forcedac;
#else /* CONFIG_IOMMU_DMA */
static inline int iommu_dma_init_fq(struct iommu_domain *domain)
{
return -EINVAL;
}
static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
{
return -ENODEV;
}
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
}
#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */
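
With the header now private to drivers/iommu/, the !CONFIG_IOMMU_DMA stubs above let shared core code degrade gracefully instead of sprinkling #ifdefs at every call site. A minimal sketch of a caller relying on that behaviour (illustrative only; the function name is made up, but it mirrors how the core upgrades a default domain to flush-queue mode):

#include <linux/iommu.h>
#include "dma-iommu.h"

/* Hypothetical helper: try to enable the IOVA flush queue on a DMA domain */
static int example_enable_flush_queue(struct iommu_domain *domain)
{
	int ret = iommu_dma_init_fq(domain);

	/* With IOMMU_DMA disabled the stub returns -EINVAL: stay in strict mode */
	if (ret)
		return ret;

	domain->type = IOMMU_DOMAIN_DMA_FQ;
	return 0;
}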


@ -1446,16 +1446,7 @@ static int __init exynos_iommu_init(void)
goto err_zero_lv2; goto err_zero_lv2;
} }
ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
if (ret) {
pr_err("%s: Failed to register exynos-iommu driver.\n",
__func__);
goto err_set_iommu;
}
return 0; return 0;
err_set_iommu:
kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2: err_zero_lv2:
platform_driver_unregister(&exynos_sysmmu_driver); platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver: err_reg_driver:


@ -178,7 +178,7 @@ static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
return iova; return iova;
} }
static bool fsl_pamu_capable(enum iommu_cap cap) static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
{ {
return cap == IOMMU_CAP_CACHE_COHERENCY; return cap == IOMMU_CAP_CACHE_COHERENCY;
} }
@ -476,11 +476,7 @@ int __init pamu_domain_init(void)
if (ret) { if (ret) {
iommu_device_sysfs_remove(&pamu_iommu); iommu_device_sysfs_remove(&pamu_iommu);
pr_err("Can't register iommu device\n"); pr_err("Can't register iommu device\n");
return ret;
} }
bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
return ret; return ret;
} }


@ -19,8 +19,9 @@ config INTEL_IOMMU
select DMAR_TABLE select DMAR_TABLE
select SWIOTLB select SWIOTLB
select IOASID select IOASID
select IOMMU_DMA
select PCI_ATS select PCI_ATS
select PCI_PRI
select PCI_PASID
help help
DMA remapping (DMAR) devices support enables independent address DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices. translations for Direct Memory Access (DMA) from devices.
@ -48,10 +49,7 @@ config INTEL_IOMMU_DEBUGFS
config INTEL_IOMMU_SVM config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU" bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on X86_64 depends on X86_64
select PCI_PASID
select PCI_PRI
select MMU_NOTIFIER select MMU_NOTIFIER
select IOASID
select IOMMU_SVA select IOMMU_SVA
help help
Shared Virtual Memory (SVM) provides a facility for devices Shared Virtual Memory (SVM) provides a facility for devices


@ -37,7 +37,7 @@ static inline void check_dmar_capabilities(struct intel_iommu *a,
MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK); MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK); MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, 5lp_support, CAP_FL5LP_MASK); CHECK_FEATURE_MISMATCH(a, b, cap, fl5lp_support, CAP_FL5LP_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK); CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK); CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK); CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
@ -84,7 +84,7 @@ static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type
goto out; goto out;
} }
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, 5lp_support, CAP_FL5LP_MASK); CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK); CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK); CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK); CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);


@ -15,7 +15,6 @@
#include <linux/crash_dump.h> #include <linux/crash_dump.h>
#include <linux/dma-direct.h> #include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dmi.h> #include <linux/dmi.h>
#include <linux/intel-svm.h> #include <linux/intel-svm.h>
#include <linux/memory.h> #include <linux/memory.h>
@ -26,6 +25,7 @@
#include <linux/tboot.h> #include <linux/tboot.h>
#include "iommu.h" #include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h" #include "../irq_remapping.h"
#include "../iommu-sva-lib.h" #include "../iommu-sva-lib.h"
#include "pasid.h" #include "pasid.h"
@ -199,6 +199,11 @@ static inline void context_set_domain_id(struct context_entry *context,
context->hi |= (value & ((1 << 16) - 1)) << 8; context->hi |= (value & ((1 << 16) - 1)) << 8;
} }
static inline void context_set_pasid(struct context_entry *context)
{
context->lo |= CONTEXT_PASIDE;
}
static inline int context_domain_id(struct context_entry *c) static inline int context_domain_id(struct context_entry *c)
{ {
return((c->hi >> 8) & 0xffff); return((c->hi >> 8) & 0xffff);
@ -399,7 +404,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{ {
unsigned long fl_sagaw, sl_sagaw; unsigned long fl_sagaw, sl_sagaw;
fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0); fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
sl_sagaw = cap_sagaw(iommu->cap); sl_sagaw = cap_sagaw(iommu->cap);
/* Second level only. */ /* Second level only. */
@ -1234,6 +1239,13 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag); raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/*
* Hardware invalidates all DMA remapping hardware translation
* caches as part of SRTP flow.
*/
if (cap_esrtps(iommu->cap))
return;
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
if (sm_supported(iommu)) if (sm_supported(iommu))
qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
@ -1350,21 +1362,18 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
} }
 static struct device_domain_info *
-iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
-			u8 bus, u8 devfn)
+domain_lookup_dev_info(struct dmar_domain *domain,
+		       struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
 	struct device_domain_info *info;
 	unsigned long flags;
 
-	if (!iommu->qi)
-		return NULL;
-
 	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
 		if (info->iommu == iommu && info->bus == bus &&
 		    info->devfn == devfn) {
 			spin_unlock_irqrestore(&domain->lock, flags);
-			return info->ats_supported ? info : NULL;
+			return info;
} }
} }
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
@ -1389,7 +1398,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
} }
static void iommu_enable_dev_iotlb(struct device_domain_info *info) static void iommu_enable_pci_caps(struct device_domain_info *info)
{ {
struct pci_dev *pdev; struct pci_dev *pdev;
@ -1412,7 +1421,6 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
info->pfsid = pci_dev_id(pf_pdev); info->pfsid = pci_dev_id(pf_pdev);
} }
#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of /* The PCIe spec, in its wisdom, declares that the behaviour of
the device if you enable PASID support after ATS support is the device if you enable PASID support after ATS support is
undefined. So always enable PASID support on devices which undefined. So always enable PASID support on devices which
@ -1425,7 +1433,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
(info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) && (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
!pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH)) !pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
info->pri_enabled = 1; info->pri_enabled = 1;
#endif
if (info->ats_supported && pci_ats_page_aligned(pdev) && if (info->ats_supported && pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1; info->ats_enabled = 1;
@ -1448,16 +1456,16 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
info->ats_enabled = 0; info->ats_enabled = 0;
domain_update_iotlb(info->domain); domain_update_iotlb(info->domain);
} }
#ifdef CONFIG_INTEL_IOMMU_SVM
if (info->pri_enabled) { if (info->pri_enabled) {
pci_disable_pri(pdev); pci_disable_pri(pdev);
info->pri_enabled = 0; info->pri_enabled = 0;
} }
if (info->pasid_enabled) { if (info->pasid_enabled) {
pci_disable_pasid(pdev); pci_disable_pasid(pdev);
info->pasid_enabled = 0; info->pasid_enabled = 0;
} }
#endif
} }
static void __iommu_flush_dev_iotlb(struct device_domain_info *info, static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
@ -1907,7 +1915,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
u8 bus, u8 devfn) u8 bus, u8 devfn)
{ {
struct device_domain_info *info = struct device_domain_info *info =
iommu_support_dev_iotlb(domain, iommu, bus, devfn); domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu); u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL; int translation = CONTEXT_TT_MULTI_LEVEL;
struct context_entry *context; struct context_entry *context;
@ -1980,6 +1988,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_set_sm_dte(context); context_set_sm_dte(context);
if (info && info->pri_supported) if (info && info->pri_supported)
context_set_sm_pre(context); context_set_sm_pre(context);
if (info && info->pasid_supported)
context_set_pasid(context);
} else { } else {
struct dma_pte *pgd = domain->pgd; struct dma_pte *pgd = domain->pgd;
int agaw; int agaw;
@ -2037,7 +2047,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
} else { } else {
iommu_flush_write_buffer(iommu); iommu_flush_write_buffer(iommu);
} }
iommu_enable_dev_iotlb(info); iommu_enable_pci_caps(info);
ret = 0; ret = 0;
@ -3896,7 +3906,6 @@ static int __init probe_acpi_namespace_devices(void)
continue; continue;
} }
pn->dev->bus->iommu_ops = &intel_iommu_ops;
ret = iommu_probe_device(pn->dev); ret = iommu_probe_device(pn->dev);
if (ret) if (ret)
break; break;
@ -4029,7 +4038,6 @@ int __init intel_iommu_init(void)
} }
up_read(&dmar_global_lock); up_read(&dmar_global_lock);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through) if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb); register_memory_notifier(&intel_iommu_memory_nb);
@ -4437,7 +4445,7 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true; return true;
} }
static bool intel_iommu_capable(enum iommu_cap cap) static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
{ {
if (cap == IOMMU_CAP_CACHE_COHERENCY) if (cap == IOMMU_CAP_CACHE_COHERENCY)
return true; return true;
@ -4457,7 +4465,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
u8 bus, devfn; u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn); iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu) if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
info = kzalloc(sizeof(*info), GFP_KERNEL); info = kzalloc(sizeof(*info), GFP_KERNEL);
@ -4574,52 +4582,6 @@ static void intel_iommu_get_resv_regions(struct device *device,
list_add_tail(&reg->list, head); list_add_tail(&reg->list, head);
} }
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct context_entry *context;
struct dmar_domain *domain;
u64 ctx_lo;
int ret;
domain = info->domain;
if (!domain)
return -EINVAL;
spin_lock(&iommu->lock);
ret = -EINVAL;
if (!info->pasid_supported)
goto out;
context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
if (WARN_ON(!context))
goto out;
ctx_lo = context[0].lo;
if (!(ctx_lo & CONTEXT_PASIDE)) {
ctx_lo |= CONTEXT_PASIDE;
context[0].lo = ctx_lo;
wmb();
iommu->flush.flush_context(iommu,
domain_id_iommu(domain, iommu),
PCI_DEVID(info->bus, info->devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
}
/* Enable PASID support in the device, if it wasn't already */
if (!info->pasid_enabled)
iommu_enable_dev_iotlb(info);
ret = 0;
out:
spin_unlock(&iommu->lock);
return ret;
}
static struct iommu_group *intel_iommu_device_group(struct device *dev) static struct iommu_group *intel_iommu_device_group(struct device *dev)
{ {
if (dev_is_pci(dev)) if (dev_is_pci(dev))
@ -4643,9 +4605,6 @@ static int intel_iommu_enable_sva(struct device *dev)
if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
return -ENODEV; return -ENODEV;
if (intel_iommu_enable_pasid(iommu, dev))
return -ENODEV;
if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled) if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
return -EINVAL; return -EINVAL;

View File

@ -146,7 +146,9 @@
/* /*
* Decoding Capability Register * Decoding Capability Register
*/ */
-#define cap_5lp_support(c)	(((c) >> 60) & 1)
+#define cap_esrtps(c)		(((c) >> 63) & 1)
+#define cap_esirtps(c)		(((c) >> 62) & 1)
+#define cap_fl5lp_support(c)	(((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1) #define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_fl1gp_support(c) (((c) >> 56) & 1) #define cap_fl1gp_support(c) (((c) >> 56) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1) #define cap_read_drain(c) (((c) >> 55) & 1)
@ -586,6 +588,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU_SVM #ifdef CONFIG_INTEL_IOMMU_SVM
struct page_req_dsc *prq; struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */ unsigned char prq_name[16]; /* Name for PRQ interrupt */
unsigned long prq_seq_number;
struct completion prq_complete; struct completion prq_complete;
struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */ struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif #endif
@ -741,7 +744,6 @@ extern int dmar_ir_support(void);
void *alloc_pgtable_page(int node); void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr); void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu); void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM #ifdef CONFIG_INTEL_IOMMU_SVM
@ -761,7 +763,6 @@ struct intel_svm_dev {
struct device *dev; struct device *dev;
struct intel_iommu *iommu; struct intel_iommu *iommu;
struct iommu_sva sva; struct iommu_sva sva;
unsigned long prq_seq_number;
u32 pasid; u32 pasid;
int users; int users;
u16 did; u16 did;
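
The two new capability macros above decode bits 63 (ESRTPS) and 62 (ESIRTPS) of the VT-d capability register; when set, hardware already flushes the translation or interrupt-entry caches as part of the set-root-table-pointer flows, which is what the iommu.c and irq_remapping.c hunks key off. A throwaway sketch of reading them (illustration only, not part of the series):

/* iommu->cap is the cached DMAR capability register */
static void example_report_srtp_caps(struct intel_iommu *iommu)
{
	if (cap_esrtps(iommu->cap))
		pr_info("%s: SRTP flow invalidates translation caches itself\n",
			iommu->name);
	if (cap_esirtps(iommu->cap))
		pr_info("%s: SIRTP flow invalidates the interrupt entry cache\n",
			iommu->name);
}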


@ -494,7 +494,8 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
* Global invalidation of interrupt entry cache to make sure the * Global invalidation of interrupt entry cache to make sure the
* hardware uses the new irq remapping table. * hardware uses the new irq remapping table.
*/ */
-	qi_global_iec(iommu);
+	if (!cap_esirtps(iommu->cap))
+		qi_global_iec(iommu);
} }
static void iommu_enable_irq_remapping(struct intel_iommu *iommu) static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
@ -680,7 +681,8 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
* global invalidation of interrupt entry cache before disabling * global invalidation of interrupt entry cache before disabling
* interrupt-remapping. * interrupt-remapping.
*/ */
-	qi_global_iec(iommu);
+	if (!cap_esirtps(iommu->cap))
+		qi_global_iec(iommu);
raw_spin_lock_irqsave(&iommu->register_lock, flags); raw_spin_lock_irqsave(&iommu->register_lock, flags);


@ -392,16 +392,6 @@ pasid_set_flpm(struct pasid_entry *pe, u64 value)
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2); pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
} }
/*
* Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_eafe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}
static void static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu, pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, u32 pasid) u16 did, u32 pasid)
@ -529,7 +519,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
} }
} }
if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) { if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
pr_err("No 5-level paging support for first-level on %s\n", pr_err("No 5-level paging support for first-level on %s\n",
iommu->name); iommu->name);
return -EINVAL; return -EINVAL;


@ -48,23 +48,6 @@ static void *pasid_private_find(ioasid_t pasid)
return xa_load(&pasid_private_array, pasid); return xa_load(&pasid_private_array, pasid);
} }
static struct intel_svm_dev *
svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
{
struct intel_svm_dev *sdev = NULL, *t;
rcu_read_lock();
list_for_each_entry_rcu(t, &svm->devs, list) {
if (t->sid == sid) {
sdev = t;
break;
}
}
rcu_read_unlock();
return sdev;
}
static struct intel_svm_dev * static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev) svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{ {
@ -181,7 +164,7 @@ void intel_svm_check(struct intel_iommu *iommu)
} }
if (cpu_feature_enabled(X86_FEATURE_LA57) && if (cpu_feature_enabled(X86_FEATURE_LA57) &&
!cap_5lp_support(iommu->cap)) { !cap_fl5lp_support(iommu->cap)) {
pr_err("%s SVM disabled, incompatible paging mode\n", pr_err("%s SVM disabled, incompatible paging mode\n",
iommu->name); iommu->name);
return; return;
@ -706,11 +689,10 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
static irqreturn_t prq_event_thread(int irq, void *d) static irqreturn_t prq_event_thread(int irq, void *d)
{ {
struct intel_svm_dev *sdev = NULL;
struct intel_iommu *iommu = d; struct intel_iommu *iommu = d;
struct intel_svm *svm = NULL;
struct page_req_dsc *req; struct page_req_dsc *req;
int head, tail, handled; int head, tail, handled;
struct pci_dev *pdev;
u64 address; u64 address;
/* /*
@ -730,8 +712,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
pr_err("IOMMU: %s: Page request without PASID\n", pr_err("IOMMU: %s: Page request without PASID\n",
iommu->name); iommu->name);
bad_req: bad_req:
svm = NULL;
sdev = NULL;
handle_bad_prq_event(iommu, req, QI_RESP_INVALID); handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
goto prq_advance; goto prq_advance;
} }
@ -758,34 +738,19 @@ bad_req:
if (unlikely(req->lpig && !req->rd_req && !req->wr_req)) if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
goto prq_advance; goto prq_advance;
-		if (!svm || svm->pasid != req->pasid) {
-			/*
-			 * It can't go away, because the driver is not permitted
-			 * to unbind the mm while any page faults are outstanding.
-			 */
-			svm = pasid_private_find(req->pasid);
-			if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
-				goto bad_req;
-		}
-
-		if (!sdev || sdev->sid != req->rid) {
-			sdev = svm_lookup_device_by_sid(svm, req->rid);
-			if (!sdev)
-				goto bad_req;
-		}
-
-		sdev->prq_seq_number++;
-
+		pdev = pci_get_domain_bus_and_slot(iommu->segment,
+						   PCI_BUS_NUM(req->rid),
+						   req->rid & 0xff);
 		/*
 		 * If prq is to be handled outside iommu driver via receiver of
 		 * the fault notifiers, we skip the page response here.
 		 */
-		if (intel_svm_prq_report(iommu, sdev->dev, req))
+		if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
 			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
 
-		trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
+		trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
 				 req->priv_data[0], req->priv_data[1],
-				 sdev->prq_seq_number);
+				 iommu->prq_seq_number++);
prq_advance: prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK; head = (head + sizeof(*req)) & PRQ_RING_MASK;
} }
@ -881,8 +846,6 @@ int intel_svm_page_response(struct device *dev,
struct iommu_page_response *msg) struct iommu_page_response *msg)
{ {
struct iommu_fault_page_request *prm; struct iommu_fault_page_request *prm;
struct intel_svm_dev *sdev = NULL;
struct intel_svm *svm = NULL;
struct intel_iommu *iommu; struct intel_iommu *iommu;
bool private_present; bool private_present;
bool pasid_present; bool pasid_present;
@ -901,8 +864,6 @@ int intel_svm_page_response(struct device *dev,
if (!msg || !evt) if (!msg || !evt)
return -EINVAL; return -EINVAL;
mutex_lock(&pasid_mutex);
prm = &evt->fault.prm; prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn); sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
@ -919,12 +880,6 @@ int intel_svm_page_response(struct device *dev,
goto out; goto out;
} }
ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
if (ret || !sdev) {
ret = -ENODEV;
goto out;
}
/* /*
* Per VT-d spec. v3.0 ch7.7, system software must respond * Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP) * with page group response if private data is present (PDP)
@ -954,6 +909,5 @@ int intel_svm_page_response(struct device *dev,
qi_submit_sync(iommu, &desc, 1, 0); qi_submit_sync(iommu, &desc, 1, 0);
} }
out: out:
mutex_unlock(&pasid_mutex);
return ret; return ret;
} }


@ -130,9 +130,6 @@
#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL #define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL #define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
#define APPLE_DART_PTE_PROT_NO_WRITE (1<<7)
#define APPLE_DART_PTE_PROT_NO_READ (1<<8)
/* IOPTE accessors */ /* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@ -200,8 +197,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *pages; void *pages;
VM_BUG_ON((gfp & __GFP_HIGHMEM)); VM_BUG_ON((gfp & __GFP_HIGHMEM));
-	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
-			     gfp | __GFP_ZERO, order);
+	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
if (!p) if (!p)
return NULL; return NULL;
@ -406,15 +402,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
{ {
arm_lpae_iopte pte; arm_lpae_iopte pte;
if (data->iop.fmt == APPLE_DART) {
pte = 0;
if (!(prot & IOMMU_WRITE))
pte |= APPLE_DART_PTE_PROT_NO_WRITE;
if (!(prot & IOMMU_READ))
pte |= APPLE_DART_PTE_PROT_NO_READ;
return pte;
}
if (data->iop.fmt == ARM_64_LPAE_S1 || if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) { data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_nG; pte = ARM_LPAE_PTE_nG;
@ -1107,52 +1094,6 @@ out_free_data:
return NULL; return NULL;
} }
static struct io_pgtable *
apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct arm_lpae_io_pgtable *data;
int i;
if (cfg->oas > 36)
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
/*
* The table format itself always uses two levels, but the total VA
* space is mapped by four separate tables, making the MMIO registers
* an effective "level 1". For simplicity, though, we treat this
* equivalently to LPAE stage 2 concatenation at level 2, with the
* additional TTBRs each just pointing at consecutive pages.
*/
if (data->start_level < 1)
goto out_free_data;
if (data->start_level == 1 && data->pgd_bits > 2)
goto out_free_data;
if (data->start_level > 1)
data->pgd_bits = 0;
data->start_level = 2;
cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits;
data->pgd_bits += data->bits_per_level;
data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
cfg);
if (!data->pgd)
goto out_free_data;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i)
cfg->apple_dart_cfg.ttbr[i] =
virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data));
return &data->iop;
out_free_data:
kfree(data);
return NULL;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
.alloc = arm_64_lpae_alloc_pgtable_s1, .alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable, .free = arm_lpae_free_pgtable,
@ -1178,11 +1119,6 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
.free = arm_lpae_free_pgtable, .free = arm_lpae_free_pgtable,
}; };
struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
.alloc = apple_dart_alloc_pgtable,
.free = arm_lpae_free_pgtable,
};
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
static struct io_pgtable_cfg *cfg_cookie __initdata; static struct io_pgtable_cfg *cfg_cookie __initdata;
@ -1343,12 +1279,17 @@ static int __init arm_lpae_do_selftests(void)
}; };
int i, j, pass = 0, fail = 0; int i, j, pass = 0, fail = 0;
+	struct device dev;
 	struct io_pgtable_cfg cfg = {
 		.tlb = &dummy_tlb_ops,
 		.oas = 48,
 		.coherent_walk = true,
+		.iommu_dev = &dev,
 	};
 
+	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
+	set_dev_node(&dev, NUMA_NO_NODE);
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(ias); ++j) { for (j = 0; j < ARRAY_SIZE(ias); ++j) {
cfg.pgsize_bitmap = pgsize[i]; cfg.pgsize_bitmap = pgsize[i];


@ -0,0 +1,469 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Apple DART page table allocator.
*
* Copyright (C) 2022 The Asahi Linux Contributors
*
* Based on io-pgtable-arm.
*
* Copyright (C) 2014 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
*/
#define pr_fmt(fmt) "dart io-pgtable: " fmt
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/barrier.h>
#define DART1_MAX_ADDR_BITS 36
#define DART_MAX_TABLES 4
#define DART_LEVELS 2
/* Struct accessors */
#define io_pgtable_to_data(x) \
container_of((x), struct dart_io_pgtable, iop)
#define io_pgtable_ops_to_data(x) \
io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
#define DART_GRANULE(d) \
(sizeof(dart_iopte) << (d)->bits_per_level)
#define DART_PTES_PER_TABLE(d) \
(DART_GRANULE(d) >> ilog2(sizeof(dart_iopte)))
#define APPLE_DART_PTE_SUBPAGE_START GENMASK_ULL(63, 52)
#define APPLE_DART_PTE_SUBPAGE_END GENMASK_ULL(51, 40)
#define APPLE_DART1_PADDR_MASK GENMASK_ULL(35, 12)
#define APPLE_DART2_PADDR_MASK GENMASK_ULL(37, 10)
#define APPLE_DART2_PADDR_SHIFT (4)
/* Apple DART1 protection bits */
#define APPLE_DART1_PTE_PROT_NO_READ BIT(8)
#define APPLE_DART1_PTE_PROT_NO_WRITE BIT(7)
#define APPLE_DART1_PTE_PROT_SP_DIS BIT(1)
/* Apple DART2 protection bits */
#define APPLE_DART2_PTE_PROT_NO_READ BIT(3)
#define APPLE_DART2_PTE_PROT_NO_WRITE BIT(2)
#define APPLE_DART2_PTE_PROT_NO_CACHE BIT(1)
/* marks PTE as valid */
#define APPLE_DART_PTE_VALID BIT(0)
/* IOPTE accessors */
#define iopte_deref(pte, d) __va(iopte_to_paddr(pte, d))
struct dart_io_pgtable {
struct io_pgtable iop;
int tbl_bits;
int bits_per_level;
void *pgd[DART_MAX_TABLES];
};
typedef u64 dart_iopte;
static dart_iopte paddr_to_iopte(phys_addr_t paddr,
struct dart_io_pgtable *data)
{
dart_iopte pte;
if (data->iop.fmt == APPLE_DART)
return paddr & APPLE_DART1_PADDR_MASK;
/* format is APPLE_DART2 */
pte = paddr >> APPLE_DART2_PADDR_SHIFT;
pte &= APPLE_DART2_PADDR_MASK;
return pte;
}
static phys_addr_t iopte_to_paddr(dart_iopte pte,
struct dart_io_pgtable *data)
{
u64 paddr;
if (data->iop.fmt == APPLE_DART)
return pte & APPLE_DART1_PADDR_MASK;
/* format is APPLE_DART2 */
paddr = pte & APPLE_DART2_PADDR_MASK;
paddr <<= APPLE_DART2_PADDR_SHIFT;
return paddr;
}
static void *__dart_alloc_pages(size_t size, gfp_t gfp,
struct io_pgtable_cfg *cfg)
{
int order = get_order(size);
struct page *p;
VM_BUG_ON((gfp & __GFP_HIGHMEM));
p = alloc_pages(gfp | __GFP_ZERO, order);
if (!p)
return NULL;
return page_address(p);
}
static int dart_init_pte(struct dart_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
dart_iopte prot, int num_entries,
dart_iopte *ptep)
{
int i;
dart_iopte pte = prot;
size_t sz = data->iop.cfg.pgsize_bitmap;
for (i = 0; i < num_entries; i++)
if (ptep[i] & APPLE_DART_PTE_VALID) {
/* We require an unmap first */
WARN_ON(ptep[i] & APPLE_DART_PTE_VALID);
return -EEXIST;
}
/* subpage protection: always allow access to the entire page */
pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0);
pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff);
pte |= APPLE_DART1_PTE_PROT_SP_DIS;
pte |= APPLE_DART_PTE_VALID;
for (i = 0; i < num_entries; i++)
ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
return 0;
}
static dart_iopte dart_install_table(dart_iopte *table,
dart_iopte *ptep,
dart_iopte curr,
struct dart_io_pgtable *data)
{
dart_iopte old, new;
new = paddr_to_iopte(__pa(table), data) | APPLE_DART_PTE_VALID;
/*
* Ensure the table itself is visible before its PTE can be.
* Whilst we could get away with cmpxchg64_release below, this
* doesn't have any ordering semantics when !CONFIG_SMP.
*/
dma_wmb();
old = cmpxchg64_relaxed(ptep, curr, new);
return old;
}
static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
{
return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
((1 << data->tbl_bits) - 1);
}
static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
{
return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
((1 << data->bits_per_level) - 1);
}
static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
{
return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
((1 << data->bits_per_level) - 1);
}
static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
{
dart_iopte pte, *ptep;
int tbl = dart_get_table(data, iova);
ptep = data->pgd[tbl];
if (!ptep)
return NULL;
ptep += dart_get_l1_index(data, iova);
pte = READ_ONCE(*ptep);
/* Valid entry? */
if (!pte)
return NULL;
/* Deref to get level 2 table */
return iopte_deref(pte, data);
}
static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
int prot)
{
dart_iopte pte = 0;
if (data->iop.fmt == APPLE_DART) {
if (!(prot & IOMMU_WRITE))
pte |= APPLE_DART1_PTE_PROT_NO_WRITE;
if (!(prot & IOMMU_READ))
pte |= APPLE_DART1_PTE_PROT_NO_READ;
}
if (data->iop.fmt == APPLE_DART2) {
if (!(prot & IOMMU_WRITE))
pte |= APPLE_DART2_PTE_PROT_NO_WRITE;
if (!(prot & IOMMU_READ))
pte |= APPLE_DART2_PTE_PROT_NO_READ;
if (!(prot & IOMMU_CACHE))
pte |= APPLE_DART2_PTE_PROT_NO_CACHE;
}
return pte;
}
static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int iommu_prot, gfp_t gfp, size_t *mapped)
{
struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
size_t tblsz = DART_GRANULE(data);
int ret = 0, tbl, num_entries, max_entries, map_idx_start;
dart_iopte pte, *cptep, *ptep;
dart_iopte prot;
if (WARN_ON(pgsize != cfg->pgsize_bitmap))
return -EINVAL;
if (WARN_ON(paddr >> cfg->oas))
return -ERANGE;
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
tbl = dart_get_table(data, iova);
ptep = data->pgd[tbl];
ptep += dart_get_l1_index(data, iova);
pte = READ_ONCE(*ptep);
/* no L2 table present */
if (!pte) {
cptep = __dart_alloc_pages(tblsz, gfp, cfg);
if (!cptep)
return -ENOMEM;
pte = dart_install_table(cptep, ptep, 0, data);
if (pte)
free_pages((unsigned long)cptep, get_order(tblsz));
/* L2 table is present (now) */
pte = READ_ONCE(*ptep);
}
ptep = iopte_deref(pte, data);
/* install a leaf entries into L2 table */
prot = dart_prot_to_pte(data, iommu_prot);
map_idx_start = dart_get_l2_index(data, iova);
max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
num_entries = min_t(int, pgcount, max_entries);
ptep += map_idx_start;
ret = dart_init_pte(data, iova, paddr, prot, num_entries, ptep);
if (!ret && mapped)
*mapped += num_entries * pgsize;
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
*/
wmb();
return ret;
}
static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather)
{
struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
int i = 0, num_entries, max_entries, unmap_idx_start;
dart_iopte pte, *ptep;
if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
return 0;
ptep = dart_get_l2(data, iova);
/* Valid L2 IOPTE pointer? */
if (WARN_ON(!ptep))
return 0;
unmap_idx_start = dart_get_l2_index(data, iova);
ptep += unmap_idx_start;
max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
num_entries = min_t(int, pgcount, max_entries);
while (i < num_entries) {
pte = READ_ONCE(*ptep);
if (WARN_ON(!pte))
break;
/* clear pte */
*ptep = 0;
if (!iommu_iotlb_gather_queued(gather))
io_pgtable_tlb_add_page(&data->iop, gather,
iova + i * pgsize, pgsize);
ptep++;
i++;
}
return i * pgsize;
}
static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
dart_iopte pte, *ptep;
ptep = dart_get_l2(data, iova);
/* Valid L2 IOPTE pointer? */
if (!ptep)
return 0;
ptep += dart_get_l2_index(data, iova);
pte = READ_ONCE(*ptep);
/* Found translation */
if (pte) {
iova &= (data->iop.cfg.pgsize_bitmap - 1);
return iopte_to_paddr(pte, data) | iova;
}
/* Ran out of page tables to walk */
return 0;
}
static struct dart_io_pgtable *
dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
struct dart_io_pgtable *data;
int tbl_bits, bits_per_level, va_bits, pg_shift;
pg_shift = __ffs(cfg->pgsize_bitmap);
bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));
va_bits = cfg->ias - pg_shift;
tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
if ((1 << tbl_bits) > DART_MAX_TABLES)
return NULL;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
data->tbl_bits = tbl_bits;
data->bits_per_level = bits_per_level;
data->iop.ops = (struct io_pgtable_ops) {
.map_pages = dart_map_pages,
.unmap_pages = dart_unmap_pages,
.iova_to_phys = dart_iova_to_phys,
};
return data;
}
static struct io_pgtable *
apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct dart_io_pgtable *data;
int i;
if (!cfg->coherent_walk)
return NULL;
if (cfg->oas != 36 && cfg->oas != 42)
return NULL;
if (cfg->ias > cfg->oas)
return NULL;
if (!(cfg->pgsize_bitmap == SZ_4K || cfg->pgsize_bitmap == SZ_16K))
return NULL;
data = dart_alloc_pgtable(cfg);
if (!data)
return NULL;
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL,
cfg);
if (!data->pgd[i])
goto out_free_data;
cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
}
return &data->iop;
out_free_data:
while (--i >= 0)
free_pages((unsigned long)data->pgd[i],
get_order(DART_GRANULE(data)));
kfree(data);
return NULL;
}
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
dart_iopte *ptep, *end;
int i;
for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
ptep = data->pgd[i];
end = (void *)ptep + DART_GRANULE(data);
while (ptep != end) {
dart_iopte pte = *ptep++;
if (pte) {
unsigned long page =
(unsigned long)iopte_deref(pte, data);
free_pages(page, get_order(DART_GRANULE(data)));
}
}
free_pages((unsigned long)data->pgd[i],
get_order(DART_GRANULE(data)));
}
kfree(data);
}
struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
.alloc = apple_dart_alloc_pgtable,
.free = apple_dart_free_pgtable,
};
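
The new allocator keeps the walk at two levels and spreads the address space over up to four top-level tables, one per TTBR, selected by the uppermost IOVA bits. As a worked example of the index helpers, assume 16 KiB pages (pg_shift = 14, so bits_per_level = 11) and a 38-bit input address size, giving tbl_bits = 38 - 14 - 2*11 = 2; these numbers are assumptions for illustration, not values taken from this series:

#include <linux/printk.h>

static void dart_index_example(void)
{
	const int bits_per_level = 11, tbl_bits = 2;	/* 16 KiB pages, ias = 38 */
	unsigned long iova = 0x205c004000UL;		/* bits [37:36] = 0b10 */

	/* Same arithmetic as dart_get_table()/dart_get_l1_index()/dart_get_l2_index();
	 * the "+ 3" is ilog2(sizeof(dart_iopte)). */
	int tbl = (iova >> (3 * bits_per_level + 3)) & ((1 << tbl_bits) - 1);
	int l1  = (iova >> (2 * bits_per_level + 3)) & ((1 << bits_per_level) - 1);
	int l2  = (iova >> (bits_per_level + 3)) & ((1 << bits_per_level) - 1);

	pr_info("ttbr %d, level-1 slot %d, level-2 slot %d\n", tbl, l1, l2);
}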


@ -20,13 +20,17 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
[ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns, [ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_DART
[APPLE_DART] = &io_pgtable_apple_dart_init_fns, [APPLE_DART] = &io_pgtable_apple_dart_init_fns,
[APPLE_DART2] = &io_pgtable_apple_dart_init_fns,
#endif #endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns, [ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
#endif #endif
#ifdef CONFIG_AMD_IOMMU #ifdef CONFIG_AMD_IOMMU
[AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns, [AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
[AMD_IOMMU_V2] = &io_pgtable_amd_iommu_v2_init_fns,
#endif #endif
}; };
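
Both DART generations share one set of init_fns; the io_pgtable_fmt passed to alloc_io_pgtable_ops() is what selects the v1 or v2 PTE layout. A hedged sketch of a caller picking the v2 format follows (the cfg numbers are assumptions for illustration; a real driver also wires up .tlb flush callbacks and passes its domain as the cookie):

#include <linux/io-pgtable.h>
#include <linux/sizes.h>

static struct io_pgtable_ops *example_alloc_dart2_pgtable(struct device *dev)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_16K,
		.ias		= 38,
		.oas		= 42,	/* the v2 format carries wider physical addresses */
		.coherent_walk	= true,
		.iommu_dev	= dev,
	};

	return alloc_io_pgtable_ops(APPLE_DART2, &cfg, NULL);
}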


@ -6,8 +6,8 @@
#define pr_fmt(fmt) "iommu: " fmt #define pr_fmt(fmt) "iommu: " fmt
#include <linux/amba/bus.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/bits.h> #include <linux/bits.h>
#include <linux/bug.h> #include <linux/bug.h>
@ -16,17 +16,21 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h> #include <linux/iommu.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h> #include <linux/property.h>
#include <linux/fsl/mc.h> #include <linux/fsl/mc.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/cc_platform.h> #include <linux/cc_platform.h>
#include <trace/events/iommu.h> #include <trace/events/iommu.h>
#include "dma-iommu.h"
static struct kset *iommu_group_kset; static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida); static DEFINE_IDA(iommu_group_ida);
@ -75,6 +79,8 @@ static const char * const iommu_group_resv_type_string[] = {
#define IOMMU_CMD_LINE_DMA_API BIT(0) #define IOMMU_CMD_LINE_DMA_API BIT(0)
#define IOMMU_CMD_LINE_STRICT BIT(1) #define IOMMU_CMD_LINE_STRICT BIT(1)
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group, static int iommu_alloc_default_domain(struct iommu_group *group,
struct device *dev); struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@ -103,6 +109,22 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
static LIST_HEAD(iommu_device_list); static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock); static DEFINE_SPINLOCK(iommu_device_lock);
static struct bus_type * const iommu_buses[] = {
&platform_bus_type,
#ifdef CONFIG_PCI
&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
&host1x_context_device_bus_type,
#endif
};
/* /*
* Use a function instead of an array here because the domain-type is a * Use a function instead of an array here because the domain-type is a
* bit-field, so an array would waste memory. * bit-field, so an array would waste memory.
@ -126,6 +148,8 @@ static const char *iommu_domain_type_str(unsigned int t)
static int __init iommu_subsys_init(void) static int __init iommu_subsys_init(void)
{ {
struct notifier_block *nb;
if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
iommu_set_default_passthrough(false); iommu_set_default_passthrough(false);
@ -152,10 +176,27 @@ static int __init iommu_subsys_init(void)
(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ? (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
"(set via kernel command line)" : ""); "(set via kernel command line)" : "");
nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
if (!nb)
return -ENOMEM;
for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
nb[i].notifier_call = iommu_bus_notifier;
bus_register_notifier(iommu_buses[i], &nb[i]);
}
return 0; return 0;
} }
subsys_initcall(iommu_subsys_init); subsys_initcall(iommu_subsys_init);
static int remove_iommu_group(struct device *dev, void *data)
{
if (dev->iommu && dev->iommu->iommu_dev == data)
iommu_release_device(dev);
return 0;
}
/** /**
* iommu_device_register() - Register an IOMMU hardware instance * iommu_device_register() - Register an IOMMU hardware instance
* @iommu: IOMMU handle for the instance * @iommu: IOMMU handle for the instance
@ -167,23 +208,42 @@ subsys_initcall(iommu_subsys_init);
int iommu_device_register(struct iommu_device *iommu, int iommu_device_register(struct iommu_device *iommu,
const struct iommu_ops *ops, struct device *hwdev) const struct iommu_ops *ops, struct device *hwdev)
{ {
int err = 0;
/* We need to be able to take module references appropriately */ /* We need to be able to take module references appropriately */
if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
return -EINVAL; return -EINVAL;
/*
* Temporarily enforce global restriction to a single driver. This was
* already the de-facto behaviour, since any possible combination of
* existing drivers would compete for at least the PCI or platform bus.
*/
if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
return -EBUSY;
iommu->ops = ops; iommu->ops = ops;
 	if (hwdev)
-		iommu->fwnode = hwdev->fwnode;
+		iommu->fwnode = dev_fwnode(hwdev);
 
 	spin_lock(&iommu_device_lock);
 	list_add_tail(&iommu->list, &iommu_device_list);
 	spin_unlock(&iommu_device_lock);
-	return 0;
+
+	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
+		iommu_buses[i]->iommu_ops = ops;
+		err = bus_iommu_probe(iommu_buses[i]);
+	}
+	if (err)
+		iommu_device_unregister(iommu);
+	return err;
} }
EXPORT_SYMBOL_GPL(iommu_device_register); EXPORT_SYMBOL_GPL(iommu_device_register);
void iommu_device_unregister(struct iommu_device *iommu) void iommu_device_unregister(struct iommu_device *iommu)
{ {
for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);
spin_lock(&iommu_device_lock); spin_lock(&iommu_device_lock);
list_del(&iommu->list); list_del(&iommu->list);
spin_unlock(&iommu_device_lock); spin_unlock(&iommu_device_lock);
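
For drivers, the net effect of this series is that probe code no longer touches any bus_type at all; registering the iommu_device is enough, and the core walks iommu_buses[] on its behalf. A minimal sketch of the resulting driver-side pattern (names with a foo_ prefix are hypothetical):

#include <linux/iommu.h>
#include <linux/platform_device.h>

struct foo_iommu {
	struct iommu_device iommu;
	/* ... hardware state ... */
};

static const struct iommu_ops foo_iommu_ops = {
	/* .probe_device, .device_group, .domain_alloc, ... */
};

static int foo_iommu_probe(struct platform_device *pdev)
{
	struct foo_iommu *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	int ret;

	if (!foo)
		return -ENOMEM;

	ret = iommu_device_sysfs_add(&foo->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		return ret;

	/* No bus_set_iommu() here: this triggers bus_iommu_probe() for each bus */
	ret = iommu_device_register(&foo->iommu, &foo_iommu_ops, &pdev->dev);
	if (ret)
		iommu_device_sysfs_remove(&foo->iommu);
	return ret;
}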
@ -654,7 +714,6 @@ struct iommu_group *iommu_group_alloc(void)
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id); NULL, "%d", group->id);
if (ret) { if (ret) {
ida_free(&iommu_group_ida, group->id);
kobject_put(&group->kobj); kobject_put(&group->kobj);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
@ -1612,13 +1671,6 @@ static int probe_iommu_group(struct device *dev, void *data)
return ret; return ret;
} }
static int remove_iommu_group(struct device *dev, void *data)
{
iommu_release_device(dev);
return 0;
}
static int iommu_bus_notifier(struct notifier_block *nb, static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data) unsigned long action, void *data)
{ {
@ -1775,75 +1827,6 @@ int bus_iommu_probe(struct bus_type *bus)
return ret; return ret;
} }
static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
struct notifier_block *nb;
int err;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (!nb)
return -ENOMEM;
nb->notifier_call = iommu_bus_notifier;
err = bus_register_notifier(bus, nb);
if (err)
goto out_free;
err = bus_iommu_probe(bus);
if (err)
goto out_err;
return 0;
out_err:
/* Clean up */
bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
bus_unregister_notifier(bus, nb);
out_free:
kfree(nb);
return err;
}
/**
* bus_set_iommu - set iommu-callbacks for the bus
* @bus: bus.
* @ops: the callbacks provided by the iommu-driver
*
* This function is called by an iommu driver to set the iommu methods
* used for a particular bus. Drivers for devices on that bus can use
* the iommu-api after these ops are registered.
* This special function is needed because IOMMUs are usually devices on
* the bus itself, so the iommu drivers are not initialized when the bus
* is set up. With this function the iommu-driver can set the iommu-ops
* afterwards.
*/
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
int err;
if (ops == NULL) {
bus->iommu_ops = NULL;
return 0;
}
if (bus->iommu_ops != NULL)
return -EBUSY;
bus->iommu_ops = ops;
/* Do IOMMU specific setup for this bus-type */
err = iommu_bus_init(bus, ops);
if (err)
bus->iommu_ops = NULL;
return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
bool iommu_present(struct bus_type *bus) bool iommu_present(struct bus_type *bus)
{ {
return bus->iommu_ops != NULL; return bus->iommu_ops != NULL;
@ -1869,19 +1852,10 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
if (!ops->capable) if (!ops->capable)
return false; return false;
return ops->capable(cap); return ops->capable(dev, cap);
} }
EXPORT_SYMBOL_GPL(device_iommu_capable); EXPORT_SYMBOL_GPL(device_iommu_capable);
bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
if (!bus->iommu_ops || !bus->iommu_ops->capable)
return false;
return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);
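
Callers that used the bus-wide iommu_capable() are expected to move to the per-device variant, which also matches the new ->capable(dev, cap) prototype. A one-liner sketch of a consumer-side check (illustrative; the wrapper name is made up):

#include <linux/iommu.h>

static bool example_dma_is_coherent(struct device *dev)
{
	return device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
}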
/** /**
* iommu_set_fault_handler() - set a fault handler for an iommu domain * iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain * @domain: iommu domain


@ -661,9 +661,6 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
unsigned long flags; unsigned long flags;
int i; int i;
if (!mag)
return;
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (i = 0 ; i < mag->size; ++i) { for (i = 0 ; i < mag->size; ++i) {
@ -683,12 +680,12 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
static bool iova_magazine_full(struct iova_magazine *mag) static bool iova_magazine_full(struct iova_magazine *mag)
{ {
return (mag && mag->size == IOVA_MAG_SIZE); return mag->size == IOVA_MAG_SIZE;
} }
static bool iova_magazine_empty(struct iova_magazine *mag) static bool iova_magazine_empty(struct iova_magazine *mag)
{ {
return (!mag || mag->size == 0); return mag->size == 0;
} }
static unsigned long iova_magazine_pop(struct iova_magazine *mag, static unsigned long iova_magazine_pop(struct iova_magazine *mag,
@ -697,8 +694,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
int i; int i;
unsigned long pfn; unsigned long pfn;
BUG_ON(iova_magazine_empty(mag));
/* Only fall back to the rbtree if we have no suitable pfns at all */ /* Only fall back to the rbtree if we have no suitable pfns at all */
for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--) for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
if (i == 0) if (i == 0)
@ -713,8 +708,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn) static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{ {
BUG_ON(iova_magazine_full(mag));
mag->pfns[mag->size++] = pfn; mag->pfns[mag->size++] = pfn;
} }
@ -882,7 +875,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
{ {
unsigned int log_size = order_base_2(size); unsigned int log_size = order_base_2(size);
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches) if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
return 0; return 0;
return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size); return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);


@@ -1090,11 +1090,6 @@ static int ipmmu_probe(struct platform_device *pdev)
                 ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
                 if (ret)
                         return ret;
-#if defined(CONFIG_IOMMU_DMA)
-                if (!iommu_present(&platform_bus_type))
-                        bus_set_iommu(&platform_bus_type, &ipmmu_ops);
-#endif
         }
         /*
@@ -1168,32 +1163,4 @@ static struct platform_driver ipmmu_driver = {
         .probe = ipmmu_probe,
         .remove = ipmmu_remove,
 };
+builtin_platform_driver(ipmmu_driver);
-static int __init ipmmu_init(void)
-{
-        struct device_node *np;
-        static bool setup_done;
-        int ret;
-        if (setup_done)
-                return 0;
-        np = of_find_matching_node(NULL, ipmmu_of_ids);
-        if (!np)
-                return 0;
-        of_node_put(np);
-        ret = platform_driver_register(&ipmmu_driver);
-        if (ret < 0)
-                return ret;
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
-        if (!iommu_present(&platform_bus_type))
-                bus_set_iommu(&platform_bus_type, &ipmmu_ops);
-#endif
-        setup_done = true;
-        return 0;
-}
-subsys_initcall(ipmmu_init);


@@ -792,8 +792,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
                 goto fail;
         }
-        bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
         pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                 iommu->base, iommu->irq, iommu->ncb);


@@ -138,6 +138,7 @@
 #define PM_CLK_AO                      BIT(15)
 #define IFA_IOMMU_PCIE_SUPPORT         BIT(16)
 #define PGTABLE_PA_35_EN               BIT(17)
+#define TF_PORT_TO_ADDR_MT8173         BIT(18)
 #define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask)        \
         ((((pdata)->flags) & (mask)) == (_x))
@@ -157,6 +158,7 @@
 enum mtk_iommu_plat {
         M4U_MT2712,
         M4U_MT6779,
+        M4U_MT6795,
         M4U_MT8167,
         M4U_MT8173,
         M4U_MT8183,
@@ -955,7 +957,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int ban
          * Global control settings are in bank0. May re-init these global registers
          * since no sure if there is bank0 consumers.
          */
-        if (data->plat_data->m4u_plat == M4U_MT8173) {
+        if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) {
                 regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
                          F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
         } else {
@@ -1243,30 +1245,13 @@ static int mtk_iommu_probe(struct platform_device *pdev)
                 data->hw_list = &data->hw_list_head;
         }
-        if (!iommu_present(&platform_bus_type)) {
-                ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
-                if (ret)
-                        goto out_list_del;
-        }
         if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
                 ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
                 if (ret)
-                        goto out_bus_set_null;
+                        goto out_list_del;
-        } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
-                   MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
-#ifdef CONFIG_PCI
-                if (!iommu_present(&pci_bus_type)) {
-                        ret = bus_set_iommu(&pci_bus_type, &mtk_iommu_ops);
-                        if (ret) /* PCIe fail don't affect platform_bus. */
-                                goto out_list_del;
-                }
-#endif
         }
         return ret;
-out_bus_set_null:
-        bus_set_iommu(&platform_bus_type, NULL);
 out_list_del:
         list_del(&data->list);
         iommu_device_unregister(&data->iommu);
@@ -1294,11 +1279,6 @@ static int mtk_iommu_remove(struct platform_device *pdev)
         if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
                 device_link_remove(data->smicomm_dev, &pdev->dev);
                 component_master_del(&pdev->dev, &mtk_iommu_com_ops);
-        } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
-                   MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
-#ifdef CONFIG_PCI
-                bus_set_iommu(&pci_bus_type, NULL);
-#endif
         }
         pm_runtime_disable(&pdev->dev);
         for (i = 0; i < data->plat_data->banks_num; i++) {
@@ -1413,6 +1393,19 @@ static const struct mtk_iommu_plat_data mt6779_data = {
         .larbid_remap  = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
 };
+static const struct mtk_iommu_plat_data mt6795_data = {
+        .m4u_plat     = M4U_MT6795,
+        .flags        = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
+                        HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
+                        TF_PORT_TO_ADDR_MT8173,
+        .inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
+        .banks_num    = 1,
+        .banks_enable = {true},
+        .iova_region  = single_domain,
+        .iova_region_nr = ARRAY_SIZE(single_domain),
+        .larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
+};
 static const struct mtk_iommu_plat_data mt8167_data = {
         .m4u_plat    = M4U_MT8167,
         .flags       = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
@@ -1427,7 +1420,8 @@ static const struct mtk_iommu_plat_data mt8167_data = {
 static const struct mtk_iommu_plat_data mt8173_data = {
         .m4u_plat     = M4U_MT8173,
         .flags        = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
-                        HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
+                        HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
+                        TF_PORT_TO_ADDR_MT8173,
         .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
         .banks_num    = 1,
         .banks_enable = {true},
@@ -1524,6 +1518,7 @@ static const struct mtk_iommu_plat_data mt8195_data_vpp = {
 static const struct of_device_id mtk_iommu_of_ids[] = {
         { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
         { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
+        { .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
         { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
         { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
         { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
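
The probe-path hunks above all follow one pattern: with per-device IOMMU probing, a driver no longer calls bus_set_iommu() or checks iommu_present(), it just registers its instance with the core. A minimal sketch of that pattern, using hypothetical "foo" names rather than code from this series:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical driver state; only the embedded struct iommu_device matters here. */
struct foo_iommu {
        struct iommu_device iommu;
};

static const struct iommu_ops foo_iommu_ops;    /* assumed to be defined elsewhere */

static int foo_iommu_probe(struct platform_device *pdev)
{
        struct foo_iommu *data;
        int ret;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* Expose the instance under /sys/class/iommu/. */
        ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                                     "%s", dev_name(&pdev->dev));
        if (ret)
                return ret;

        /* Registering with the core is all that is left; no bus_set_iommu(). */
        ret = iommu_device_register(&data->iommu, &foo_iommu_ops, &pdev->dev);
        if (ret)
                iommu_device_sysfs_remove(&data->iommu);

        return ret;
}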


@@ -691,19 +691,11 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
         if (ret)
                 goto out_sysfs_remove;
-        if (!iommu_present(&platform_bus_type)) {
-                ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops);
-                if (ret)
-                        goto out_dev_unreg;
-        }
         ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
         if (ret)
-                goto out_bus_set_null;
+                goto out_dev_unreg;
         return ret;
-out_bus_set_null:
-        bus_set_iommu(&platform_bus_type, NULL);
 out_dev_unreg:
         iommu_device_unregister(&data->iommu);
 out_sysfs_remove:
@@ -718,9 +710,6 @@ static int mtk_iommu_v1_remove(struct platform_device *pdev)
         iommu_device_sysfs_remove(&data->iommu);
         iommu_device_unregister(&data->iommu);
-        if (iommu_present(&platform_bus_type))
-                bus_set_iommu(&platform_bus_type, NULL);
         clk_disable_unprepare(data->bclk);
         devm_free_irq(&pdev->dev, data->irq, data);
         component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);


@@ -32,12 +32,12 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
                 ssize_t bytes;                                          \
                 const char *str = "%20s: %08x\n";                       \
                 const int maxcol = 32;                                  \
-                bytes = snprintf(p, maxcol, str, __stringify(name),    \
+                if (len < maxcol)                                       \
+                        goto out;                                       \
+                bytes = scnprintf(p, maxcol, str, __stringify(name),   \
                                  iommu_read_reg(obj, MMU_##name));     \
                 p += bytes;                                             \
                 len -= bytes;                                           \
-                if (len < maxcol)                                       \
-                        goto out;                                       \
         } while (0)
 static ssize_t


@@ -1776,14 +1776,8 @@ static int __init omap_iommu_init(void)
                 goto fail_driver;
         }
-        ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
-        if (ret)
-                goto fail_bus;
         return 0;
-fail_bus:
-        platform_driver_unregister(&omap_iommu_driver);
 fail_driver:
         kmem_cache_destroy(iopte_cachep);
         return ret;


@@ -1300,8 +1300,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
         if (!dma_dev)
                 dma_dev = &pdev->dev;
-        bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
         pm_runtime_enable(dev);
         for (i = 0; i < iommu->num_irq; i++) {


@@ -39,7 +39,7 @@ static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
         return container_of(dom, struct s390_domain, domain);
 }
-static bool s390_iommu_capable(enum iommu_cap cap)
+static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
 {
         switch (cap) {
         case IOMMU_CAP_CACHE_COHERENCY:
@@ -185,7 +185,12 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
 static struct iommu_device *s390_iommu_probe_device(struct device *dev)
 {
-        struct zpci_dev *zdev = to_zpci_dev(dev);
+        struct zpci_dev *zdev;
+        if (!dev_is_pci(dev))
+                return ERR_PTR(-ENODEV);
+        zdev = to_zpci_dev(dev);
         return &zdev->iommu_dev;
 }
@@ -385,9 +390,3 @@ static const struct iommu_ops s390_iommu_ops = {
                 .free = s390_domain_free,
         }
 };
-static int __init s390_iommu_init(void)
-{
-        return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
-}
-subsys_initcall(s390_iommu_init);


@@ -496,9 +496,6 @@ static int sprd_iommu_probe(struct platform_device *pdev)
         if (ret)
                 goto remove_sysfs;
-        if (!iommu_present(&platform_bus_type))
-                bus_set_iommu(&platform_bus_type, &sprd_iommu_ops);
         ret = sprd_iommu_clk_enable(sdev);
         if (ret)
                 goto unregister_iommu;
@@ -534,8 +531,6 @@ static int sprd_iommu_remove(struct platform_device *pdev)
         iommu_group_put(sdev->group);
         sdev->group = NULL;
-        bus_set_iommu(&platform_bus_type, NULL);
         platform_set_drvdata(pdev, NULL);
         iommu_device_sysfs_remove(&sdev->iommu);
         iommu_device_unregister(&sdev->iommu);


@@ -965,8 +965,6 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
         if (ret < 0)
                 goto err_unregister;
-        bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);
         return 0;
 err_unregister:


@@ -1083,8 +1083,8 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
         /*
          * This is a bit of a hack. Ideally we'd want to simply return this
-         * value. However the IOMMU registration process will attempt to add
-         * all devices to the IOMMU when bus_set_iommu() is called. In order
+         * value. However iommu_device_register() will attempt to add
+         * all devices to the IOMMU before we get that far. In order
          * not to rely on global variables to track the IOMMU instance, we
          * set it here so that it can be looked up from the .probe_device()
          * callback via the IOMMU device's .drvdata field.
@@ -1138,32 +1138,15 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                 return ERR_PTR(err);
         err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
-        if (err)
-                goto remove_sysfs;
-        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
-        if (err < 0)
-                goto unregister;
-#ifdef CONFIG_PCI
-        err = bus_set_iommu(&pci_bus_type, &tegra_smmu_ops);
-        if (err < 0)
-                goto unset_platform_bus;
-#endif
+        if (err) {
+                iommu_device_sysfs_remove(&smmu->iommu);
+                return ERR_PTR(err);
+        }
         if (IS_ENABLED(CONFIG_DEBUG_FS))
                 tegra_smmu_debugfs_init(smmu);
         return smmu;
-unset_platform_bus: __maybe_unused;
-        bus_set_iommu(&platform_bus_type, NULL);
-unregister:
-        iommu_device_unregister(&smmu->iommu);
-remove_sysfs:
-        iommu_device_sysfs_remove(&smmu->iommu);
-        return ERR_PTR(err);
 }
 void tegra_smmu_remove(struct tegra_smmu *smmu)


@@ -7,9 +7,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/amba/bus.h>
 #include <linux/delay.h>
-#include <linux/dma-iommu.h>
 #include <linux/dma-map-ops.h>
 #include <linux/freezer.h>
 #include <linux/interval_tree.h>
@@ -17,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
-#include <linux/platform_device.h>
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ids.h>
@@ -25,6 +22,8 @@
 #include <uapi/linux/virtio_iommu.h>
+#include "dma-iommu.h"
 #define MSI_IOVA_BASE                  0x8000000
 #define MSI_IOVA_LENGTH                0x100000
@@ -925,7 +924,7 @@ static struct virtio_driver virtio_iommu_drv;
 static int viommu_match_node(struct device *dev, const void *data)
 {
-        return dev->parent->fwnode == data;
+        return device_match_fwnode(dev->parent, data);
 }
 static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
@@ -1006,7 +1005,7 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
         return iommu_fwspec_add_ids(dev, args->args, 1);
 }
-static bool viommu_capable(enum iommu_cap cap)
+static bool viommu_capable(struct device *dev, enum iommu_cap cap)
 {
         switch (cap) {
         case IOMMU_CAP_CACHE_COHERENCY:
@@ -1156,26 +1155,6 @@ static int viommu_probe(struct virtio_device *vdev)
         iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
-#ifdef CONFIG_PCI
-        if (pci_bus_type.iommu_ops != &viommu_ops) {
-                ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
-                if (ret)
-                        goto err_unregister;
-        }
-#endif
-#ifdef CONFIG_ARM_AMBA
-        if (amba_bustype.iommu_ops != &viommu_ops) {
-                ret = bus_set_iommu(&amba_bustype, &viommu_ops);
-                if (ret)
-                        goto err_unregister;
-        }
-#endif
-        if (platform_bus_type.iommu_ops != &viommu_ops) {
-                ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
-                if (ret)
-                        goto err_unregister;
-        }
         vdev->priv = viommu;
         dev_info(dev, "input address: %u bits\n",
@@ -1184,9 +1163,6 @@ static int viommu_probe(struct virtio_device *vdev)
         return 0;
-err_unregister:
-        iommu_device_sysfs_remove(&viommu->iommu);
-        iommu_device_unregister(&viommu->iommu);
 err_free_vqs:
         vdev->config->del_vqs(vdev);


@@ -13,7 +13,7 @@
 #define pr_fmt(fmt) "GICv2m: " fmt
 #include <linux/acpi.h>
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>


@@ -11,9 +11,9 @@
 #include <linux/cpu.h>
 #include <linux/crash_dump.h>
 #include <linux/delay.h>
-#include <linux/dma-iommu.h>
 #include <linux/efi.h>
 #include <linux/interrupt.h>
+#include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/irqdomain.h>
 #include <linux/list.h>


@@ -6,7 +6,7 @@
 #define pr_fmt(fmt) "GICv3: " fmt
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>


@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/interrupt.h>
+#include <linux/iommu.h>
 #include <linux/irq.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
@@ -18,7 +19,6 @@
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
 #include <linux/spinlock.h>
-#include <linux/dma-iommu.h>
 #define MSI_IRQS_PER_MSIR      32
 #define MSI_MSIR_OFFSET                4


@@ -37,7 +37,6 @@
 #include <linux/vfio.h>
 #include <linux/workqueue.h>
 #include <linux/notifier.h>
-#include <linux/dma-iommu.h>
 #include <linux/irqdomain.h>
 #include "vfio.h"


@@ -0,0 +1,95 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
* Copyright (c) 2022 Collabora Ltd.
* Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
*/
#ifndef _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
#define _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
#include <dt-bindings/memory/mtk-memory-port.h>
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
#define M4U_LARB2_ID 2
#define M4U_LARB3_ID 3
#define M4U_LARB4_ID 4
/* larb0 */
#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 2)
#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3)
#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 5)
#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB0_ID, 6)
#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 7)
#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 8)
#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 9)
#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 10)
#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 11)
#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 12)
#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB0_ID, 13)
/* larb1 */
#define M4U_PORT_VDEC_MC MTK_M4U_ID(M4U_LARB1_ID, 0)
#define M4U_PORT_VDEC_PP MTK_M4U_ID(M4U_LARB1_ID, 1)
#define M4U_PORT_VDEC_UFO MTK_M4U_ID(M4U_LARB1_ID, 2)
#define M4U_PORT_VDEC_VLD MTK_M4U_ID(M4U_LARB1_ID, 3)
#define M4U_PORT_VDEC_VLD2 MTK_M4U_ID(M4U_LARB1_ID, 4)
#define M4U_PORT_VDEC_AVC_MV MTK_M4U_ID(M4U_LARB1_ID, 5)
#define M4U_PORT_VDEC_PRED_RD MTK_M4U_ID(M4U_LARB1_ID, 6)
#define M4U_PORT_VDEC_PRED_WR MTK_M4U_ID(M4U_LARB1_ID, 7)
#define M4U_PORT_VDEC_PPWRAP MTK_M4U_ID(M4U_LARB1_ID, 8)
/* larb2 */
#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0)
#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1)
#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB2_ID, 2)
#define M4U_PORT_CAM_LCSO MTK_M4U_ID(M4U_LARB2_ID, 3)
#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4)
#define M4U_PORT_CAM_IMGO_S MTK_M4U_ID(M4U_LARB2_ID, 5)
#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB2_ID, 6)
#define M4U_PORT_CAM_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 7)
#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB2_ID, 8)
#define M4U_PORT_CAM_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 9)
#define M4U_PORT_CAM_UFDI MTK_M4U_ID(M4U_LARB2_ID, 10)
#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB2_ID, 11)
#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 12)
#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 13)
#define M4U_PORT_CAM_VIPI MTK_M4U_ID(M4U_LARB2_ID, 14)
#define M4U_PORT_CAM_VIP2I MTK_M4U_ID(M4U_LARB2_ID, 15)
#define M4U_PORT_CAM_VIP3I MTK_M4U_ID(M4U_LARB2_ID, 16)
#define M4U_PORT_CAM_LCEI MTK_M4U_ID(M4U_LARB2_ID, 17)
#define M4U_PORT_CAM_RB MTK_M4U_ID(M4U_LARB2_ID, 18)
#define M4U_PORT_CAM_RP MTK_M4U_ID(M4U_LARB2_ID, 19)
#define M4U_PORT_CAM_WR MTK_M4U_ID(M4U_LARB2_ID, 20)
/* larb3 */
#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
#define M4U_PORT_REMDC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 6)
#define M4U_PORT_REMDC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
#define M4U_PORT_JPGENC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 9)
#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 12)
#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 13)
#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 14)
#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 15)
#define M4U_PORT_REMDC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 16)
#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 17)
#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 18)
/* larb4 */
#define M4U_PORT_MJC_MV_RD MTK_M4U_ID(M4U_LARB4_ID, 0)
#define M4U_PORT_MJC_MV_WR MTK_M4U_ID(M4U_LARB4_ID, 1)
#define M4U_PORT_MJC_DMA_RD MTK_M4U_ID(M4U_LARB4_ID, 2)
#define M4U_PORT_MJC_DMA_WR MTK_M4U_ID(M4U_LARB4_ID, 3)
#endif


@@ -1,93 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014-2015 ARM Ltd.
*/
#ifndef __DMA_IOMMU_H
#define __DMA_IOMMU_H
#include <linux/errno.h>
#include <linux/types.h>
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/msi.h>
/* Domain management interface for IOMMU drivers */
int iommu_get_dma_cookie(struct iommu_domain *domain);
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
int iommu_dma_init_fq(struct iommu_domain *domain);
/* The DMA API isn't _quite_ the whole story, though... */
/*
* iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
*
* The MSI page will be stored in @desc.
*
* Return: 0 on success otherwise an error describing the failure.
*/
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
/* Update the MSI message if required. */
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
struct msi_msg *msg);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
struct iommu_domain *domain);
extern bool iommu_dma_forcedac;
#else /* CONFIG_IOMMU_DMA */
struct iommu_domain;
struct msi_desc;
struct msi_msg;
struct device;
static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
u64 dma_limit)
{
}
static inline int iommu_dma_init_fq(struct iommu_domain *domain)
{
return -EINVAL;
}
static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
{
return -ENODEV;
}
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
return -ENODEV;
}
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
phys_addr_t msi_addr)
{
return 0;
}
static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
struct msi_msg *msg)
{
}
static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
}
#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */


@@ -16,7 +16,9 @@ enum io_pgtable_fmt {
         ARM_V7S,
         ARM_MALI_LPAE,
         AMD_IOMMU_V1,
+        AMD_IOMMU_V2,
         APPLE_DART,
+        APPLE_DART2,
         IO_PGTABLE_NUM_FMTS,
 };
@@ -260,6 +262,7 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
 #endif /* __IO_PGTABLE_H */
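
The two new formats slot into the existing alloc_io_pgtable_ops() interface. A rough sketch of how a caller might pick between the DART variants; the cfg values are placeholders and the TLB flush callbacks are omitted, so this is not code taken from the apple-dart or AMD drivers:

#include <linux/device.h>
#include <linux/io-pgtable.h>
#include <linux/sizes.h>

/*
 * Illustrative only: APPLE_DART2 carries the t6000 (M1 Pro/Max) PTE layout,
 * APPLE_DART the original one. Real callers also wire up cfg.tlb callbacks
 * and take the address sizes and page size from the hardware; the numbers
 * below are guesses.
 */
static struct io_pgtable_ops *foo_alloc_pgtable(struct device *dev, bool is_t6000)
{
        struct io_pgtable_cfg cfg = {
                .pgsize_bitmap  = SZ_16K,
                .ias            = 38,
                .oas            = 42,
                .coherent_walk  = true,
                .iommu_dev      = dev,
        };

        return alloc_io_pgtable_ops(is_t6000 ? APPLE_DART2 : APPLE_DART,
                                    &cfg, NULL);
}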


@@ -212,7 +212,7 @@ struct iommu_iotlb_gather {
  * @of_xlate: add OF master IDs to iommu grouping
  * @is_attach_deferred: Check if domain attach should be deferred from iommu
  *                      driver init to device driver init (default no)
- * @dev_has/enable/disable_feat: per device entries to check/enable/disable
+ * @dev_enable/disable_feat: per device entries to enable/disable
  *                              iommu specific features.
  * @sva_bind: Bind process address space to device
  * @sva_unbind: Unbind process address space from device
@@ -227,7 +227,7 @@ struct iommu_iotlb_gather {
  * @owner: Driver module providing these ops
  */
 struct iommu_ops {
-        bool (*capable)(enum iommu_cap);
+        bool (*capable)(struct device *dev, enum iommu_cap);
         /* Domain allocation and freeing by the iommu driver */
         struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
@@ -416,11 +416,9 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
         return dev->iommu->iommu_dev->ops;
 }
-extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
 extern int bus_iommu_probe(struct bus_type *bus);
 extern bool iommu_present(struct bus_type *bus);
 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
-extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
 extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
 extern struct iommu_group *iommu_group_get_by_id(int id);
 extern void iommu_domain_free(struct iommu_domain *domain);
@@ -697,11 +695,6 @@ static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
         return false;
 }
-static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
-{
-        return false;
-}
 static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
 {
         return NULL;
@@ -1070,4 +1063,40 @@ void iommu_debugfs_setup(void);
 static inline void iommu_debugfs_setup(void) {}
 #endif
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/msi.h>
+/* Setup call for arch DMA mapping code */
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
+#else /* CONFIG_IOMMU_DMA */
+struct msi_desc;
+struct msi_msg;
+static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
+{
+}
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+        return -ENODEV;
+}
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+        return 0;
+}
+static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+}
+#endif /* CONFIG_IOMMU_DMA */
 #endif /* __LINUX_IOMMU_H */
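
For drivers, the ->capable() change is mechanical: the callback now receives the device being asked about, as the s390 and virtio-iommu hunks above show. A minimal sketch with hypothetical "foo" names, not code from this series:

#include <linux/iommu.h>

static bool foo_iommu_capable(struct device *dev, enum iommu_cap cap)
{
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                /* The device argument allows a per-device answer here. */
                return true;
        default:
                return false;
        }
}

static const struct iommu_ops foo_iommu_ops = {
        .capable = foo_iommu_capable,
        /* remaining callbacks omitted from this sketch */
};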


@@ -75,7 +75,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
         return iova >> iova_shift(iovad);
 }
-#if IS_ENABLED(CONFIG_IOMMU_IOVA)
+#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
 int iova_cache_get(void);
 void iova_cache_put(void);