iommu: Convert remaining simple drivers to domain_alloc_paging()

These drivers don't support IOMMU_DOMAIN_DMA, so this commit effectively
allows them to support that mode.

The prior work to require default_domains makes this safe because every
one of these drivers is either compilation incompatible with dma-iommu.c,
or already establishing a default_domain. In both cases alloc_domain()
will never be called with IOMMU_DOMAIN_DMA for these drivers so it is safe
to drop the test.

Removing these tests clarifies that the domain allocation path is only
about the functionality of a paging domain and has nothing to do with
policy of how the paging domain is used for UNMANAGED/DMA/DMA_FQ.

Tested-by: Niklas Schnelle <schnelle@linux.ibm.com>
Tested-by: Steven Price <steven.price@arm.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/24-v8-81230027b2fa+9d-iommu_all_defdom_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Jason Gunthorpe, 2023-09-13 10:43:57 -03:00, committed by Joerg Roedel
parent 3529375e77
commit 4efd98d41e
4 changed files with 8 additions and 20 deletions
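All four diffs below apply the same mechanical pattern described in the commit
message. As a rough illustration only, here is a minimal sketch using a
hypothetical "foo" driver (the names are illustrative and not taken from this
commit): the driver-side type check disappears because the core only invokes
the domain_alloc_paging() op when it actually wants a paging domain.

	/* Before: the driver had to police the requested domain type itself. */
	static struct iommu_domain *foo_domain_alloc(unsigned type)
	{
		struct foo_domain *dom;

		if (type != IOMMU_DOMAIN_UNMANAGED)	/* policy check in the driver */
			return NULL;

		dom = kzalloc(sizeof(*dom), GFP_KERNEL);
		if (!dom)
			return NULL;
		return &dom->domain;
	}

	/* After: the op only allocates a paging domain; policy stays in the core. */
	static struct iommu_domain *foo_domain_alloc_paging(struct device *dev)
	{
		struct foo_domain *dom;

		dom = kzalloc(sizeof(*dom), GFP_KERNEL);
		if (!dom)
			return NULL;
		return &dom->domain;
	}

	static const struct iommu_ops foo_iommu_ops = {
		.domain_alloc_paging	= foo_domain_alloc_paging,
		/* ... */
	};

In other words, the choice between UNMANAGED/DMA/DMA_FQ remains with the iommu
core; the driver op is reduced to allocating the page-table-backed domain.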

--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -302,13 +302,10 @@ static void __program_context(void __iomem *base, int ctx,
 	SET_M(base, ctx, 1);
 }
 
-static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct msm_priv *priv;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		goto fail_nomem;
@@ -691,7 +688,7 @@ fail:
 
 static struct iommu_ops msm_iommu_ops = {
 	.identity_domain = &msm_iommu_identity_domain,
-	.domain_alloc = msm_iommu_domain_alloc,
+	.domain_alloc_paging = msm_iommu_domain_alloc_paging,
 	.probe_device = msm_iommu_probe_device,
 	.device_group = generic_device_group,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,

--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -270,13 +270,10 @@ static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
 	return 0;
 }
 
-static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
 {
 	struct mtk_iommu_v1_domain *dom;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 	if (!dom)
 		return NULL;
@@ -585,7 +582,7 @@ static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
 
 static const struct iommu_ops mtk_iommu_v1_ops = {
 	.identity_domain = &mtk_iommu_v1_identity_domain,
-	.domain_alloc = mtk_iommu_v1_domain_alloc,
+	.domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging,
 	.probe_device = mtk_iommu_v1_probe_device,
 	.probe_finalize = mtk_iommu_v1_probe_finalize,
 	.release_device = mtk_iommu_v1_release_device,

--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1580,13 +1580,10 @@ static struct iommu_domain omap_iommu_identity_domain = {
 	.ops = &omap_iommu_identity_ops,
 };
 
-static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct omap_iommu_domain *omap_domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
 	if (!omap_domain)
 		return NULL;
@@ -1748,7 +1745,7 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
 
 static const struct iommu_ops omap_iommu_ops = {
 	.identity_domain = &omap_iommu_identity_domain,
-	.domain_alloc = omap_iommu_domain_alloc,
+	.domain_alloc_paging = omap_iommu_domain_alloc_paging,
 	.probe_device = omap_iommu_probe_device,
 	.release_device = omap_iommu_release_device,
 	.device_group = omap_iommu_device_group,

--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -39,13 +39,10 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
 	}
 }
 
-static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
+static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
 {
 	struct s390_domain *s390_domain;
 
-	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
 	if (!s390_domain)
 		return NULL;
@@ -447,7 +444,7 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
 static const struct iommu_ops s390_iommu_ops = {
 	.default_domain = &s390_iommu_platform_domain,
 	.capable = s390_iommu_capable,
-	.domain_alloc = s390_domain_alloc,
+	.domain_alloc_paging = s390_domain_alloc_paging,
 	.probe_device = s390_iommu_probe_device,
 	.release_device = s390_iommu_release_device,
 	.device_group = generic_device_group,