iommu: Pass struct iommu_iotlb_gather to ->unmap() and ->iotlb_sync()
To allow IOMMU drivers to batch up TLB flushing operations and postpone them
until ->iotlb_sync() is called, extend the prototypes for the ->unmap() and
->iotlb_sync() IOMMU ops callbacks to take a pointer to the current
iommu_iotlb_gather structure.

All affected IOMMU drivers are updated, but there should be no functional
change since the extra parameter is ignored for now.

Signed-off-by: Will Deacon <will@kernel.org>
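[Editor's note: for context, a driver that wants to exploit the new parameter
would record each unmapped range in the gather structure and defer the actual
TLB maintenance to its ->iotlb_sync() callback, using the
iommu_iotlb_gather_add_page() helper from the parent commit. The sketch below
is illustrative only and is not part of this commit, which deliberately
ignores the parameter; foo_pgtable_unmap() and foo_tlb_inv_range() are
hypothetical driver internals, the latter invalidating the range
[start, end).]

	static size_t foo_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				      size_t size, struct iommu_iotlb_gather *gather)
	{
		size_t unmapped = foo_pgtable_unmap(domain, iova, size);

		/* Accumulate the range; the helper merges it into gather->{start,end}. */
		iommu_iotlb_gather_add_page(domain, gather, iova, unmapped);

		return unmapped;
	}

	static void foo_iommu_iotlb_sync(struct iommu_domain *domain,
					 struct iommu_iotlb_gather *gather)
	{
		/* Nothing gathered since the last sync. */
		if (!gather->pgsize)
			return;

		/* One ranged invalidation instead of one flush per page. */
		foo_tlb_inv_range(domain, gather->start, gather->end);
	}

The core resets the gather state after ->iotlb_sync() returns, so the
callback only has to issue the invalidation.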
parent 4fcf8544fc
commit 56f8af5e9d
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3055,7 +3055,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 }
 
 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			   size_t page_size)
+			      size_t page_size,
+			      struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	size_t unmap_size;
@@ -3196,6 +3197,12 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 	domain_flush_complete(dom);
 }
 
+static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+				 struct iommu_iotlb_gather *gather)
+{
+	amd_iommu_flush_iotlb_all(domain);
+}
+
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
@@ -3214,7 +3221,7 @@ const struct iommu_ops amd_iommu_ops = {
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
-	.iotlb_sync = amd_iommu_flush_iotlb_all,
+	.iotlb_sync = amd_iommu_iotlb_sync,
 };
 
 /*****************************************************************************
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1985,8 +1985,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	return ops->map(ops, iova, paddr, size, prot);
 }
 
-static size_t
-arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+			     size_t size, struct iommu_iotlb_gather *gather)
 {
 	int ret;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2010,7 +2010,8 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
 		arm_smmu_tlb_inv_context(smmu_domain);
 }
 
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+				struct iommu_iotlb_gather *gather)
 {
 	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
 
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1301,7 +1301,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
-			     size_t size)
+			     size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
 	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1329,7 +1329,8 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
 	}
 }
 
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+				struct iommu_iotlb_gather *gather)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1130,7 +1130,8 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain
 }
 
 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
-				 unsigned long l_iova, size_t size)
+				 unsigned long l_iova, size_t size,
+				 struct iommu_iotlb_gather *gather)
 {
 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5147,7 +5147,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
 }
 
 static size_t intel_iommu_unmap(struct iommu_domain *domain,
-				unsigned long iova, size_t size)
+				unsigned long iova, size_t size,
+				struct iommu_iotlb_gather *gather)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct page *freelist = NULL;
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1899,7 +1899,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 	while (unmapped < size) {
 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-		unmapped_page = ops->unmap(domain, iova, pgsize);
+		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
 		if (!unmapped_page)
 			break;
 
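[Editor's note: the iotlb_gather pointer threaded through __iommu_unmap() is
a stack variable owned by the top-level caller. At this point in the series,
iommu_unmap() reads roughly as follows; reproduced here for context only, it
is not part of this diff:]

	size_t iommu_unmap(struct iommu_domain *domain,
			   unsigned long iova, size_t size)
	{
		struct iommu_iotlb_gather iotlb_gather;
		size_t ret;

		iommu_iotlb_gather_init(&iotlb_gather);
		ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
		iommu_tlb_sync(domain, &iotlb_gather);

		return ret;
	}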
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -733,14 +733,14 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
 }
 
 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
-			  size_t size)
+			  size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
 	return domain->iop->unmap(domain->iop, iova, size);
 }
 
-static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
 {
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
@@ -748,6 +748,12 @@ static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
 		ipmmu_tlb_flush_all(domain);
 }
 
+static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
+			     struct iommu_iotlb_gather *gather)
+{
+	ipmmu_flush_iotlb_all(io_domain);
+}
+
 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
 				      dma_addr_t iova)
 {
@@ -957,7 +963,7 @@ static const struct iommu_ops ipmmu_ops = {
 	.detach_dev = ipmmu_detach_device,
 	.map = ipmmu_map,
 	.unmap = ipmmu_unmap,
-	.flush_iotlb_all = ipmmu_iotlb_sync,
+	.flush_iotlb_all = ipmmu_flush_iotlb_all,
 	.iotlb_sync = ipmmu_iotlb_sync,
 	.iova_to_phys = ipmmu_iova_to_phys,
 	.add_device = ipmmu_add_device,
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -509,7 +509,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			      size_t len)
+			      size_t len, struct iommu_iotlb_gather *gather)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
-			      unsigned long iova, size_t size)
+			      unsigned long iova, size_t size,
+			      struct iommu_iotlb_gather *gather)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 	unsigned long flags;
@@ -384,7 +385,13 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
 	return unmapsz;
 }
 
-static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
+static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+}
+
+static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+				 struct iommu_iotlb_gather *gather)
 {
 	mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
 }
@@ -490,7 +497,7 @@ static const struct iommu_ops mtk_iommu_ops = {
 	.detach_dev	= mtk_iommu_detach_device,
 	.map		= mtk_iommu_map,
 	.unmap		= mtk_iommu_unmap,
-	.flush_iotlb_all = mtk_iommu_iotlb_sync,
+	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
 	.iotlb_sync	= mtk_iommu_iotlb_sync,
 	.iova_to_phys	= mtk_iommu_iova_to_phys,
 	.add_device	= mtk_iommu_add_device,
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -324,7 +324,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
-			      unsigned long iova, size_t size)
+			      unsigned long iova, size_t size,
+			      struct iommu_iotlb_gather *gather)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 	unsigned long flags;
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1149,7 +1149,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 }
 
 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			       size_t size)
+			       size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 	struct device *dev = omap_domain->dev;
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -417,7 +417,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			       size_t size)
+			       size_t size, struct iommu_iotlb_gather *gather)
 {
 	size_t ret;
 	unsigned long flags;
@@ -441,7 +441,7 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }
 
-static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
 	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
@@ -454,6 +454,12 @@ static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
 	pm_runtime_put_sync(qcom_domain->iommu->dev);
 }
 
+static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
+				  struct iommu_iotlb_gather *gather)
+{
+	qcom_iommu_flush_iotlb_all(domain);
+}
+
 static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
 					   dma_addr_t iova)
 {
@@ -581,7 +587,7 @@ static const struct iommu_ops qcom_iommu_ops = {
 	.detach_dev	= qcom_iommu_detach_dev,
 	.map		= qcom_iommu_map,
 	.unmap		= qcom_iommu_unmap,
-	.flush_iotlb_all = qcom_iommu_iotlb_sync,
+	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
 	.iotlb_sync	= qcom_iommu_iotlb_sync,
 	.iova_to_phys	= qcom_iommu_iova_to_phys,
 	.add_device	= qcom_iommu_add_device,
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -794,7 +794,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
 }
 
 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
-			     size_t size)
+			     size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -314,7 +314,8 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
 }
 
 static size_t s390_iommu_unmap(struct iommu_domain *domain,
-			       unsigned long iova, size_t size)
+			       unsigned long iova, size_t size,
+			       struct iommu_iotlb_gather *gather)
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
 	int flags = ZPCI_PTE_INVALID;
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -207,7 +207,7 @@ static inline int __gart_iommu_unmap(struct gart_device *gart,
 }
 
 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			       size_t bytes)
+			       size_t bytes, struct iommu_iotlb_gather *gather)
 {
 	struct gart_device *gart = gart_handle;
 	int err;
@@ -273,11 +273,17 @@ static int gart_iommu_of_xlate(struct device *dev,
 	return 0;
 }
 
-static void gart_iommu_sync(struct iommu_domain *domain)
+static void gart_iommu_sync_map(struct iommu_domain *domain)
 {
 	FLUSH_GART_REGS(gart_handle);
 }
 
+static void gart_iommu_sync(struct iommu_domain *domain,
+			    struct iommu_iotlb_gather *gather)
+{
+	gart_iommu_sync_map(domain);
+}
+
 static const struct iommu_ops gart_iommu_ops = {
 	.capable	= gart_iommu_capable,
 	.domain_alloc	= gart_iommu_domain_alloc,
@@ -292,7 +298,7 @@ static const struct iommu_ops gart_iommu_ops = {
 	.iova_to_phys	= gart_iommu_iova_to_phys,
 	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
 	.of_xlate	= gart_iommu_of_xlate,
-	.iotlb_sync_map	= gart_iommu_sync,
+	.iotlb_sync_map	= gart_iommu_sync_map,
 	.iotlb_sync	= gart_iommu_sync,
 };
 
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -680,7 +680,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
-			       size_t size)
+			       size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct tegra_smmu_as *as = to_smmu_as(domain);
 	dma_addr_t pte_dma;
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -742,7 +742,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			   size_t size)
+			   size_t size, struct iommu_iotlb_gather *gather)
 {
 	int ret = 0;
 	size_t unmapped;
@@ -788,7 +788,8 @@ static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
 	return paddr;
 }
 
-static void viommu_iotlb_sync(struct iommu_domain *domain)
+static void viommu_iotlb_sync(struct iommu_domain *domain,
+			      struct iommu_iotlb_gather *gather)
 {
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -258,10 +258,11 @@ struct iommu_ops {
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
 		   phys_addr_t paddr, size_t size, int prot);
 	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		     size_t size);
+		     size_t size, struct iommu_iotlb_gather *iotlb_gather);
 	void (*flush_iotlb_all)(struct iommu_domain *domain);
 	void (*iotlb_sync_map)(struct iommu_domain *domain);
-	void (*iotlb_sync)(struct iommu_domain *domain);
+	void (*iotlb_sync)(struct iommu_domain *domain,
+			   struct iommu_iotlb_gather *iotlb_gather);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
 	int (*add_device)(struct device *dev);
 	void (*remove_device)(struct device *dev);
@@ -502,7 +503,7 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain,
 				  struct iommu_iotlb_gather *iotlb_gather)
 {
 	if (domain->ops->iotlb_sync)
-		domain->ops->iotlb_sync(domain);
+		domain->ops->iotlb_sync(domain, iotlb_gather);
 
 	iommu_iotlb_gather_init(iotlb_gather);
 }
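[Editor's note: for reference, the structure now passed to both callbacks is
a simple range accumulator introduced by the parent series. At this point in
the tree it looks roughly like this; reproduced from include/linux/iommu.h
for context only:]

	struct iommu_iotlb_gather {
		unsigned long		start;
		unsigned long		end;
		size_t			pgsize;
	};

	static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
	{
		*gather = (struct iommu_iotlb_gather) {
			.start	= ULONG_MAX,
		};
	}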