arm/smmu driver updates via Will Deacon fixing locking around page
table walks and a couple other issues.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.15 (GNU/Linux)
 
 iQIcBAABAgAGBQJSp3VBAAoJECObm247sIsi0REP/03lnW12hEKGb46M2mCy/hkX
 L6OdPzXhjKpucLbochQvvXu0OOeG+XzFyQFqupoMo6Crw6b2sNh8lAutNJ5+dqns
 pfSK/v/KjSXoCh08+VYvEGTinOkYTlXtvH7dY7kx8CSWd8SGhugynSRvsWJfzhXx
 BaFaNqop3hti1VKAjTdZUwNaCK2WLx1JBcj4lOnxbSrWwfj7PURKKG0FABPChRKJ
 1DvEkV3HVH+851WVNr6zgmdWbqEzZZgbfJGz2NbqKXVXvMRPvT7LTNGrsOtlpRD1
 tVy85cnox+QfBtIe7ueUtq/b9aOffYOhq5bgE4zx04dpr5/yMbLV901aIxIQtbTv
 YRPwODTUL7DTxVCpHEUSIebP2K7cyiR2qAhxLt0WMn4I/tgMqGoGAo5H5LWWuv+r
 CYlDzKS1b5ucomrMD4n33U/BEB6KCof6hwTV1P6qSCDu8sm1Ddcuug3c66OtSeRI
 Vi+8AAo7bBQvgU99mTCs9j+9BrcxQID3XPCJIPdZba0AQ2GaHO/gtxCZGQCvLbyd
 AReCly0U6/qiwQ12c5JxBBMwTu9TfjnS7Ah9yd8yMTjvfUinTLV3/5AfxfO7GamS
 yb4MFEcw7Cif+O01vBSJm5ZbxoDzdeO0dtAUQwNGxZpTzwbWoQ+SyfEvgXmvACtF
 tO0u9wAcZ4CqsTgoAU1z
 =9WG5
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-for-v3.13-rc4' of git://github.com/awilliam/linux-vfio

Pull iommu fixes from Alex Williamson:
 "arm/smmu driver updates via Will Deacon fixing locking around page
  table walks and a couple other issues"

* tag 'iommu-fixes-for-v3.13-rc4' of git://github.com/awilliam/linux-vfio:
  iommu/arm-smmu: fix error return code in arm_smmu_device_dt_probe()
  iommu/arm-smmu: remove potential NULL dereference on mapping path
  iommu/arm-smmu: use mutex instead of spinlock for locking page tables
This commit is contained in:
Linus Torvalds 2013-12-12 10:20:58 -08:00
commit 319720f534

View File

@@ -392,7 +392,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg root_cfg; struct arm_smmu_cfg root_cfg;
phys_addr_t output_mask; phys_addr_t output_mask;
spinlock_t lock; struct mutex lock;
}; };
static DEFINE_SPINLOCK(arm_smmu_devices_lock); static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
goto out_free_domain; goto out_free_domain;
smmu_domain->root_cfg.pgd = pgd; smmu_domain->root_cfg.pgd = pgd;
spin_lock_init(&smmu_domain->lock); mutex_init(&smmu_domain->lock);
domain->priv = smmu_domain; domain->priv = smmu_domain;
return 0; return 0;
@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Sanity check the domain. We don't currently support domains * Sanity check the domain. We don't currently support domains
* that cross between different SMMU chains. * that cross between different SMMU chains.
*/ */
spin_lock(&smmu_domain->lock); mutex_lock(&smmu_domain->lock);
if (!smmu_domain->leaf_smmu) { if (!smmu_domain->leaf_smmu) {
/* Now that we have a master, we can finalise the domain */ /* Now that we have a master, we can finalise the domain */
ret = arm_smmu_init_domain_context(domain, dev); ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
dev_name(device_smmu->dev)); dev_name(device_smmu->dev));
goto err_unlock; goto err_unlock;
} }
spin_unlock(&smmu_domain->lock); mutex_unlock(&smmu_domain->lock);
/* Looks ok, so add the device to the domain */ /* Looks ok, so add the device to the domain */
master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return arm_smmu_domain_add_master(smmu_domain, master); return arm_smmu_domain_add_master(smmu_domain, master);
err_unlock: err_unlock:
spin_unlock(&smmu_domain->lock); mutex_unlock(&smmu_domain->lock);
return ret; return ret;
} }
@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (paddr & ~output_mask) if (paddr & ~output_mask)
return -ERANGE; return -ERANGE;
spin_lock(&smmu_domain->lock); mutex_lock(&smmu_domain->lock);
pgd += pgd_index(iova); pgd += pgd_index(iova);
end = iova + size; end = iova + size;
do { do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
} while (pgd++, iova != end); } while (pgd++, iova != end);
out_unlock: out_unlock:
spin_unlock(&smmu_domain->lock); mutex_unlock(&smmu_domain->lock);
/* Ensure new page tables are visible to the hardware walker */ /* Ensure new page tables are visible to the hardware walker */
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int flags) phys_addr_t paddr, size_t size, int flags)
{ {
struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
if (!smmu_domain || !smmu) if (!smmu_domain)
return -ENODEV; return -ENODEV;
/* Check for silent address truncation up the SMMU chain. */ /* Check for silent address truncation up the SMMU chain. */
@@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova) dma_addr_t iova)
{ {
pgd_t *pgd; pgd_t *pgdp, pgd;
pud_t *pud; pud_t pud;
pmd_t *pmd; pmd_t pmd;
pte_t *pte; pte_t pte;
struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
struct arm_smmu_device *smmu = root_cfg->smmu;
spin_lock(&smmu_domain->lock); pgdp = root_cfg->pgd;
pgd = root_cfg->pgd; if (!pgdp)
if (!pgd) return 0;
goto err_unlock;
pgd += pgd_index(iova); pgd = *(pgdp + pgd_index(iova));
if (pgd_none_or_clear_bad(pgd)) if (pgd_none(pgd))
goto err_unlock; return 0;
pud = pud_offset(pgd, iova); pud = *pud_offset(&pgd, iova);
if (pud_none_or_clear_bad(pud)) if (pud_none(pud))
goto err_unlock; return 0;
pmd = pmd_offset(pud, iova); pmd = *pmd_offset(&pud, iova);
if (pmd_none_or_clear_bad(pmd)) if (pmd_none(pmd))
goto err_unlock; return 0;
pte = pmd_page_vaddr(*pmd) + pte_index(iova); pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
if (pte_none(pte)) if (pte_none(pte))
goto err_unlock; return 0;
spin_unlock(&smmu_domain->lock); return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
err_unlock:
spin_unlock(&smmu_domain->lock);
dev_warn(smmu->dev,
"invalid (corrupt?) page tables detected for iova 0x%llx\n",
(unsigned long long)iova);
return -EINVAL;
} }
static int arm_smmu_domain_has_cap(struct iommu_domain *domain, static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
@@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
dev_err(dev, dev_err(dev,
"found only %d context interrupt(s) but %d required\n", "found only %d context interrupt(s) but %d required\n",
smmu->num_context_irqs, smmu->num_context_banks); smmu->num_context_irqs, smmu->num_context_banks);
err = -ENODEV;
goto out_put_parent; goto out_put_parent;
} }