iommu/vt-d: Fix recursive lock issue in iommu_flush_dev_iotlb()

The per-domain spinlock is acquired in iommu_flush_dev_iotlb(), which
can be called in interrupt context. For example, the drm-intel CI
system got completely blocked with the error below:

 WARNING: inconsistent lock state
 6.0.0-rc1-CI_DRM_11990-g6590d43d39b9+ #1 Not tainted
 --------------------------------
 inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
 swapper/6/0 [HC0[0]:SC1[1]:HE1:SE0] takes:
 ffff88810440d678 (&domain->lock){+.?.}-{2:2}, at: iommu_flush_dev_iotlb.part.61+0x23/0x80
 {SOFTIRQ-ON-W} state was registered at:
   lock_acquire+0xd3/0x310
   _raw_spin_lock+0x2a/0x40
   domain_update_iommu_cap+0x20b/0x2c0
   intel_iommu_attach_device+0x5bd/0x860
   __iommu_attach_device+0x18/0xe0
   bus_iommu_probe+0x1f3/0x2d0
   bus_set_iommu+0x82/0xd0
   intel_iommu_init+0xe45/0x102a
   pci_iommu_init+0x9/0x31
   do_one_initcall+0x53/0x2f0
   kernel_init_freeable+0x18f/0x1e1
   kernel_init+0x11/0x120
   ret_from_fork+0x1f/0x30
 irq event stamp: 162354
 hardirqs last  enabled at (162354): [<ffffffff81b59274>] _raw_spin_unlock_irqrestore+0x54/0x70
 hardirqs last disabled at (162353): [<ffffffff81b5901b>] _raw_spin_lock_irqsave+0x4b/0x50
 softirqs last  enabled at (162338): [<ffffffff81e00323>] __do_softirq+0x323/0x48e
 softirqs last disabled at (162349): [<ffffffff810c1588>] irq_exit_rcu+0xb8/0xe0
 other info that might help us debug this:
  Possible unsafe locking scenario:
        CPU0
        ----
   lock(&domain->lock);
   <Interrupt>
     lock(&domain->lock);
   *** DEADLOCK ***
 1 lock held by swapper/6/0:

This converts the spin_lock()/spin_unlock() calls to their
irqsave/irqrestore variants to fix the recursive locking issue.
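
The conversion follows the usual pattern for a spinlock that may also be
taken from softirq context: disable local interrupts while the lock is
held and restore the previous interrupt state on unlock. A minimal sketch
of the pattern (the structure and function names below are illustrative,
not the actual intel-iommu code):

	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct example_domain {
		spinlock_t lock;		/* protects 'devices' */
		struct list_head devices;
	};

	/* Reachable from softirq context (e.g. an IOTLB flush triggered by
	 * a DMA unmap), so interrupts must be disabled while the lock is
	 * held in every path that takes it. */
	static void example_flush(struct example_domain *domain)
	{
		unsigned long flags;

		spin_lock_irqsave(&domain->lock, flags);	/* was: spin_lock() */
		/* walk domain->devices and flush each device's IOTLB here */
		spin_unlock_irqrestore(&domain->lock, flags);	/* was: spin_unlock() */
	}

Callers that already run with interrupts disabled are unaffected, since
irqrestore simply puts the previously saved interrupt state back.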

Fixes: ffd5869d93 ("iommu/vt-d: Replace spin_lock_irqsave() with spin_lock()")
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Acked-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20220817025650.3253959-1-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
@@ -515,8 +515,9 @@ static int domain_update_device_node(struct dmar_domain *domain)
 {
 	struct device_domain_info *info;
 	int nid = NUMA_NO_NODE;
+	unsigned long flags;
 
-	spin_lock(&domain->lock);
+	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
 		/*
 		 * There could possibly be multiple device numa nodes as devices
@@ -528,7 +529,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
 		if (nid != NUMA_NO_NODE)
 			break;
 	}
-	spin_unlock(&domain->lock);
+	spin_unlock_irqrestore(&domain->lock, flags);
 
 	return nid;
 }
@@ -1362,19 +1363,20 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
 			 u8 bus, u8 devfn)
 {
 	struct device_domain_info *info;
+	unsigned long flags;
 
 	if (!iommu->qi)
 		return NULL;
 
-	spin_lock(&domain->lock);
+	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
 		if (info->iommu == iommu && info->bus == bus &&
 		    info->devfn == devfn) {
-			spin_unlock(&domain->lock);
+			spin_unlock_irqrestore(&domain->lock, flags);
 			return info->ats_supported ? info : NULL;
 		}
 	}
-	spin_unlock(&domain->lock);
+	spin_unlock_irqrestore(&domain->lock, flags);
 
 	return NULL;
 }
@@ -1383,8 +1385,9 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 {
 	struct device_domain_info *info;
 	bool has_iotlb_device = false;
+	unsigned long flags;
 
-	spin_lock(&domain->lock);
+	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
 		if (info->ats_enabled) {
 			has_iotlb_device = true;
@@ -1392,7 +1395,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 		}
 	}
 	domain->has_iotlb_device = has_iotlb_device;
-	spin_unlock(&domain->lock);
+	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1484,14 +1487,15 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 				  u64 addr, unsigned mask)
 {
 	struct device_domain_info *info;
+	unsigned long flags;
 
 	if (!domain->has_iotlb_device)
 		return;
 
-	spin_lock(&domain->lock);
+	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry(info, &domain->devices, link)
 		__iommu_flush_dev_iotlb(info, addr, mask);
-	spin_unlock(&domain->lock);
+	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -2453,6 +2457,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct intel_iommu *iommu;
+	unsigned long flags;
 	u8 bus, devfn;
 	int ret;
 
@@ -2464,9 +2469,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 	if (ret)
 		return ret;
 	info->domain = domain;
-	spin_lock(&domain->lock);
+	spin_lock_irqsave(&domain->lock, flags);
 	list_add(&info->link, &domain->devices);
-	spin_unlock(&domain->lock);
+	spin_unlock_irqrestore(&domain->lock, flags);
 
 	/* PASID table is mandatory for a PCI device in scalable mode. */
 	if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -4090,6 +4095,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct dmar_domain *domain = info->domain;
 	struct intel_iommu *iommu = info->iommu;
+	unsigned long flags;
 
 	if (!dev_is_real_dma_subdevice(info->dev)) {
 		if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4101,9 +4107,9 @@ static void dmar_remove_one_dev_info(struct device *dev)
 		intel_pasid_free_table(info->dev);
 	}
 
-	spin_lock(&domain->lock);
+	spin_lock_irqsave(&domain->lock, flags);
 	list_del(&info->link);
-	spin_unlock(&domain->lock);
+	spin_unlock_irqrestore(&domain->lock, flags);
 
 	domain_detach_iommu(domain, iommu);
 	info->domain = NULL;
@@ -4422,19 +4428,20 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
 static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	unsigned long flags;
 
 	if (dmar_domain->force_snooping)
 		return true;
 
-	spin_lock(&dmar_domain->lock);
+	spin_lock_irqsave(&dmar_domain->lock, flags);
 	if (!domain_support_force_snooping(dmar_domain)) {
-		spin_unlock(&dmar_domain->lock);
+		spin_unlock_irqrestore(&dmar_domain->lock, flags);
 		return false;
 	}
 
 	domain_set_force_snooping(dmar_domain);
 	dmar_domain->force_snooping = true;
-	spin_unlock(&dmar_domain->lock);
+	spin_unlock_irqrestore(&dmar_domain->lock, flags);
 
 	return true;
 }