iommu/vt-d: Update to use PCI DMA aliases

VT-d code currently makes use of pci_find_upstream_pcie_bridge() in
order to find the topology based alias of a device.  This function has
a few problems.  First, it doesn't check the entire alias path of the
device to the root bus, therefore if a PCIe device is masked upstream,
the wrong result is produced.  Also, it's known to get confused and
give up when it crosses a bridge from a conventional PCI bus to a PCIe
bus that lacks a PCIe capability.  The PCI-core provided DMA alias
support solves both of these problems and additionally adds support
for DMA function quirks allowing VT-d to work with devices like
Marvell and Ricoh with known broken requester IDs.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
Alex Williamson 2014-07-03 09:51:43 -06:00 committed by Joerg Roedel
parent e17f9ff413
commit 579305f75d
2 changed files with 148 additions and 141 deletions

View File

@ -1841,54 +1841,56 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
return 0; return 0;
} }
struct domain_context_mapping_data {
struct dmar_domain *domain;
struct intel_iommu *iommu;
int translation;
};
static int domain_context_mapping_cb(struct pci_dev *pdev,
u16 alias, void *opaque)
{
struct domain_context_mapping_data *data = opaque;
return domain_context_mapping_one(data->domain, data->iommu,
PCI_BUS_NUM(alias), alias & 0xff,
data->translation);
}
static int static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev, domain_context_mapping(struct dmar_domain *domain, struct device *dev,
int translation) int translation)
{ {
int ret;
struct pci_dev *pdev, *tmp, *parent;
struct intel_iommu *iommu; struct intel_iommu *iommu;
u8 bus, devfn; u8 bus, devfn;
struct domain_context_mapping_data data;
iommu = device_to_iommu(dev, &bus, &devfn); iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu) if (!iommu)
return -ENODEV; return -ENODEV;
ret = domain_context_mapping_one(domain, iommu, bus, devfn, if (!dev_is_pci(dev))
return domain_context_mapping_one(domain, iommu, bus, devfn,
translation); translation);
if (ret || !dev_is_pci(dev))
return ret;
/* dependent device mapping */ data.domain = domain;
pdev = to_pci_dev(dev); data.iommu = iommu;
tmp = pci_find_upstream_pcie_bridge(pdev); data.translation = translation;
if (!tmp)
return 0; return pci_for_each_dma_alias(to_pci_dev(dev),
/* Secondary interface's bus number and devfn 0 */ &domain_context_mapping_cb, &data);
parent = pdev->bus->self;
while (parent != tmp) {
ret = domain_context_mapping_one(domain, iommu,
parent->bus->number,
parent->devfn, translation);
if (ret)
return ret;
parent = parent->bus->self;
} }
if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
return domain_context_mapping_one(domain, iommu, static int domain_context_mapped_cb(struct pci_dev *pdev,
tmp->subordinate->number, 0, u16 alias, void *opaque)
translation); {
else /* this is a legacy PCI bridge */ struct intel_iommu *iommu = opaque;
return domain_context_mapping_one(domain, iommu,
tmp->bus->number, return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
tmp->devfn,
translation);
} }
static int domain_context_mapped(struct device *dev) static int domain_context_mapped(struct device *dev)
{ {
int ret;
struct pci_dev *pdev, *tmp, *parent;
struct intel_iommu *iommu; struct intel_iommu *iommu;
u8 bus, devfn; u8 bus, devfn;
@ -1896,30 +1898,11 @@ static int domain_context_mapped(struct device *dev)
if (!iommu) if (!iommu)
return -ENODEV; return -ENODEV;
ret = device_context_mapped(iommu, bus, devfn); if (!dev_is_pci(dev))
if (!ret || !dev_is_pci(dev)) return device_context_mapped(iommu, bus, devfn);
return ret;
/* dependent device mapping */ return !pci_for_each_dma_alias(to_pci_dev(dev),
pdev = to_pci_dev(dev); domain_context_mapped_cb, iommu);
tmp = pci_find_upstream_pcie_bridge(pdev);
if (!tmp)
return ret;
/* Secondary interface's bus number and devfn 0 */
parent = pdev->bus->self;
while (parent != tmp) {
ret = device_context_mapped(iommu, parent->bus->number,
parent->devfn);
if (!ret)
return ret;
parent = parent->bus->self;
}
if (pci_is_pcie(tmp))
return device_context_mapped(iommu, tmp->subordinate->number,
0);
else
return device_context_mapped(iommu, tmp->bus->number,
tmp->devfn);
} }
/* Returns a number of VTD pages, but aligned to MM page size */ /* Returns a number of VTD pages, but aligned to MM page size */
@ -2208,79 +2191,86 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
return domain; return domain;
} }
static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
*(u16 *)opaque = alias;
return 0;
}
/* domain is initialized */ /* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{ {
struct dmar_domain *domain, *free = NULL; struct dmar_domain *domain, *tmp;
struct intel_iommu *iommu = NULL; struct intel_iommu *iommu;
struct device_domain_info *info; struct device_domain_info *info;
struct pci_dev *dev_tmp = NULL; u16 dma_alias;
unsigned long flags; unsigned long flags;
u8 bus, devfn, bridge_bus, bridge_devfn; u8 bus, devfn;
domain = find_domain(dev); domain = find_domain(dev);
if (domain) if (domain)
return domain; return domain;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return NULL;
if (dev_is_pci(dev)) { if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev); struct pci_dev *pdev = to_pci_dev(dev);
u16 segment;
segment = pci_domain_nr(pdev->bus); pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
dev_tmp = pci_find_upstream_pcie_bridge(pdev);
if (dev_tmp) {
if (pci_is_pcie(dev_tmp)) {
bridge_bus = dev_tmp->subordinate->number;
bridge_devfn = 0;
} else {
bridge_bus = dev_tmp->bus->number;
bridge_devfn = dev_tmp->devfn;
}
spin_lock_irqsave(&device_domain_lock, flags); spin_lock_irqsave(&device_domain_lock, flags);
info = dmar_search_domain_by_dev_info(segment, info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
bridge_bus, PCI_BUS_NUM(dma_alias),
bridge_devfn); dma_alias & 0xff);
if (info) { if (info) {
iommu = info->iommu; iommu = info->iommu;
domain = info->domain; domain = info->domain;
} }
spin_unlock_irqrestore(&device_domain_lock, flags); spin_unlock_irqrestore(&device_domain_lock, flags);
/* pcie-pci bridge already has a domain, uses it */
/* DMA alias already has a domain, uses it */
if (info) if (info)
goto found_domain; goto found_domain;
} }
}
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
goto error;
/* Allocate and initialize new domain for the device */ /* Allocate and initialize new domain for the device */
domain = alloc_domain(false); domain = alloc_domain(false);
if (!domain) if (!domain)
goto error; return NULL;
if (iommu_attach_domain(domain, iommu)) { if (iommu_attach_domain(domain, iommu)) {
free_domain_mem(domain); free_domain_mem(domain);
domain = NULL; return NULL;
goto error; }
if (domain_init(domain, gaw)) {
domain_exit(domain);
return NULL;
}
/* register PCI DMA alias device */
if (dev_is_pci(dev)) {
tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
dma_alias & 0xff, NULL, domain);
if (!tmp || tmp != domain) {
domain_exit(domain);
domain = tmp;
} }
free = domain;
if (domain_init(domain, gaw))
goto error;
/* register pcie-to-pci device */
if (dev_tmp) {
domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn,
NULL, domain);
if (!domain) if (!domain)
goto error; return NULL;
} }
found_domain: found_domain:
domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
error:
if (free != domain) if (!tmp || tmp != domain) {
domain_exit(free); domain_exit(domain);
domain = tmp;
}
return domain; return domain;
} }
@ -4043,33 +4033,27 @@ out_free_dmar:
return ret; return ret;
} }
static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
struct intel_iommu *iommu = opaque;
iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
return 0;
}
/*
* NB - intel-iommu lacks any sort of reference counting for the users of
* dependent devices. If multiple endpoints have intersecting dependent
* devices, unbinding the driver from any one of them will possibly leave
* the others unable to operate.
*/
static void iommu_detach_dependent_devices(struct intel_iommu *iommu, static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
struct device *dev) struct device *dev)
{ {
struct pci_dev *tmp, *parent, *pdev;
if (!iommu || !dev || !dev_is_pci(dev)) if (!iommu || !dev || !dev_is_pci(dev))
return; return;
pdev = to_pci_dev(dev); pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
/* dependent device detach */
tmp = pci_find_upstream_pcie_bridge(pdev);
/* Secondary interface's bus number and devfn 0 */
if (tmp) {
parent = pdev->bus->self;
while (parent != tmp) {
iommu_detach_dev(iommu, parent->bus->number,
parent->devfn);
parent = parent->bus->self;
}
if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
iommu_detach_dev(iommu,
tmp->subordinate->number, 0);
else /* this is a legacy PCI bridge */
iommu_detach_dev(iommu, tmp->bus->number,
tmp->devfn);
}
} }
static void domain_remove_one_dev_info(struct dmar_domain *domain, static void domain_remove_one_dev_info(struct dmar_domain *domain,

View File

@ -369,29 +369,52 @@ static int set_hpet_sid(struct irte *irte, u8 id)
return 0; return 0;
} }
struct set_msi_sid_data {
struct pci_dev *pdev;
u16 alias;
};
static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
struct set_msi_sid_data *data = opaque;
data->pdev = pdev;
data->alias = alias;
return 0;
}
static int set_msi_sid(struct irte *irte, struct pci_dev *dev) static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{ {
struct pci_dev *bridge; struct set_msi_sid_data data;
if (!irte || !dev) if (!irte || !dev)
return -1; return -1;
/* PCIe device or Root Complex integrated PCI device */ pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
if (pci_is_pcie(dev) || !dev->bus->parent) {
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
(dev->bus->number << 8) | dev->devfn);
return 0;
}
bridge = pci_find_upstream_pcie_bridge(dev); /*
if (bridge) { * DMA alias provides us with a PCI device and alias. The only case
if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */ * where it will return an alias on a different bus than the
* device is the case of a PCIe-to-PCI bridge, where the alias is for
* the subordinate bus. In this case we can only verify the bus.
*
* If the alias device is on a different bus than our source device
* then we have a topology based alias, use it.
*
* Otherwise, the alias is for a device DMA quirk and we cannot
* assume that MSI uses the same requester ID. Therefore use the
* original device.
*/
if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
(bridge->bus->number << 8) | dev->bus->number); PCI_DEVID(PCI_BUS_NUM(data.alias),
else /* this is a legacy PCI bridge */ dev->bus->number));
else if (data.pdev->bus->number != dev->bus->number)
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
else
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
(bridge->bus->number << 8) | bridge->devfn); PCI_DEVID(dev->bus->number, dev->devfn));
}
return 0; return 0;
} }