PCI/MSI: Use msi_add_msi_desc()
Simplify the allocation of MSI descriptors by using msi_add_msi_desc(), which
moves the storage handling to core code and prepares for dynamic extension of
the MSI-X vector space.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Nishanth Menon <nm@ti.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://lore.kernel.org/r/20211206210748.035348646@linutronix.de
commit 71020a3c0d
parent 5512c5eaf5
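For orientation, the pattern the diff below converts to is: build a template struct msi_desc on the stack and hand it to msi_add_msi_desc(), which allocates, copies and stores the real descriptor in MSI core code. The fragment below is a minimal sketch of that pattern, assuming the kernel-internal APIs added by this series; the helper name example_setup_one_desc() is illustrative only and not part of the patch.

/*
 * Minimal sketch of the allocation pattern introduced by this change:
 * fill a stack-local struct msi_desc and let the MSI core allocate and
 * store the real descriptor.  Kernel-internal code, not standalone;
 * the helper name is hypothetical, the fields match the diff below.
 */
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/string.h>

static int example_setup_one_desc(struct pci_dev *dev, int nvec)
{
        struct msi_desc desc;

        memset(&desc, 0, sizeof(desc));                 /* no alloc_msi_entry() anymore */
        desc.nvec_used = nvec;                          /* size of the vector block */
        desc.pci.msi_attrib.default_irq = dev->irq;     /* descriptor fields as in the diff */

        /* Core code copies the template into storage it owns and manages */
        return msi_add_msi_desc(&dev->dev, &desc);
}

This is what removes the per-driver alloc_msi_entry()/list_add_tail() bookkeeping from the PCI/MSI code in the hunks below.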
@@ -376,40 +376,41 @@ static int pci_setup_msi_context(struct pci_dev *dev)
 	return ret;
 }
 
-static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
+static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+			      struct irq_affinity_desc *masks)
 {
-	struct msi_desc *entry;
+	struct msi_desc desc;
 	u16 control;
 
 	/* MSI Entry Initialization */
-	entry = alloc_msi_entry(&dev->dev, nvec, masks);
-	if (!entry)
-		return NULL;
+	memset(&desc, 0, sizeof(desc));
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
 	/* Lies, damned lies, and MSIs */
 	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
 		control |= PCI_MSI_FLAGS_MASKBIT;
+	/* Respect XEN's mask disabling */
+	if (pci_msi_ignore_mask)
+		control &= ~PCI_MSI_FLAGS_MASKBIT;
 
-	entry->pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
-	entry->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
-					 !!(control & PCI_MSI_FLAGS_MASKBIT);
-	entry->pci.msi_attrib.default_irq = dev->irq;
-	entry->pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
-	entry->pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+	desc.nvec_used = nvec;
+	desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
+	desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT);
+	desc.pci.msi_attrib.default_irq = dev->irq;
+	desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+	desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+	desc.affinity = masks;
 
 	if (control & PCI_MSI_FLAGS_64BIT)
-		entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
 	else
-		entry->pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
 
 	/* Save the initial mask status */
-	if (entry->pci.msi_attrib.can_mask)
-		pci_read_config_dword(dev, entry->pci.mask_pos, &entry->pci.msi_mask);
+	if (desc.pci.msi_attrib.can_mask)
+		pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);
 
-	return entry;
+	return msi_add_msi_desc(&dev->dev, &desc);
 }
 
 static int msi_verify_entries(struct pci_dev *dev)
@@ -459,17 +460,14 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
 	masks = irq_create_affinity_masks(nvec, affd);
 
 	msi_lock_descs(&dev->dev);
-	entry = msi_setup_entry(dev, nvec, masks);
-	if (!entry) {
-		ret = -ENOMEM;
+	ret = msi_setup_msi_desc(dev, nvec, masks);
+	if (ret)
 		goto fail;
-	}
 
 	/* All MSIs are unmasked by default; mask them all */
+	entry = first_pci_msi_entry(dev);
 	pci_msi_mask(entry, msi_multi_mask(entry));
 
-	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-
 	/* Configure MSI capability structure */
 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
 	if (ret)
@@ -519,48 +517,40 @@ static void __iomem *msix_map_region(struct pci_dev *dev,
 	return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
 }
 
-static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
-			      struct msix_entry *entries, int nvec,
-			      struct irq_affinity_desc *masks)
+static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
+				struct msix_entry *entries, int nvec,
+				struct irq_affinity_desc *masks)
 {
-	int i, vec_count = pci_msix_vec_count(dev);
+	int ret = 0, i, vec_count = pci_msix_vec_count(dev);
 	struct irq_affinity_desc *curmsk;
-	struct msi_desc *entry;
+	struct msi_desc desc;
 	void __iomem *addr;
 
-	for (i = 0, curmsk = masks; i < nvec; i++) {
-		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
-		if (!entry) {
-			/* No enough memory. Don't try again */
-			return -ENOMEM;
-		}
-
-		entry->pci.msi_attrib.is_msix = 1;
-		entry->pci.msi_attrib.is_64 = 1;
-
-		if (entries)
-			entry->msi_index = entries[i].entry;
-		else
-			entry->msi_index = i;
-
-		entry->pci.msi_attrib.is_virtual = entry->msi_index >= vec_count;
-
-		entry->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
-						 !entry->pci.msi_attrib.is_virtual;
-
-		entry->pci.msi_attrib.default_irq = dev->irq;
-		entry->pci.mask_base = base;
-
-		if (entry->pci.msi_attrib.can_mask) {
-			addr = pci_msix_desc_addr(entry);
-			entry->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
-		}
-
-		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-		if (masks)
-			curmsk++;
+	memset(&desc, 0, sizeof(desc));
+
+	desc.nvec_used = 1;
+	desc.pci.msi_attrib.is_msix = 1;
+	desc.pci.msi_attrib.is_64 = 1;
+	desc.pci.msi_attrib.default_irq = dev->irq;
+	desc.pci.mask_base = base;
+
+	for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
+		desc.msi_index = entries ? entries[i].entry : i;
+		desc.affinity = masks ? curmsk : NULL;
+		desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;
+		desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+					       !desc.pci.msi_attrib.is_virtual;
+
+		if (!desc.pci.msi_attrib.can_mask) {
+			addr = pci_msix_desc_addr(&desc);
+			desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+		}
+
+		ret = msi_add_msi_desc(&dev->dev, &desc);
+		if (ret)
+			break;
 	}
-	return 0;
+	return ret;
 }
 
 static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
@@ -598,7 +588,7 @@ static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base,
 	masks = irq_create_affinity_masks(nvec, affd);
 
 	msi_lock_descs(&dev->dev);
-	ret = msix_setup_entries(dev, base, entries, nvec, masks);
+	ret = msix_setup_msi_descs(dev, base, entries, nvec, masks);
 	if (ret)
 		goto out_free;
 