Merge branch 'pci/msi' into next
* pci/msi:
  PCI/MSI: Use dev_printk() when possible
  of/pci: Remove unused MSI controller helpers
  PCI: mvebu: Remove useless MSI enabling code
  PCI: aardvark: Move to MSI handling using generic MSI support
  PCI/MSI: Make pci_msi_shutdown() and pci_msix_shutdown() static
  PCI/MSI: Stop disabling MSI/MSI-X in pci_device_shutdown()
commit c6a7a77cc4
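The aardvark patch in this series drops the driver-private struct msi_controller in favor of the kernel's generic MSI domain layer: an inner irq_domain hands out hardware MSI vectors, and a PCI MSI domain created with pci_msi_create_irq_domain() is stacked on top of it. A rough sketch of that pattern follows; the foo_* names, the irq_chip, the domain ops, and the private struct are assumed for illustration and are not part of this commit.

/*
 * Hypothetical sketch of the generic MSI hierarchy adopted below.
 * foo_pcie, foo_msi_chip, foo_msi_domain_ops and FOO_MSI_NUM are assumed
 * driver-specific definitions, not taken from this commit.
 */
static struct msi_domain_info foo_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                 MSI_FLAG_MULTI_PCI_MSI,
        .chip  = &foo_msi_chip,
};

static int foo_pcie_init_msi(struct foo_pcie *pcie, struct device_node *node)
{
        /* Inner domain: maps hardware MSI vectors to Linux IRQ numbers. */
        pcie->inner_domain = irq_domain_add_linear(NULL, FOO_MSI_NUM,
                                                   &foo_msi_domain_ops, pcie);
        if (!pcie->inner_domain)
                return -ENOMEM;

        /* PCI MSI domain stacked on top; replaces struct msi_controller. */
        pcie->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                                     &foo_msi_domain_info,
                                                     pcie->inner_domain);
        if (!pcie->msi_domain) {
                irq_domain_remove(pcie->inner_domain);
                return -ENOMEM;
        }

        return 0;
}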
@@ -285,51 +285,6 @@ parse_failed:
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */

#ifdef CONFIG_PCI_MSI

static LIST_HEAD(of_pci_msi_chip_list);
static DEFINE_MUTEX(of_pci_msi_chip_mutex);

int of_pci_msi_chip_add(struct msi_controller *chip)
{
        if (!of_property_read_bool(chip->of_node, "msi-controller"))
                return -EINVAL;

        mutex_lock(&of_pci_msi_chip_mutex);
        list_add(&chip->list, &of_pci_msi_chip_list);
        mutex_unlock(&of_pci_msi_chip_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(of_pci_msi_chip_add);

void of_pci_msi_chip_remove(struct msi_controller *chip)
{
        mutex_lock(&of_pci_msi_chip_mutex);
        list_del(&chip->list);
        mutex_unlock(&of_pci_msi_chip_mutex);
}
EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove);

struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
{
        struct msi_controller *c;

        mutex_lock(&of_pci_msi_chip_mutex);
        list_for_each_entry(c, &of_pci_msi_chip_list, list) {
                if (c->of_node == of_node) {
                        mutex_unlock(&of_pci_msi_chip_mutex);
                        return c;
                }
        }
        mutex_unlock(&of_pci_msi_chip_mutex);

        return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);

#endif /* CONFIG_PCI_MSI */

/**
 * of_pci_map_rid - Translate a requester ID through a downstream mapping.
 * @np: root complex device node.
@@ -200,10 +200,12 @@ struct advk_pcie {
        struct list_head resources;
        struct irq_domain *irq_domain;
        struct irq_chip irq_chip;
        struct msi_controller msi;
        struct irq_domain *msi_domain;
        struct irq_domain *msi_inner_domain;
        struct irq_chip msi_bottom_irq_chip;
        struct irq_chip msi_irq_chip;
        DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM);
        struct msi_domain_info msi_domain_info;
        DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
        struct mutex msi_used_lock;
        u16 msi_msg;
        int root_bus_nr;
@@ -545,94 +547,64 @@ static struct pci_ops advk_pcie_ops = {
        .write = advk_pcie_wr_conf,
};

static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
                                         struct msi_msg *msg)
{
        int hwirq;
        struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
        phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);

        msg->address_lo = lower_32_bits(msi_msg);
        msg->address_hi = upper_32_bits(msi_msg);
        msg->data = data->irq;
}

static int advk_msi_set_affinity(struct irq_data *irq_data,
                                 const struct cpumask *mask, bool force)
{
        return -EINVAL;
}

static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
                                     unsigned int virq,
                                     unsigned int nr_irqs, void *args)
{
        struct advk_pcie *pcie = domain->host_data;
        int hwirq, i;

        mutex_lock(&pcie->msi_used_lock);
        hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM);
        if (hwirq >= MSI_IRQ_NUM)
                hwirq = -ENOSPC;
        else
                set_bit(hwirq, pcie->msi_irq_in_use);
        hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
                                           0, nr_irqs, 0);
        if (hwirq >= MSI_IRQ_NUM) {
                mutex_unlock(&pcie->msi_used_lock);
                return -ENOSPC;
        }

        bitmap_set(pcie->msi_used, hwirq, nr_irqs);
        mutex_unlock(&pcie->msi_used_lock);

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &pcie->msi_bottom_irq_chip,
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);

        return hwirq;
}

static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
static void advk_msi_irq_domain_free(struct irq_domain *domain,
                                     unsigned int virq, unsigned int nr_irqs)
{
        struct device *dev = &pcie->pdev->dev;
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct advk_pcie *pcie = domain->host_data;

        mutex_lock(&pcie->msi_used_lock);
        if (!test_bit(hwirq, pcie->msi_irq_in_use))
                dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
        else
                clear_bit(hwirq, pcie->msi_irq_in_use);
        bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
        mutex_unlock(&pcie->msi_used_lock);
}

static int advk_pcie_setup_msi_irq(struct msi_controller *chip,
                                   struct pci_dev *pdev,
                                   struct msi_desc *desc)
{
        struct advk_pcie *pcie = pdev->bus->sysdata;
        struct msi_msg msg;
        int virq, hwirq;
        phys_addr_t msi_msg_phys;

        /* We support MSI, but not MSI-X */
        if (desc->msi_attrib.is_msix)
                return -EINVAL;

        hwirq = advk_pcie_alloc_msi(pcie);
        if (hwirq < 0)
                return hwirq;

        virq = irq_create_mapping(pcie->msi_domain, hwirq);
        if (!virq) {
                advk_pcie_free_msi(pcie, hwirq);
                return -EINVAL;
        }

        irq_set_msi_desc(virq, desc);

        msi_msg_phys = virt_to_phys(&pcie->msi_msg);

        msg.address_lo = lower_32_bits(msi_msg_phys);
        msg.address_hi = upper_32_bits(msi_msg_phys);
        msg.data = virq;

        pci_write_msi_msg(virq, &msg);

        return 0;
}

static void advk_pcie_teardown_msi_irq(struct msi_controller *chip,
                                       unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);
        struct msi_desc *msi = irq_data_get_msi_desc(d);
        struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi);
        unsigned long hwirq = d->hwirq;

        irq_dispose_mapping(irq);
        advk_pcie_free_msi(pcie, hwirq);
}

static int advk_pcie_msi_map(struct irq_domain *domain,
                             unsigned int virq, irq_hw_number_t hw)
{
        struct advk_pcie *pcie = domain->host_data;

        irq_set_chip_and_handler(virq, &pcie->msi_irq_chip,
                                 handle_simple_irq);

        return 0;
}

static const struct irq_domain_ops advk_pcie_msi_irq_ops = {
        .map = advk_pcie_msi_map,
static const struct irq_domain_ops advk_msi_domain_ops = {
        .alloc = advk_msi_irq_domain_alloc,
        .free = advk_msi_irq_domain_free,
};

static void advk_pcie_irq_mask(struct irq_data *d)
@@ -680,31 +652,26 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct device_node *node = dev->of_node;
        struct irq_chip *msi_irq_chip;
        struct msi_controller *msi;
        struct irq_chip *bottom_ic, *msi_ic;
        struct msi_domain_info *msi_di;
        phys_addr_t msi_msg_phys;
        int ret;

        msi_irq_chip = &pcie->msi_irq_chip;

        msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi",
                                            dev_name(dev));
        if (!msi_irq_chip->name)
                return -ENOMEM;

        msi_irq_chip->irq_enable = pci_msi_unmask_irq;
        msi_irq_chip->irq_disable = pci_msi_mask_irq;
        msi_irq_chip->irq_mask = pci_msi_mask_irq;
        msi_irq_chip->irq_unmask = pci_msi_unmask_irq;

        msi = &pcie->msi;

        msi->setup_irq = advk_pcie_setup_msi_irq;
        msi->teardown_irq = advk_pcie_teardown_msi_irq;
        msi->of_node = node;

        mutex_init(&pcie->msi_used_lock);

        bottom_ic = &pcie->msi_bottom_irq_chip;

        bottom_ic->name = "MSI";
        bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
        bottom_ic->irq_set_affinity = advk_msi_set_affinity;

        msi_ic = &pcie->msi_irq_chip;
        msi_ic->name = "advk-MSI";

        msi_di = &pcie->msi_domain_info;
        msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                        MSI_FLAG_MULTI_PCI_MSI;
        msi_di->chip = msi_ic;

        msi_msg_phys = virt_to_phys(&pcie->msi_msg);

        advk_writel(pcie, lower_32_bits(msi_msg_phys),
@@ -712,16 +679,18 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
        advk_writel(pcie, upper_32_bits(msi_msg_phys),
                    PCIE_MSI_ADDR_HIGH_REG);

        pcie->msi_domain =
        pcie->msi_inner_domain =
                irq_domain_add_linear(NULL, MSI_IRQ_NUM,
                                      &advk_pcie_msi_irq_ops, pcie);
        if (!pcie->msi_domain)
                                      &advk_msi_domain_ops, pcie);
        if (!pcie->msi_inner_domain)
                return -ENOMEM;

        ret = of_pci_msi_chip_add(msi);
        if (ret < 0) {
                irq_domain_remove(pcie->msi_domain);
                return ret;
        pcie->msi_domain =
                pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                          msi_di, pcie->msi_inner_domain);
        if (!pcie->msi_domain) {
                irq_domain_remove(pcie->msi_inner_domain);
                return -ENOMEM;
        }

        return 0;
@@ -729,8 +698,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)

static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
        of_pci_msi_chip_remove(&pcie->msi);
        irq_domain_remove(pcie->msi_domain);
        irq_domain_remove(pcie->msi_inner_domain);
}

static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
@@ -917,8 +886,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
        struct advk_pcie *pcie;
        struct resource *res;
        struct pci_bus *bus, *child;
        struct msi_controller *msi;
        struct device_node *msi_node;
        int ret, irq;

        pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
@@ -962,14 +929,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
                return ret;
        }

        msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
        if (msi_node)
                msi = of_pci_find_msi_chip_by_node(msi_node);
        else
                msi = NULL;

        bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
                                    pcie, &pcie->resources, &pcie->msi);
        bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops,
                                pcie, &pcie->resources);
        if (!bus) {
                advk_pcie_remove_msi_irq_domain(pcie);
                advk_pcie_remove_irq_domain(pcie);
@@ -1006,22 +1006,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
        return -ENOENT;
}

static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
{
        struct device_node *msi_node;

        msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
                                    "msi-parent", 0);
        if (!msi_node)
                return;

        pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
        of_node_put(msi_node);

        if (pcie->msi)
                pcie->msi->dev = &pcie->pdev->dev;
}

#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
@@ -1299,7 +1283,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
        for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
                pci_ioremap_io(i, pcie->io.start + i);

        mvebu_pcie_msi_enable(pcie);
        mvebu_pcie_enable(pcie);

        platform_set_drvdata(pdev, pcie);
@@ -541,7 +541,8 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
        if (affd) {
                masks = irq_create_affinity_masks(nvec, affd);
                if (!masks)
                        pr_err("Unable to allocate affinity masks, ignoring\n");
                        dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
                                nvec);
        }

        /* MSI Entry Initialization */
@@ -681,7 +682,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
        if (affd) {
                masks = irq_create_affinity_masks(nvec, affd);
                if (!masks)
                        pr_err("Unable to allocate affinity masks, ignoring\n");
                        dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
                                nvec);
        }

        for (i = 0, curmsk = masks; i < nvec; i++) {
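The two hunks above switch the affinity-mask warnings from bare pr_err() to dev_err(), so the log names the driver and device that hit the failure. A minimal, hypothetical illustration of the difference (foo_probe and the message are made up, not from this commit):

/* Hypothetical example: dev_err() identifies the device, pr_err() does not. */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        pr_err("can't enable device\n");              /* "can't enable device" */
        dev_err(&pdev->dev, "can't enable device\n"); /* e.g. "foo 0000:01:00.0: can't enable device" */
        return -ENODEV;
}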
@@ -882,7 +884,7 @@ int pci_msi_vec_count(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_msi_vec_count);

void pci_msi_shutdown(struct pci_dev *dev)
static void pci_msi_shutdown(struct pci_dev *dev)
{
        struct msi_desc *desc;
        u32 mask;
@@ -994,7 +996,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
}
EXPORT_SYMBOL(pci_enable_msix);

void pci_msix_shutdown(struct pci_dev *dev)
static void pci_msix_shutdown(struct pci_dev *dev)
{
        struct msi_desc *entry;

@@ -461,8 +461,6 @@ static void pci_device_shutdown(struct device *dev)

        if (drv && drv->shutdown)
                drv->shutdown(pci_dev);
        pci_msi_shutdown(pci_dev);
        pci_msix_shutdown(pci_dev);

        /*
         * If this is a kexec reboot, turn off Bus Master bit on the
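With pci_msi_shutdown()/pci_msix_shutdown() no longer called from pci_device_shutdown(), a driver that wants its interrupts quiesced at shutdown does so from its own ->shutdown hook. A rough sketch of that, with hypothetical foo_* names and a made-up quiesce step that are not part of this commit:

/* Hypothetical driver ->shutdown hook that quiesces its own MSI usage. */
static void foo_shutdown(struct pci_dev *pdev)
{
        struct foo_dev *foo = pci_get_drvdata(pdev);

        foo_stop_dma(foo);                      /* device-specific quiesce, assumed */
        free_irq(pci_irq_vector(pdev, 0), foo); /* release the MSI vector */
        pci_free_irq_vectors(pdev);             /* disables MSI/MSI-X */
}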
@@ -85,15 +85,4 @@ static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
}
#endif

#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
int of_pci_msi_chip_add(struct msi_controller *chip);
void of_pci_msi_chip_remove(struct msi_controller *chip);
struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
#else
static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
static inline struct msi_controller *
of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
#endif

#endif
@@ -1292,11 +1292,9 @@ struct msix_entry {

#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
void pci_msi_shutdown(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
void pci_msix_shutdown(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
int pci_msi_enabled(void);
@@ -1322,13 +1320,11 @@ int pci_irq_get_node(struct pci_dev *pdev, int vec);

#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_msi_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline int pci_enable_msix(struct pci_dev *dev,
                                  struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
static inline void pci_msix_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
static inline int pci_msi_enabled(void) { return 0; }