2019-05-19 12:08:55 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2010-03-18 20:31:34 +00:00
|
|
|
/*
|
2011-06-06 16:22:23 +00:00
|
|
|
* Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
|
|
|
|
* initial domain support. We also handle the DSDT _PRT callbacks for GSI's
|
|
|
|
* used in HVM and initial domain mode (PV does not parse ACPI, so it has no
|
|
|
|
* concept of GSIs). Under PV we hook under the pnbbios API for IRQs and
|
|
|
|
* 0xcf8 PCI configuration read/write.
|
2010-03-18 20:31:34 +00:00
|
|
|
*
|
|
|
|
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
|
2011-06-06 16:22:23 +00:00
|
|
|
* Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
|
|
|
* Stefano Stabellini <stefano.stabellini@eu.citrix.com>
|
2010-03-18 20:31:34 +00:00
|
|
|
*/
|
2016-07-14 00:19:01 +00:00
|
|
|
#include <linux/export.h>
|
2010-03-18 20:31:34 +00:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/acpi.h>
|
|
|
|
|
|
|
|
#include <linux/io.h>
|
2010-10-21 16:40:08 +00:00
|
|
|
#include <asm/io_apic.h>
|
2010-03-18 20:31:34 +00:00
|
|
|
#include <asm/pci_x86.h>
|
|
|
|
|
|
|
|
#include <asm/xen/hypervisor.h>
|
|
|
|
|
2010-06-24 16:50:18 +00:00
|
|
|
#include <xen/features.h>
|
2010-03-18 20:31:34 +00:00
|
|
|
#include <xen/events.h>
|
|
|
|
#include <asm/xen/pci.h>
|
2014-12-02 20:19:13 +00:00
|
|
|
#include <asm/xen/cpuid.h>
|
|
|
|
#include <asm/apic.h>
|
2020-08-20 04:30:47 +00:00
|
|
|
#include <asm/acpi.h>
|
2014-06-09 08:19:48 +00:00
|
|
|
#include <asm/i8259.h>
|
2010-03-18 20:31:34 +00:00
|
|
|
|
2011-07-06 14:16:21 +00:00
|
|
|
static int xen_pcifront_enable_irq(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
int share = 1;
|
|
|
|
int pirq;
|
|
|
|
u8 gsi;
|
|
|
|
|
|
|
|
rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
|
|
|
|
if (rc < 0) {
|
|
|
|
dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
|
|
|
|
rc);
|
|
|
|
return rc;
|
|
|
|
}
|
2011-07-06 19:15:23 +00:00
|
|
|
/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
|
|
|
|
pirq = gsi;
|
2011-07-06 14:16:21 +00:00
|
|
|
|
2014-06-09 08:19:48 +00:00
|
|
|
if (gsi < nr_legacy_irqs())
|
2011-07-06 14:16:21 +00:00
|
|
|
share = 0;
|
|
|
|
|
|
|
|
rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
|
|
|
|
if (rc < 0) {
|
|
|
|
dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
|
|
|
|
gsi, pirq, rc);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
dev->irq = rc;
|
|
|
|
dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-06-24 15:42:04 +00:00
|
|
|
#ifdef CONFIG_ACPI
|
2020-04-28 15:36:40 +00:00
|
|
|
/*
 * Map a GSI to a PIRQ via the PHYSDEVOP_map_pirq hypercall and bind the
 * result to a Linux IRQ. If the GSI is already bound, the existing IRQ is
 * returned. When @set_pirq is true the PIRQ is forced to equal the GSI
 * (used by the initial domain); otherwise Xen picks the PIRQ.
 *
 * Returns the Linux IRQ number, or -1 if the hypercall fails (note: not a
 * proper errno), or a negative value from xen_bind_pirq_gsi_to_irq().
 */
static int xen_register_pirq(u32 gsi, int triggering, bool set_pirq)
{
	int rc, pirq = -1, irq;
	struct physdev_map_pirq map_irq;
	int shareable = 0;
	char *name;

	/* Already mapped? Reuse the existing binding. */
	irq = xen_irq_from_gsi(gsi);
	if (irq > 0)
		return irq;

	if (set_pirq)
		pirq = gsi;

	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_GSI;
	map_irq.index = gsi;
	map_irq.pirq = pirq;	/* -1 lets the hypervisor choose the PIRQ */

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);
		return -1;
	}

	/* Level-triggered interrupts can be shared; edge-triggered cannot. */
	if (triggering == ACPI_EDGE_SENSITIVE) {
		shareable = 0;
		name = "ioapic-edge";
	} else {
		shareable = 1;
		name = "ioapic-level";
	}

	/* map_irq.pirq holds the PIRQ actually assigned by the hypervisor. */
	irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
	if (irq < 0)
		goto out;

	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
out:
	return irq;
}
|
|
|
|
|
2011-07-06 16:42:43 +00:00
|
|
|
/*
 * ACPI GSI registration hook for Xen HVM guests. Delegates to
 * xen_register_pirq() without forcing a GSI->PIRQ identity mapping
 * (the hypervisor picks the PIRQ). @dev and @polarity are unused here.
 */
static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
				     int trigger, int polarity)
{
	/* Only meaningful when running as a Xen HVM guest. */
	if (xen_hvm_domain())
		return xen_register_pirq(gsi, trigger,
					 false /* no mapping of GSI to PIRQ */);

	return -1;
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_XEN_DOM0
|
2020-04-28 15:36:40 +00:00
|
|
|
/*
 * Register a GSI for the initial (PV) domain: map it to a PIRQ/IRQ and
 * tell the hypervisor its trigger mode and polarity via
 * PHYSDEVOP_setup_gsi so it can program the IO-APIC on our behalf.
 *
 * Returns the Linux IRQ number from xen_register_pirq(), or -1 when not
 * running as a PV domain. A setup_gsi failure is logged but does not
 * change the return value.
 */
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
	int rc, irq;
	struct physdev_setup_gsi setup_gsi;

	if (!xen_pv_domain())
		return -1;

	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
			gsi, triggering, polarity);

	/* true: dom0 forces the PIRQ number to equal the GSI. */
	irq = xen_register_pirq(gsi, triggering, true);

	/* Hypercall ABI: 0 = edge / active-high, 1 = level / active-low. */
	setup_gsi.gsi = gsi;
	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
	setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (rc == -EEXIST)
		/* Benign: the GSI was already configured earlier. */
		printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
	else if (rc) {
		printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
				gsi, rc);
	}

	return irq;
}
|
|
|
|
|
|
|
|
/*
 * ACPI GSI registration hook for the Xen initial domain; thin
 * pass-through to xen_register_gsi(). @dev is unused.
 */
static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
				 int trigger, int polarity)
{
	int irq = xen_register_gsi(gsi, trigger, polarity);

	return irq;
}
|
|
|
|
#endif
|
2011-07-06 14:41:47 +00:00
|
|
|
#endif
|
2011-07-06 14:16:21 +00:00
|
|
|
|
2010-03-18 20:31:34 +00:00
|
|
|
#if defined(CONFIG_PCI_MSI)
|
|
|
|
#include <linux/msi.h>
|
|
|
|
|
|
|
|
/*
 * Hook table filled in by the xen-pcifront driver when it loads; the MSI
 * enable/disable helpers below dispatch through it. Exported so the
 * (modular) frontend can register itself.
 */
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
|
|
|
|
|
2020-08-26 11:17:04 +00:00
|
|
|
/*
 * Per-domain-type MSI setup/teardown callbacks. Which implementations go
 * in here is decided once at boot by xen_setup_pci_msi() based on whether
 * we are a PV dom0, a PV domU, or an HVM guest.
 */
struct xen_msi_ops {
	int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
	void (*teardown_msi_irqs)(struct pci_dev *dev);
};

/* Written only during early init, hence __ro_after_init. */
static struct xen_msi_ops xen_msi_ops __ro_after_init;
|
|
|
|
|
2011-07-06 14:16:21 +00:00
|
|
|
/*
 * MSI/MSI-X setup for a PV domU: ask the pcifront backend to enable the
 * vectors (which returns the PIRQs in @v), then bind each PIRQ to a Linux
 * IRQ via an event channel.
 *
 * Returns 0 on success, 1 to tell the PCI core to retry with a single MSI
 * (multi-MSI is not supported on this path), or a negative errno.
 */
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, ret, i;
	struct msi_desc *msidesc;
	int *v;

	/* Multi-vector MSI unsupported here; >0 means "retry with one". */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	/* PIRQ array filled in by the backend; at least one slot. */
	v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
	if (!v)
		return -ENOMEM;

	if (type == PCI_CAP_ID_MSIX)
		ret = xen_pci_frontend_enable_msix(dev, v, nvec);
	else
		ret = xen_pci_frontend_enable_msi(dev, v);
	if (ret)
		goto error;
	i = 0;
	for_each_pci_msi_entry(msidesc, dev) {
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "pcifront-msi-x" :
					       "pcifront-msi",
						DOMID_SELF);
		if (irq < 0) {
			ret = irq;
			goto free;
		}
		i++;
	}
	kfree(v);
	return 0;

error:
	if (ret == -ENOSYS)
		dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
	else if (ret)
		dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
free:
	kfree(v);
	return ret;
}
|
|
|
|
|
2010-07-01 16:10:39 +00:00
|
|
|
/*
 * Compose an MSI message that smuggles a PIRQ to the hypervisor instead
 * of describing a real vector. @pdev is unused. NOTE(review): the
 * assignment order matters — arch_addr_hi/arch_addr_lo are overlays of
 * address_hi/address_lo, so address_hi must be written before its
 * bitfields; do not reorder.
 */
static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
				struct msi_msg *msg)
{
	/*
	 * We set vector == 0 to tell the hypervisor we don't care about
	 * it, but we want a pirq setup instead. We use the dest_id fields
	 * to pass the pirq that we want.
	 */
	memset(msg, 0, sizeof(*msg));
	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
	/* PIRQ is split across the two destination-id bitfields. */
	msg->arch_addr_hi.destid_8_31 = pirq >> 8;
	msg->arch_addr_lo.destid_0_7 = pirq & 0xFF;
	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
}
|
|
|
|
|
|
|
|
/*
 * MSI/MSI-X setup for an HVM guest: allocate a fresh PIRQ for each
 * descriptor, write an MSI message carrying the PIRQ into the device,
 * and bind the PIRQ to a Linux IRQ through an event channel.
 *
 * Returns 0 on success, 1 to make the PCI core retry with a single MSI
 * (multi-MSI unsupported here), or a negative errno.
 */
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, pirq;
	struct msi_desc *msidesc;
	struct msi_msg msg;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	for_each_pci_msi_entry(msidesc, dev) {
		/*
		 * Always allocate a new PIRQ; a previously cached one may
		 * have been unmapped by Qemu when MSI was disabled.
		 */
		pirq = xen_allocate_pirq_msi(dev, msidesc);
		if (pirq < 0) {
			irq = -ENODEV;
			goto error;
		}
		/* Program the device so Xen routes the MSI to our PIRQ. */
		xen_msi_compose_msg(dev, pirq, &msg);
		__pci_write_msi_msg(msidesc, &msg);
		dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ?
					       "msi-x" : "msi",
					       DOMID_SELF);
		if (irq < 0)
			goto error;
		dev_dbg(&dev->dev,
			"xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
	}
	return 0;

error:
	dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
		type == PCI_CAP_ID_MSI ? "" : "-X", irq);
	return irq;
}
|
|
|
|
|
2011-02-18 16:43:26 +00:00
|
|
|
#ifdef CONFIG_XEN_DOM0
|
2011-09-22 08:17:57 +00:00
|
|
|
/*
 * Whether the hypervisor understands segment-aware (MAP_PIRQ_TYPE_MSI_SEG)
 * mappings; cleared at runtime if the hypercall rejects them.
 */
static bool __read_mostly pci_seg_supported = true;

/*
 * MSI/MSI-X setup for the initial domain (dom0): ask the hypervisor to
 * map a PIRQ for each descriptor via PHYSDEVOP_map_pirq, then bind it to
 * a Linux IRQ. Handles devices owned by other domains, multi-MSI, MSI-X
 * table lookup, and fallback from segment-aware to legacy mappings.
 *
 * Returns 0 on success, 1 to make the driver retry with single MSI when
 * MAP_PIRQ_TYPE_MULTI_MSI is unavailable, or a negative errno.
 */
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int ret = 0;
	struct msi_desc *msidesc;

	for_each_pci_msi_entry(msidesc, dev) {
		struct physdev_map_pirq map_irq;
		domid_t domid;

		/* The device may be owned by another (driver) domain. */
		domid = ret = xen_find_device_domain_owner(dev);
		/* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
		 * hence check ret value for < 0. */
		if (ret < 0)
			domid = DOMID_SELF;

		memset(&map_irq, 0, sizeof(map_irq));
		map_irq.domid = domid;
		map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
		map_irq.index = -1;
		map_irq.pirq = -1;
		/* Segment number is packed into the upper 16 bits of .bus. */
		map_irq.bus = dev->bus->number |
			      (pci_domain_nr(dev->bus) << 16);
		map_irq.devfn = dev->devfn;

		if (type == PCI_CAP_ID_MSI && nvec > 1) {
			map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI;
			map_irq.entry_nr = nvec;
		} else if (type == PCI_CAP_ID_MSIX) {
			int pos;
			unsigned long flags;
			u32 table_offset, bir;

			/*
			 * Locate the BAR backing the MSI-X table so the
			 * hypervisor can find the table in memory space.
			 */
			pos = dev->msix_cap;
			pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
					      &table_offset);
			bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
			flags = pci_resource_flags(dev, bir);
			/* No space assigned to the BAR -> MSI-X unusable. */
			if (!flags || (flags & IORESOURCE_UNSET))
				return -EINVAL;

			map_irq.table_base = pci_resource_start(dev, bir);
			map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
		}

		ret = -EINVAL;
		if (pci_seg_supported)
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
		if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
			/*
			 * If MAP_PIRQ_TYPE_MULTI_MSI is not available
			 * there's nothing else we can do in this case.
			 * Just set ret > 0 so driver can retry with
			 * single MSI.
			 */
			ret = 1;
			goto out;
		}
		/* Fall back to the non-segment-aware mapping on segment 0. */
		if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
			map_irq.type = MAP_PIRQ_TYPE_MSI;
			map_irq.index = -1;
			map_irq.pirq = -1;
			map_irq.bus = dev->bus->number;
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
			if (ret != -EINVAL)
				pci_seg_supported = false;
		}
		if (ret) {
			dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
				 ret, domid);
			goto out;
		}

		ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq,
					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
					       (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi",
					       domid);
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	return ret;
}
|
2011-12-08 09:36:39 +00:00
|
|
|
|
2013-12-04 05:09:16 +00:00
|
|
|
/*
 * Restore a device's MSI state after e.g. resume (dom0 only): ask the
 * hypervisor to reprogram the MSI registers it owns. Tries the
 * segment-aware hypercall first and falls back to the legacy one,
 * remembering via pci_seg_supported which flavor this hypervisor speaks.
 */
static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
{
	int ret = 0;

	if (pci_seg_supported) {
		struct physdev_pci_device restore_ext;

		restore_ext.seg = pci_domain_nr(dev->bus);
		restore_ext.bus = dev->bus->number;
		restore_ext.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
					&restore_ext);
		/* Old hypervisor: remember and retry the legacy op below. */
		if (ret == -ENOSYS)
			pci_seg_supported = false;
		WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
	}
	if (!pci_seg_supported) {
		struct physdev_restore_msi restore;

		restore.bus = dev->bus->number;
		restore.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
		WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
	}
}
|
2020-08-26 11:16:56 +00:00
|
|
|
#else /* CONFIG_XEN_DOM0 */
|
|
|
|
#define xen_initdom_setup_msi_irqs NULL
|
|
|
|
#define xen_initdom_restore_msi_irqs NULL
|
|
|
|
#endif /* !CONFIG_XEN_DOM0 */
|
2010-03-18 20:31:34 +00:00
|
|
|
|
2011-07-06 14:16:21 +00:00
|
|
|
static void xen_teardown_msi_irqs(struct pci_dev *dev)
|
2010-03-18 20:31:34 +00:00
|
|
|
{
|
2011-07-06 14:16:21 +00:00
|
|
|
struct msi_desc *msidesc;
|
2020-08-26 11:16:55 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for_each_pci_msi_entry(msidesc, dev) {
|
|
|
|
if (msidesc->irq) {
|
|
|
|
for (i = 0; i < msidesc->nvec_used; i++)
|
|
|
|
xen_destroy_irq(msidesc->irq + i);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
struct msi_desc *msidesc = first_pci_msi_entry(dev);
|
2010-03-18 20:31:34 +00:00
|
|
|
|
2011-07-06 14:16:21 +00:00
|
|
|
if (msidesc->msi_attrib.is_msix)
|
|
|
|
xen_pci_frontend_disable_msix(dev);
|
|
|
|
else
|
|
|
|
xen_pci_frontend_disable_msi(dev);
|
2010-03-18 20:31:34 +00:00
|
|
|
|
2020-08-26 11:16:55 +00:00
|
|
|
xen_teardown_msi_irqs(dev);
|
2011-07-06 14:16:21 +00:00
|
|
|
}
|
2011-03-10 16:08:07 +00:00
|
|
|
|
2020-08-26 11:16:58 +00:00
|
|
|
static int xen_msi_domain_alloc_irqs(struct irq_domain *domain,
|
|
|
|
struct device *dev, int nvec)
|
|
|
|
{
|
|
|
|
int type;
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(!dev_is_pci(dev)))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (first_msi_entry(dev)->msi_attrib.is_msix)
|
|
|
|
type = PCI_CAP_ID_MSIX;
|
|
|
|
else
|
|
|
|
type = PCI_CAP_ID_MSI;
|
|
|
|
|
2020-08-26 11:17:04 +00:00
|
|
|
return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type);
|
2020-08-26 11:16:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void xen_msi_domain_free_irqs(struct irq_domain *domain,
|
|
|
|
struct device *dev)
|
|
|
|
{
|
|
|
|
if (WARN_ON_ONCE(!dev_is_pci(dev)))
|
|
|
|
return;
|
|
|
|
|
2020-08-26 11:17:04 +00:00
|
|
|
xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev));
|
2020-08-26 11:16:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Only the alloc/free hooks are overridden; everything else is default. */
static struct msi_domain_ops xen_pci_msi_domain_ops = {
	.domain_alloc_irqs = xen_msi_domain_alloc_irqs,
	.domain_free_irqs = xen_msi_domain_free_irqs,
};

/* Domain info handed to msi_create_irq_domain() below. */
static struct msi_domain_info xen_pci_msi_domain_info = {
	.ops = &xen_pci_msi_domain_ops,
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This irq domain is a blatant violation of the irq domain design, but
|
|
|
|
* distangling XEN into real irq domains is not a job for mere mortals with
|
|
|
|
* limited XENology. But it's the least dangerous way for a mere mortal to
|
|
|
|
* get rid of the arch_*_msi_irqs() hackery in order to store the irq
|
|
|
|
* domain pointer in struct device. This irq domain wrappery allows to do
|
|
|
|
* that without breaking XEN terminally.
|
|
|
|
*/
|
|
|
|
static __init struct irq_domain *xen_create_pci_msi_domain(void)
|
|
|
|
{
|
|
|
|
struct irq_domain *d = NULL;
|
|
|
|
struct fwnode_handle *fn;
|
|
|
|
|
|
|
|
fn = irq_domain_alloc_named_fwnode("XEN-MSI");
|
|
|
|
if (fn)
|
|
|
|
d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL);
|
|
|
|
|
|
|
|
/* FIXME: No idea how to survive if this fails */
|
|
|
|
BUG_ON(!d);
|
|
|
|
|
|
|
|
return d;
|
|
|
|
}
|
|
|
|
|
2020-08-26 11:16:56 +00:00
|
|
|
/*
 * Pick the MSI setup/teardown implementations for the current domain
 * type (PV dom0, PV domU, or HVM guest) and install the Xen PCI/MSI
 * irq domain. Called once at boot.
 */
static __init void xen_setup_pci_msi(void)
{
	if (xen_pv_domain()) {
		if (xen_initial_domain()) {
			xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs;
			x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
		} else {
			xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
		}
		xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
		/* PV guests cannot touch the MSI mask bits directly. */
		pci_msi_ignore_mask = 1;
	} else if (xen_hvm_domain()) {
		xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
		xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
	} else {
		/* Neither PV nor HVM: should never be reached here. */
		WARN_ON_ONCE(1);
		return;
	}

	/*
	 * Override the PCI/MSI irq domain init function. No point
	 * in allocating the native domain and never use it.
	 */
	x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
}
|
|
|
|
|
|
|
|
#else /* CONFIG_PCI_MSI */
|
|
|
|
static inline void xen_setup_pci_msi(void) { }
|
|
|
|
#endif /* CONFIG_PCI_MSI */
|
2011-01-11 17:20:13 +00:00
|
|
|
|
2010-03-18 20:31:34 +00:00
|
|
|
/*
 * Set up the PCI frontend stub for unprivileged PV guests.
 *
 * Returns -ENODEV for anything other than a PV domU; the initial domain
 * uses pci_xen_initial_domain() and HVM guests use pci_xen_hvm_init().
 */
int __init pci_xen_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

	pcibios_set_cache_line_size();

	/* Route legacy INTx enable through the Xen pcifront path. */
	pcibios_enable_irq = xen_pcifront_enable_irq;
	pcibios_disable_irq = NULL;

	/* Keep ACPI out of the picture */
	acpi_noirq_set();

	xen_setup_pci_msi();
	return 0;
}
|
2010-06-24 16:50:18 +00:00
|
|
|
|
2014-12-02 20:19:12 +00:00
|
|
|
#ifdef CONFIG_PCI_MSI
/*
 * Decide whether an HVM guest needs Xen's pirq/event-channel based MSI
 * handling, or whether regular APIC processing can be used instead.
 */
static void __init xen_hvm_msi_init(void)
{
	uint32_t features;

	if (disable_apic)
		goto use_xen_msi;

	/*
	 * If hardware supports (x2)APIC virtualization (as indicated
	 * by hypervisor's leaf 4) then we don't need to use pirqs/
	 * event channels for MSI handling and instead use regular
	 * APIC processing
	 */
	features = cpuid_eax(xen_cpuid_base() + 4);

	if ((features & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode)
		return;
	if ((features & XEN_HVM_CPUID_APIC_ACCESS_VIRT) &&
	    boot_cpu_has(X86_FEATURE_APIC))
		return;

use_xen_msi:
	xen_setup_pci_msi();
}
#endif
|
|
|
|
|
2010-06-24 16:50:18 +00:00
|
|
|
/*
 * HVM guest PCI init: divert GSI registration (and, after the APIC is
 * up, MSI handling) to Xen when the domain has the required features.
 * Returns 0 in all cases; without vector callbacks or HVM pirqs there
 * is simply nothing to hook.
 */
int __init pci_xen_hvm_init(void)
{
	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
		return 0;

#ifdef CONFIG_ACPI
	/*
	 * We don't want to change the actual ACPI delivery model,
	 * just how GSIs get registered.
	 */
	__acpi_register_gsi = acpi_register_gsi_xen_hvm;
	__acpi_unregister_gsi = NULL;
#endif

#ifdef CONFIG_PCI_MSI
	/*
	 * We need to wait until after x2apic is initialized
	 * before we can set MSI IRQ ops.
	 */
	x86_platform.apic_post_init = xen_hvm_msi_init;
#endif
	return 0;
}
|
2010-09-02 13:51:39 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_XEN_DOM0
|
2011-06-09 13:49:13 +00:00
|
|
|
/*
 * PCI init for the initial domain (dom0): hook MSI ops, GSI
 * registration, and pre-register the legacy GSIs as pirqs.
 */
int __init pci_xen_initial_domain(void)
{
	int gsi;

	xen_setup_pci_msi();

	__acpi_register_gsi = acpi_register_gsi_xen;
	__acpi_unregister_gsi = NULL;

	/*
	 * Pre-allocate the legacy IRQs.  NR_IRQS_LEGACY is used here
	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
	 */
	for (gsi = 0; gsi < NR_IRQS_LEGACY; gsi++) {
		int trigger, polarity;

		if (acpi_get_override_irq(gsi, &trigger, &polarity) == -1)
			continue;

		xen_register_pirq(gsi,
				  trigger ? ACPI_LEVEL_SENSITIVE
					  : ACPI_EDGE_SENSITIVE,
				  true /* Map GSI to PIRQ */);
	}

	if (nr_ioapics == 0) {
		int irq;

		for (irq = 0; irq < nr_legacy_irqs(); irq++)
			xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
	}

	return 0;
}
|
2010-11-08 19:13:35 +00:00
|
|
|
|
|
|
|
/* Tracks which Xen domain currently owns a given PCI device. */
struct xen_device_domain_owner {
	domid_t domain;		/* id of the owning domain */
	struct pci_dev *dev;	/* the owned device */
	struct list_head list;	/* entry in dev_domain_list */
};

/* Protects dev_domain_list and all entries linked on it. */
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
|
|
|
|
|
|
|
|
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
struct xen_device_domain_owner *owner;
|
|
|
|
|
|
|
|
list_for_each_entry(owner, &dev_domain_list, list) {
|
|
|
|
if (owner->dev == dev)
|
|
|
|
return owner;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
int xen_find_device_domain_owner(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
struct xen_device_domain_owner *owner;
|
|
|
|
int domain = -ENODEV;
|
|
|
|
|
|
|
|
spin_lock(&dev_domain_list_spinlock);
|
|
|
|
owner = find_device(dev);
|
|
|
|
if (owner)
|
|
|
|
domain = owner->domain;
|
|
|
|
spin_unlock(&dev_domain_list_spinlock);
|
|
|
|
return domain;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
|
|
|
|
|
|
|
|
/*
 * Record @domain as the owner of @dev.
 *
 * Returns 0 on success, -ENOMEM if the tracking structure cannot be
 * allocated, or -EEXIST if the device already has a registered owner.
 */
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(*owner), GFP_KERNEL);
	if (!owner)
		return -ENOMEM; /* was -ENODEV; allocation failure is -ENOMEM */

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
|
|
|
|
|
|
|
|
int xen_unregister_device_domain_owner(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
struct xen_device_domain_owner *owner;
|
|
|
|
|
|
|
|
spin_lock(&dev_domain_list_spinlock);
|
|
|
|
owner = find_device(dev);
|
|
|
|
if (!owner) {
|
|
|
|
spin_unlock(&dev_domain_list_spinlock);
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
list_del(&owner->list);
|
|
|
|
spin_unlock(&dev_domain_list_spinlock);
|
|
|
|
kfree(owner);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
|
2011-05-16 17:47:30 +00:00
|
|
|
#endif
|