Merge branch 'pci/ctrl/dwc'
- Stop link on host_init errors and de-initialization (Serge Semin)
- Add support for unrolled iATU register space in dw_pcie_disable_atu() (Serge Semin)
- Disable outbound windows only for controllers that use iATU (Serge Semin)
- Set INCREASE_REGION_SIZE flag based on limit address, not on the size, since even a small size may cross a 4GB boundary (Serge Semin)
- Deallocate EPC memory on dw_pcie_ep_init() errors to avoid a leak (Serge Semin)
- Always enable CDM check if "snps,enable-cdm-check" exists instead of exiting early if the optional "num-lanes" was absent (Serge Semin)
- Simplify detection of whether we're using unrolled iATU registers (Serge Semin)
- Make dw_pcie_link_up() more generic by using dw_pcie_readl_dbi() instead of readl() (Serge Semin)
- Add dw_pcie_start_link() and dw_pcie_stop_link() wrappers to factor out checks for ops being implemented (Serge Semin)
- Move io_cfg_atu_shared to struct pcie_port and rename to cfg0_io_shared, since it's not used by dwc common code or dwc endpoint code (Serge Semin)
- Rename struct pcie_port to dw_pcie_rp to indicate that it's DesignWare-specific (Serge Semin)
- Drop unused struct dw_plat_pcie regmap pointer (Serge Semin)
- Fix some coding style issues (Serge Semin)
- Log link speed and width if it comes up (Serge Semin)
- Save DWC IP core version in native format as read from PORT_LOGIC.PCIE_VERSION_OFF register (Serge Semin)
- Read DWC IP core version from PORT_LOGIC.PCIE_VERSION_OFF (Serge Semin)
- Add macros to compare Synopsys IP core versions (Serge Semin)
- Drop manual DWC IP core version setup from intel-gw and tegra194 (Serge Semin)
- Add dw_pcie_ops.host_deinit() callback (Serge Semin)
- Drop enum dw_pcie_as_type in favor of PCIE_ATU_TYPE_MEM/IO (Serge Semin)
- Drop enum dw_pcie_region_type in favor of PCIE_ATU_REGION_DIR_IB/OB (Serge Semin)
- Simplify in/outbound iATU setup methods and reduce duplicated code (Serge Semin)
- Detect iATU region size from hardware (Serge Semin)
- Validate iATU outbound mappings against hardware constraints (Serge Semin)
- Check for errors in iATU setup (Serge Semin)
- Allocate a 32-bit DMA-able page to be MSI target instead of using a driver data structure that may not be addressable with 32-bit address (Will McVicker)
- Use the bitmap API to allocate bitmaps instead of open-coding it (Christophe JAILLET)
- Correct dw_pcie_free_msi() checking for when to remove IRQ handler and data (Dmitry Baryshkov)
- Split MSI init to new dw_pcie_msi_host_init() function (Dmitry Baryshkov)
- Convert struct pcie_port.msi_irq to an array so we can support more than 32 MSI interrupts (Dmitry Baryshkov)
- Handle MSIs routed to multiple GIC interrupts for Qualcomm platforms with groups of 32 MSI vectors (Dmitry Baryshkov)
- Add additional MSI interrupts to qcom DT (Dmitry Baryshkov)

* pci/ctrl/dwc:
  dt-bindings: PCI: qcom: Support additional MSI vectors
  PCI: dwc: Handle MSIs routed to multiple GIC interrupts
  PCI: dwc: Convert struct pcie_port.msi_irq to an array
  PCI: dwc: Split MSI IRQ parsing/allocation to a separate function
  PCI: dwc: Correct msi_irq condition in dw_pcie_free_msi()
  PCI: dwc: Use the bitmap API to allocate bitmaps
  PCI: dwc: Fix MSI msi_msg DMA mapping
  PCI: dwc: Check iATU in/outbound range setup status
  PCI: dwc: Validate iATU outbound mappings against hardware constraints
  PCI: dwc: Add iATU regions size detection procedure
  PCI: dwc: Simplify in/outbound iATU setup methods
  PCI: dwc: Drop enum dw_pcie_region_type in favor of PCIE_ATU_REGION_DIR_IB/OB
  PCI: dwc: Drop enum dw_pcie_as_type in favor of PCIE_ATU_TYPE_MEM/IO
  PCI: dwc: Add dw_pcie_ops.host_deinit() callback
  PCI: tegra194: Drop manual DW PCIe controller version setup
  PCI: intel-gw: Drop manual DW PCIe controller version setup
  PCI: dwc: Add macros to compare Synopsys IP core versions
  PCI: dwc: Read DWC IP core version from register
  PCI: dwc: Use native DWC IP core version representation
  PCI: dwc: Detect iATU settings after getting "addr_space" resource
  PCI: dwc: Log link speed and width if it comes up
  PCI: dwc-plat: Drop dw_plat_pcie_of_match[] forward declaration
  PCI: dwc-plat: Drop unused regmap pointer
  PCI: dwc-plat: Simplify dw_plat_pcie_probe() return values
  PCI: dwc: Rename struct pcie_port to dw_pcie_rp
  PCI: dwc: Move io_cfg_atu_shared to struct pcie_port
  PCI: dwc: Add start_link/stop_link inlines
  PCI: dwc: Reuse local pointer to the resource data
  PCI: dwc: Organize local variable usage
  PCI: dwc: Convert dw_pcie_link_up() to use dw_pcie_readl_dbi()
  PCI: dwc: Simplify unrolled iATU detection
  PCI: dwc: Add newlines to log messages
  PCI: dwc: Add braces to multi-line if-else statements
  PCI: dwc: Always enable CDM check if "snps,enable-cdm-check" exists
  PCI: dwc: Deallocate EPC memory on dw_pcie_ep_init() errors
  PCI: dwc: Set INCREASE_REGION_SIZE flag based on limit address
  PCI: dwc: Disable outbound windows only for controllers using iATU
  PCI: dwc: Add unroll iATU space support to dw_pcie_disable_atu()
  PCI: dwc: Stop link on host_init errors and de-initialization
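Several hunks below call dw_pcie_start_link() and dw_pcie_stop_link() without showing their definitions; the log entry above says they were added as inlines (in pcie-designware.h, which is not part of this excerpt) to factor out the "is the op implemented?" checks. A minimal sketch of what those wrappers look like, inferred from how the callers below use them:

	/* Sketch: wrappers so callers need not test pci->ops themselves */
	static inline int dw_pcie_start_link(struct dw_pcie *pci)
	{
		if (pci->ops && pci->ops->start_link)
			return pci->ops->start_link(pci);

		return 0;
	}

	static inline void dw_pcie_stop_link(struct dw_pcie *pci)
	{
		if (pci->ops && pci->ops->stop_link)
			pci->ops->stop_link(pci);
	}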
commit 964db794ae
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml
@@ -43,11 +43,12 @@ properties:
     maxItems: 5
 
   interrupts:
-    maxItems: 1
+    minItems: 1
+    maxItems: 8
 
   interrupt-names:
-    items:
-      - const: msi
+    minItems: 1
+    maxItems: 8
 
   # Common definitions for clocks, clock-names and reset.
   # Platform constraints are described later.
@@ -623,6 +624,50 @@ allOf:
         - resets
         - reset-names
 
+  # Newer chipsets support either 1 or 8 MSI vectors
+  # On older chipsets it's always 1 MSI vector
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,pcie-msm8996
+              - qcom,pcie-sc7280
+              - qcom,pcie-sc8180x
+              - qcom,pcie-sdm845
+              - qcom,pcie-sm8150
+              - qcom,pcie-sm8250
+              - qcom,pcie-sm8450-pcie0
+              - qcom,pcie-sm8450-pcie1
+    then:
+      oneOf:
+        - properties:
+            interrupts:
+              maxItems: 1
+            interrupt-names:
+              items:
+                - const: msi
+        - properties:
+            interrupts:
+              minItems: 8
+            interrupt-names:
+              items:
+                - const: msi0
+                - const: msi1
+                - const: msi2
+                - const: msi3
+                - const: msi4
+                - const: msi5
+                - const: msi6
+                - const: msi7
+    else:
+      properties:
+        interrupts:
+          maxItems: 1
+        interrupt-names:
+          items:
+            - const: msi
+
 unevaluatedProperties: false
 
 examples:
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -178,7 +178,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
 	dra7xx_pcie_enable_msi_interrupts(dra7xx);
 }
 
-static int dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -202,7 +202,7 @@ static const struct irq_domain_ops intx_domain_ops = {
 	.xlate = pci_irqd_intx_xlate,
 };
 
-static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned long val;
@@ -224,7 +224,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
 	return 1;
 }
 
-static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	int ret, i, count, num_ctrls;
@@ -255,8 +255,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct dra7xx_pcie *dra7xx;
+	struct dw_pcie_rp *pp;
 	struct dw_pcie *pci;
-	struct pcie_port *pp;
 	unsigned long reg;
 	u32 bit;
 
@@ -344,7 +344,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct device *dev = pci->dev;
@@ -475,7 +475,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 {
 	int ret;
 	struct dw_pcie *pci = dra7xx->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = pci->dev;
 
 	pp->irq = platform_get_irq(pdev, 1);
@@ -483,7 +483,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 		return pp->irq;
 
 	/* MSI IRQ is muxed */
-	pp->msi_irq = -ENODEV;
+	pp->msi_irq[0] = -ENODEV;
 
 	ret = dra7xx_pcie_init_irq_domain(pp);
 	if (ret < 0)
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
@@ -249,7 +249,7 @@ static int exynos_pcie_link_up(struct dw_pcie *pci)
 	return (val & PCIE_ELBI_XMLH_LINKUP);
 }
 
-static int exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct exynos_pcie *ep = to_exynos_pcie(pci);
@@ -276,7 +276,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
 				struct platform_device *pdev)
 {
 	struct dw_pcie *pci = &ep->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	int ret;
 
@@ -292,7 +292,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
 	}
 
 	pp->ops = &exynos_pcie_host_ops;
-	pp->msi_irq = -ENODEV;
+	pp->msi_irq[0] = -ENODEV;
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
@@ -406,7 +406,7 @@ static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
 {
 	struct exynos_pcie *ep = dev_get_drvdata(dev);
 	struct dw_pcie *pci = &ep->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	int ret;
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
@@ -863,7 +863,7 @@ err_reset_phy:
 	return ret;
 }
 
-static int imx6_pcie_host_init(struct pcie_port *pp)
+static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
@@ -992,7 +992,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
 {
 	int ret;
 	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-	struct pcie_port *pp = &imx6_pcie->pci->pp;
+	struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
 
 	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
 		return 0;
@@ -1291,7 +1291,7 @@ static struct platform_driver imx6_pcie_driver = {
 static void imx6_pcie_quirk(struct pci_dev *dev)
 {
 	struct pci_bus *bus = dev->bus;
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 
 	/* Bus parent is the PCI bridge, its parent is this platform driver */
 	if (!bus->dev.parent || !bus->dev.parent->parent)
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
@@ -109,7 +109,7 @@ struct ks_pcie_of_data {
 	enum dw_pcie_device_mode mode;
 	const struct dw_pcie_host_ops *host_ops;
 	const struct dw_pcie_ep_ops *ep_ops;
-	unsigned int version;
+	u32 version;
 };
 
 struct keystone_pcie {
@@ -147,7 +147,7 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
 
 static void ks_pcie_msi_irq_ack(struct irq_data *data)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 	struct keystone_pcie *ks_pcie;
 	u32 irq = data->hwirq;
 	struct dw_pcie *pci;
@@ -167,7 +167,7 @@ static void ks_pcie_msi_irq_ack(struct irq_data *data)
 
 static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 	struct keystone_pcie *ks_pcie;
 	struct dw_pcie *pci;
 	u64 msi_target;
@@ -192,7 +192,7 @@ static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
 
 static void ks_pcie_msi_mask(struct irq_data *data)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 	struct keystone_pcie *ks_pcie;
 	u32 irq = data->hwirq;
 	struct dw_pcie *pci;
@@ -216,7 +216,7 @@ static void ks_pcie_msi_mask(struct irq_data *data)
 
 static void ks_pcie_msi_unmask(struct irq_data *data)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 	struct keystone_pcie *ks_pcie;
 	u32 irq = data->hwirq;
 	struct dw_pcie *pci;
@@ -247,7 +247,7 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
 	.irq_unmask = ks_pcie_msi_unmask,
 };
 
-static int ks_pcie_msi_host_init(struct pcie_port *pp)
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
 {
 	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
 	return dw_pcie_allocate_domains(pp);
@@ -390,7 +390,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 	u32 val;
 	u32 num_viewport = ks_pcie->num_viewport;
 	struct dw_pcie *pci = ks_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	u64 start, end;
 	struct resource *mem;
 	int i;
@@ -428,7 +428,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
 					   unsigned int devfn, int where)
 {
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 	u32 reg;
@@ -456,7 +456,7 @@ static struct pci_ops ks_child_pcie_ops = {
  */
 static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
 {
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 
@@ -574,7 +574,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	u32 offset = irq - ks_pcie->msi_host_irq;
 	struct dw_pcie *pci = ks_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = pci->dev;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	u32 vector, reg, pos;
@@ -799,7 +799,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
 	return 0;
 }
 
-static int __init ks_pcie_host_init(struct pcie_port *pp)
+static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -1069,19 +1069,19 @@ static int ks_pcie_am654_set_mode(struct device *dev,
 
 static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
 	.host_ops = &ks_pcie_host_ops,
-	.version = 0x365A,
+	.version = DW_PCIE_VER_365A,
 };
 
 static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
 	.host_ops = &ks_pcie_am654_host_ops,
 	.mode = DW_PCIE_RC_TYPE,
-	.version = 0x490A,
+	.version = DW_PCIE_VER_490A,
 };
 
 static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
 	.ep_ops = &ks_pcie_am654_ep_ops,
 	.mode = DW_PCIE_EP_TYPE,
-	.version = 0x490A,
+	.version = DW_PCIE_VER_490A,
 };
 
 static const struct of_device_id ks_pcie_of_match[] = {
@@ -1114,12 +1114,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 	struct device_link **link;
 	struct gpio_desc *gpiod;
 	struct resource *res;
-	unsigned int version;
 	void __iomem *base;
 	u32 num_viewport;
 	struct phy **phy;
 	u32 num_lanes;
 	char name[10];
+	u32 version;
 	int ret;
 	int irq;
 	int i;
@@ -1233,7 +1233,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 		goto err_get_sync;
 	}
 
-	if (pci->version >= 0x480A)
+	if (dw_pcie_ver_is_ge(pci, 480A))
 		ret = ks_pcie_am654_set_mode(dev, mode);
 	else
 		ret = ks_pcie_set_mode(dev);
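The keystone hunks above replace open-coded version constants (0x365A, pci->version >= 0x480A) with DW_PCIE_VER_* macros and dw_pcie_ver_is_ge(). The macros live in pcie-designware.h, which this excerpt does not show; a plausible sketch, assuming the controller reports its release as four ASCII bytes in PCIE_VERSION_OFF (e.g. v4.90a reads back as "490*" = 0x3439302a), so a plain integer compare orders releases correctly:

	/* Sketch (assumed values): native version codes as read from hardware */
	#define DW_PCIE_VER_365A	0x3336352a
	#define DW_PCIE_VER_460A	0x3436302a
	#define DW_PCIE_VER_480A	0x3438302a
	#define DW_PCIE_VER_490A	0x3439302a

	/* Token-paste the release name, so callers write dw_pcie_ver_is_ge(pci, 480A) */
	#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
		((_pci)->version _op DW_PCIE_VER_ ## _ver)

	#define dw_pcie_ver_is(_pci, _ver)	__dw_pcie_ver_cmp(_pci, _ver, ==)
	#define dw_pcie_ver_is_ge(_pci, _ver)	__dw_pcie_ver_cmp(_pci, _ver, >=)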
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -32,15 +32,6 @@ struct ls_pcie_ep {
 	const struct ls_pcie_ep_drvdata *drvdata;
 };
 
-static int ls_pcie_establish_link(struct dw_pcie *pci)
-{
-	return 0;
-}
-
-static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
-	.start_link = ls_pcie_establish_link,
-};
-
 static const struct pci_epc_features*
 ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
 {
@@ -106,19 +97,16 @@ static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
 
 static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
 	.ops = &ls_pcie_ep_ops,
-	.dw_pcie_ops = &dw_ls_pcie_ep_ops,
 };
 
 static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
 	.func_offset = 0x20000,
 	.ops = &ls_pcie_ep_ops,
-	.dw_pcie_ops = &dw_ls_pcie_ep_ops,
 };
 
 static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
 	.func_offset = 0x8000,
 	.ops = &ls_pcie_ep_ops,
-	.dw_pcie_ops = &dw_ls_pcie_ep_ops,
 };
 
 static const struct of_device_id ls_pcie_ep_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -74,7 +74,7 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
 	iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
 }
 
-static int ls_pcie_host_init(struct pcie_port *pp)
+static int ls_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct ls_pcie *pcie = to_ls_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
@@ -370,7 +370,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
 	return 0;
 }
 
-static int meson_pcie_host_init(struct pcie_port *pp)
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct meson_pcie *mp = to_meson_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
@@ -217,7 +217,7 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
 static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
 					       unsigned int devfn, int where)
 {
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
 	unsigned int busnr = bus->number;
 	struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
@@ -245,7 +245,7 @@ static struct pci_ops al_child_pci_ops = {
 static void al_pcie_config_prepare(struct al_pcie *pcie)
 {
 	struct al_pcie_target_bus_cfg *target_bus_cfg;
-	struct pcie_port *pp = &pcie->pci->pp;
+	struct dw_pcie_rp *pp = &pcie->pci->pp;
 	unsigned int ecam_bus_mask;
 	u32 cfg_control_offset;
 	u8 subordinate_bus;
@@ -289,7 +289,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
 	al_pcie_controller_writel(pcie, cfg_control_offset, reg);
 }
 
-static int al_pcie_host_init(struct pcie_port *pp)
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct al_pcie *pcie = to_al_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -166,7 +166,7 @@ static int armada8k_pcie_start_link(struct dw_pcie *pci)
 	return 0;
 }
 
-static int armada8k_pcie_host_init(struct pcie_port *pp)
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	u32 reg;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -233,7 +233,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
 				  struct platform_device *pdev)
 {
 	struct dw_pcie *pci = pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	int ret;
 
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -97,7 +97,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
 static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
 {
 	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct dw_pcie_ep *ep = &pci->ep;
 
 	switch (artpec6_pcie->mode) {
@@ -315,7 +315,7 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
 	usleep_range(100, 200);
 }
 
-static int artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -154,9 +154,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	return 0;
 }
 
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
-				  enum pci_barno bar, dma_addr_t cpu_addr,
-				  enum dw_pcie_as_type as_type)
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+				  dma_addr_t cpu_addr, enum pci_barno bar)
 {
 	int ret;
 	u32 free_win;
@@ -168,8 +167,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
 		return -EINVAL;
 	}
 
-	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
-				       as_type);
+	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
+				       cpu_addr, bar);
 	if (ret < 0) {
 		dev_err(pci->dev, "Failed to program IB window\n");
 		return ret;
@@ -185,8 +184,9 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
 				   phys_addr_t phys_addr,
 				   u64 pci_addr, size_t size)
 {
-	u32 free_win;
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 free_win;
+	int ret;
 
 	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
 	if (free_win >= pci->num_ob_windows) {
@@ -194,8 +194,10 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
 		return -EINVAL;
 	}
 
-	dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
-				     phys_addr, pci_addr, size);
+	ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+					   phys_addr, pci_addr, size);
+	if (ret)
+		return ret;
 
 	set_bit(free_win, ep->ob_window_map);
 	ep->outbound_addr[free_win] = phys_addr;
@@ -213,7 +215,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 
 	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
 
-	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
 	clear_bit(atu_index, ep->ib_window_map);
 	ep->epf_bar[bar] = NULL;
 }
@@ -221,27 +223,25 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 			      struct pci_epf_bar *epf_bar)
 {
-	int ret;
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	enum pci_barno bar = epf_bar->barno;
 	size_t size = epf_bar->size;
 	int flags = epf_bar->flags;
-	enum dw_pcie_as_type as_type;
-	u32 reg;
 	unsigned int func_offset = 0;
+	int ret, type;
+	u32 reg;
 
 	func_offset = dw_pcie_ep_func_select(ep, func_no);
 
 	reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
 
 	if (!(flags & PCI_BASE_ADDRESS_SPACE))
-		as_type = DW_PCIE_AS_MEM;
+		type = PCIE_ATU_TYPE_MEM;
 	else
-		as_type = DW_PCIE_AS_IO;
+		type = PCIE_ATU_TYPE_IO;
 
-	ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
-				     epf_bar->phys_addr, as_type);
+	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
 	if (ret)
 		return ret;
 
@@ -289,7 +289,7 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	if (ret < 0)
 		return;
 
-	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
 	clear_bit(atu_index, ep->ob_window_map);
 }
 
@@ -435,8 +435,7 @@ static void dw_pcie_ep_stop(struct pci_epc *epc)
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
-	if (pci->ops && pci->ops->stop_link)
-		pci->ops->stop_link(pci);
+	dw_pcie_stop_link(pci);
 }
 
 static int dw_pcie_ep_start(struct pci_epc *epc)
@@ -444,10 +443,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
-	if (!pci->ops || !pci->ops->start_link)
-		return -EINVAL;
-
-	return pci->ops->start_link(pci);
+	return dw_pcie_start_link(pci);
 }
 
 static const struct pci_epc_features*
@@ -699,17 +695,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 
 	if (!pci->dbi_base2) {
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
-		if (!res)
+		if (!res) {
 			pci->dbi_base2 = pci->dbi_base + SZ_4K;
-		else {
+		} else {
 			pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
 			if (IS_ERR(pci->dbi_base2))
 				return PTR_ERR(pci->dbi_base2);
 		}
 	}
 
-	dw_pcie_iatu_detect(pci);
-
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
 	if (!res)
 		return -EINVAL;
@@ -717,17 +711,17 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 	ep->phys_base = res->start;
 	ep->addr_size = resource_size(res);
 
-	ep->ib_window_map = devm_kcalloc(dev,
-					 BITS_TO_LONGS(pci->num_ib_windows),
-					 sizeof(long),
-					 GFP_KERNEL);
+	dw_pcie_version_detect(pci);
+
+	dw_pcie_iatu_detect(pci);
+
+	ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+					       GFP_KERNEL);
 	if (!ep->ib_window_map)
 		return -ENOMEM;
 
-	ep->ob_window_map = devm_kcalloc(dev,
-					 BITS_TO_LONGS(pci->num_ob_windows),
-					 sizeof(long),
-					 GFP_KERNEL);
+	ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+					       GFP_KERNEL);
 	if (!ep->ob_window_map)
 		return -ENOMEM;
 
@@ -780,8 +774,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
 					     epc->mem->window.page_size);
 	if (!ep->msi_mem) {
+		ret = -ENOMEM;
 		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
-		return -ENOMEM;
+		goto err_exit_epc_mem;
 	}
 
 	if (ep->ops->get_features) {
@@ -790,6 +785,19 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 		return 0;
 	}
 
-	return dw_pcie_ep_init_complete(ep);
+	ret = dw_pcie_ep_init_complete(ep);
+	if (ret)
+		goto err_free_epc_mem;
+
+	return 0;
+
+err_free_epc_mem:
+	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+			      epc->mem->window.page_size);
+
+err_exit_epc_mem:
+	pci_epc_mem_exit(epc);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -53,7 +53,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
 };
 
 /* MSI int handler */
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
 {
 	int i, pos;
 	unsigned long val;
@@ -88,7 +88,7 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
 static void dw_chained_msi_isr(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
-	struct pcie_port *pp;
+	struct dw_pcie_rp *pp;
 
 	chained_irq_enter(chip, desc);
 
@@ -100,7 +100,7 @@ static void dw_chained_msi_isr(struct irq_desc *desc)
 
 static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	u64 msi_target;
 
@@ -123,7 +123,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,
 
 static void dw_pci_bottom_mask(struct irq_data *d)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned int res, bit, ctrl;
 	unsigned long flags;
@@ -142,7 +142,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
 
 static void dw_pci_bottom_unmask(struct irq_data *d)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned int res, bit, ctrl;
 	unsigned long flags;
@@ -161,7 +161,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
 
 static void dw_pci_bottom_ack(struct irq_data *d)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned int res, bit, ctrl;
 
@@ -185,7 +185,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
 				    unsigned int virq, unsigned int nr_irqs,
 				    void *args)
 {
-	struct pcie_port *pp = domain->host_data;
+	struct dw_pcie_rp *pp = domain->host_data;
 	unsigned long flags;
 	u32 i;
 	int bit;
@@ -213,7 +213,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
 				    unsigned int virq, unsigned int nr_irqs)
 {
 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
-	struct pcie_port *pp = domain->host_data;
+	struct dw_pcie_rp *pp = domain->host_data;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&pp->lock, flags);
@@ -229,7 +229,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
 	.free = dw_pcie_irq_domain_free,
 };
 
-int dw_pcie_allocate_domains(struct pcie_port *pp)
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
@@ -255,10 +255,15 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
 	return 0;
 }
 
-static void dw_pcie_free_msi(struct pcie_port *pp)
+static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
 {
-	if (pp->msi_irq)
-		irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL);
+	u32 ctrl;
+
+	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+		if (pp->msi_irq[ctrl] > 0)
+			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+							 NULL, NULL);
+	}
 
 	irq_domain_remove(pp->msi_domain);
 	irq_domain_remove(pp->irq_domain);
@@ -267,12 +272,13 @@ static void dw_pcie_free_msi(struct pcie_port *pp)
 		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 		struct device *dev = pci->dev;
 
-		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
-				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (pp->msi_page)
+			__free_page(pp->msi_page);
 	}
 }
 
-static void dw_pcie_msi_init(struct pcie_port *pp)
+static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	u64 msi_target = (u64)pp->msi_data;
@@ -285,7 +291,112 @@ static void dw_pcie_msi_init(struct pcie_port *pp)
 	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
 }
 
-int dw_pcie_host_init(struct pcie_port *pp)
+static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	u32 ctrl, max_vectors;
+	int irq;
+
+	/* Parse any "msiX" IRQs described in the devicetree */
+	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+		char msi_name[] = "msiX";
+
+		msi_name[3] = '0' + ctrl;
+		irq = platform_get_irq_byname_optional(pdev, msi_name);
+		if (irq == -ENXIO)
+			break;
+		if (irq < 0)
+			return dev_err_probe(dev, irq,
+					     "Failed to parse MSI IRQ '%s'\n",
+					     msi_name);
+
+		pp->msi_irq[ctrl] = irq;
+	}
+
+	/* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
+	if (ctrl == 0)
+		return -ENXIO;
+
+	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
+	if (pp->num_vectors > max_vectors) {
+		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
+			 max_vectors);
+		pp->num_vectors = max_vectors;
+	}
+	if (!pp->num_vectors)
+		pp->num_vectors = max_vectors;
+
+	return 0;
+}
+
+static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	int ret;
+	u32 ctrl, num_ctrls;
+
+	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+		pp->irq_mask[ctrl] = ~0;
+
+	if (!pp->msi_irq[0]) {
+		ret = dw_pcie_parse_split_msi_irq(pp);
+		if (ret < 0 && ret != -ENXIO)
+			return ret;
+	}
+
+	if (!pp->num_vectors)
+		pp->num_vectors = MSI_DEF_NUM_VECTORS;
+	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+	if (!pp->msi_irq[0]) {
+		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
+		if (pp->msi_irq[0] < 0) {
+			pp->msi_irq[0] = platform_get_irq(pdev, 0);
+			if (pp->msi_irq[0] < 0)
+				return pp->msi_irq[0];
+		}
+	}
+
+	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
+
+	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+	ret = dw_pcie_allocate_domains(pp);
+	if (ret)
+		return ret;
+
+	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+		if (pp->msi_irq[ctrl] > 0)
+			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+							 dw_chained_msi_isr, pp);
+	}
+
+	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+	if (ret)
+		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
+	pp->msi_page = alloc_page(GFP_DMA32);
+	pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
+				    PAGE_SIZE, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(dev, pp->msi_data);
+	if (ret) {
+		dev_err(pci->dev, "Failed to map MSI data\n");
+		__free_page(pp->msi_page);
+		pp->msi_page = NULL;
+		pp->msi_data = 0;
+		dw_pcie_free_msi(pp);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct device *dev = pci->dev;
@@ -293,17 +404,17 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct resource_entry *win;
 	struct pci_host_bridge *bridge;
-	struct resource *cfg_res;
+	struct resource *res;
 	int ret;
 
-	raw_spin_lock_init(&pci->pp.lock);
+	raw_spin_lock_init(&pp->lock);
 
-	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
-	if (cfg_res) {
-		pp->cfg0_size = resource_size(cfg_res);
-		pp->cfg0_base = cfg_res->start;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+	if (res) {
+		pp->cfg0_size = resource_size(res);
+		pp->cfg0_base = res->start;
 
-		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res);
+		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
 		if (IS_ERR(pp->va_cfg0_base))
 			return PTR_ERR(pp->va_cfg0_base);
 	} else {
@@ -312,8 +423,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (!pci->dbi_base) {
-		struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
-		pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+		pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
 		if (IS_ERR(pci->dbi_base))
 			return PTR_ERR(pci->dbi_base);
 	}
@@ -350,67 +461,39 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		    of_property_read_bool(np, "msi-parent") ||
 		    of_property_read_bool(np, "msi-map"));
 
-		if (!pp->num_vectors) {
+		/*
+		 * For the has_msi_ctrl case the default assignment is handled
+		 * in the dw_pcie_msi_host_init().
+		 */
+		if (!pp->has_msi_ctrl && !pp->num_vectors) {
 			pp->num_vectors = MSI_DEF_NUM_VECTORS;
 		} else if (pp->num_vectors > MAX_MSI_IRQS) {
 			dev_err(dev, "Invalid number of vectors\n");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto err_deinit_host;
 		}
 
 		if (pp->ops->msi_host_init) {
 			ret = pp->ops->msi_host_init(pp);
 			if (ret < 0)
-				return ret;
+				goto err_deinit_host;
 		} else if (pp->has_msi_ctrl) {
-			u32 ctrl, num_ctrls;
-
-			num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-			for (ctrl = 0; ctrl < num_ctrls; ctrl++)
-				pp->irq_mask[ctrl] = ~0;
-
-			if (!pp->msi_irq) {
-				pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
-				if (pp->msi_irq < 0) {
-					pp->msi_irq = platform_get_irq(pdev, 0);
-					if (pp->msi_irq < 0)
-						return pp->msi_irq;
-				}
-			}
-
-			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
-
-			ret = dw_pcie_allocate_domains(pp);
-			if (ret)
-				return ret;
-
-			if (pp->msi_irq > 0)
-				irq_set_chained_handler_and_data(pp->msi_irq,
-								 dw_chained_msi_isr,
-								 pp);
-
-			ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
-			if (ret)
-				dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
-
-			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
-							    sizeof(pp->msi_msg),
-							    DMA_FROM_DEVICE,
-							    DMA_ATTR_SKIP_CPU_SYNC);
-			ret = dma_mapping_error(pci->dev, pp->msi_data);
-			if (ret) {
-				dev_err(pci->dev, "Failed to map MSI data\n");
-				pp->msi_data = 0;
-				goto err_free_msi;
-			}
+			ret = dw_pcie_msi_host_init(pp);
+			if (ret < 0)
+				goto err_deinit_host;
 		}
 	}
 
+	dw_pcie_version_detect(pci);
+
 	dw_pcie_iatu_detect(pci);
 
-	dw_pcie_setup_rc(pp);
+	ret = dw_pcie_setup_rc(pp);
+	if (ret)
+		goto err_free_msi;
 
-	if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
-		ret = pci->ops->start_link(pci);
+	if (!dw_pcie_link_up(pci)) {
+		ret = dw_pcie_start_link(pci);
 		if (ret)
 			goto err_free_msi;
 	}
@@ -421,32 +504,50 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	bridge->sysdata = pp;
 
 	ret = pci_host_probe(bridge);
-	if (!ret)
-		return 0;
+	if (ret)
+		goto err_stop_link;
+
+	return 0;
+
+err_stop_link:
+	dw_pcie_stop_link(pci);
 
 err_free_msi:
 	if (pp->has_msi_ctrl)
 		dw_pcie_free_msi(pp);
+
+err_deinit_host:
+	if (pp->ops->host_deinit)
+		pp->ops->host_deinit(pp);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dw_pcie_host_init);
 
-void dw_pcie_host_deinit(struct pcie_port *pp)
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
 {
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
 	pci_stop_root_bus(pp->bridge->bus);
 	pci_remove_root_bus(pp->bridge->bus);
+
+	dw_pcie_stop_link(pci);
+
 	if (pp->has_msi_ctrl)
 		dw_pcie_free_msi(pp);
+
+	if (pp->ops->host_deinit)
+		pp->ops->host_deinit(pp);
 }
 EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
 
 static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
 						unsigned int devfn, int where)
 {
-	int type;
-	u32 busdev;
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int type, ret;
+	u32 busdev;
 
 	/*
	 * Checking whether the link is up here is a last line of defense
@@ -467,8 +568,10 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
 	else
 		type = PCIE_ATU_TYPE_CFG1;
 
-	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
+	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
+					pp->cfg0_size);
+	if (ret)
+		return NULL;
 
 	return pp->va_cfg0_base + where;
 }
@@ -476,33 +579,45 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
 static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
 				 int where, int size, u32 *val)
 {
-	int ret;
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int ret;
 
 	ret = pci_generic_config_read(bus, devfn, where, size, val);
+	if (ret != PCIBIOS_SUCCESSFUL)
+		return ret;
 
-	if (!ret && pci->io_cfg_atu_shared)
-		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
-					  pp->io_bus_addr, pp->io_size);
+	if (pp->cfg0_io_shared) {
+		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+						pp->io_base, pp->io_bus_addr,
+						pp->io_size);
+		if (ret)
+			return PCIBIOS_SET_FAILED;
+	}
 
-	return ret;
+	return PCIBIOS_SUCCESSFUL;
 }
 
 static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
 				 int where, int size, u32 val)
 {
-	int ret;
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int ret;
 
 	ret = pci_generic_config_write(bus, devfn, where, size, val);
+	if (ret != PCIBIOS_SUCCESSFUL)
+		return ret;
 
-	if (!ret && pci->io_cfg_atu_shared)
-		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
-					  pp->io_bus_addr, pp->io_size);
+	if (pp->cfg0_io_shared) {
+		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+						pp->io_base, pp->io_bus_addr,
+						pp->io_size);
+		if (ret)
+			return PCIBIOS_SET_FAILED;
+	}
 
-	return ret;
+	return PCIBIOS_SUCCESSFUL;
 }
 
 static struct pci_ops dw_child_pcie_ops = {
@@ -513,7 +628,7 @@ static struct pci_ops dw_child_pcie_ops = {
 
 void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
 {
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 
 	if (PCI_SLOT(devfn) > 0)
@@ -529,11 +644,72 @@ static struct pci_ops dw_pcie_ops = {
 	.write = pci_generic_config_write,
 };
 
-void dw_pcie_setup_rc(struct pcie_port *pp)
+static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 {
-	int i;
-	u32 val, ctrl, num_ctrls;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct resource_entry *entry;
+	int i, ret;
+
+	/* Note the very first outbound ATU is used for CFG IOs */
+	if (!pci->num_ob_windows) {
+		dev_err(pci->dev, "No outbound iATU found\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Ensure all outbound windows are disabled before proceeding with
+	 * the MEM/IO ranges setups.
+	 */
+	for (i = 0; i < pci->num_ob_windows; i++)
+		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+
+	i = 0;
+	resource_list_for_each_entry(entry, &pp->bridge->windows) {
+		if (resource_type(entry->res) != IORESOURCE_MEM)
+			continue;
+
+		if (pci->num_ob_windows <= ++i)
+			break;
+
+		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
+						entry->res->start,
+						entry->res->start - entry->offset,
+						resource_size(entry->res));
+		if (ret) {
+			dev_err(pci->dev, "Failed to set MEM range %pr\n",
+				entry->res);
+			return ret;
+		}
+	}
+
+	if (pp->io_size) {
+		if (pci->num_ob_windows > ++i) {
+			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
+							pp->io_base,
+							pp->io_bus_addr,
+							pp->io_size);
+			if (ret) {
+				dev_err(pci->dev, "Failed to set IO range %pr\n",
+					entry->res);
+				return ret;
+			}
+		} else {
+			pp->cfg0_io_shared = true;
+		}
+	}
+
+	if (pci->num_ob_windows <= i)
+		dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+			 pci->num_ob_windows);
+
+	return 0;
+}
+
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	u32 val, ctrl, num_ctrls;
+	int ret;
 
 	/*
 	 * Enable DBI read-only registers for writing/updating configuration.
@@ -582,45 +758,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
 	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
 
-	/* Ensure all outbound windows are disabled so there are multiple matches */
-	for (i = 0; i < pci->num_ob_windows; i++)
-		dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);
-
 	/*
 	 * If the platform provides its own child bus config accesses, it means
 	 * the platform uses its own address translation component rather than
 	 * ATU, so we should not program the ATU here.
 	 */
 	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
-		int atu_idx = 0;
-		struct resource_entry *entry;
-
-		/* Get last memory resource entry */
-		resource_list_for_each_entry(entry, &pp->bridge->windows) {
-			if (resource_type(entry->res) != IORESOURCE_MEM)
-				continue;
-
-			if (pci->num_ob_windows <= ++atu_idx)
-				break;
-
-			dw_pcie_prog_outbound_atu(pci, atu_idx,
-						  PCIE_ATU_TYPE_MEM, entry->res->start,
-						  entry->res->start - entry->offset,
-						  resource_size(entry->res));
-		}
-
-		if (pp->io_size) {
-			if (pci->num_ob_windows > ++atu_idx)
-				dw_pcie_prog_outbound_atu(pci, atu_idx,
-							  PCIE_ATU_TYPE_IO, pp->io_base,
-							  pp->io_bus_addr, pp->io_size);
-			else
-				pci->io_cfg_atu_shared = true;
-		}
-
-		if (pci->num_ob_windows <= atu_idx)
-			dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
-				 pci->num_ob_windows);
+		ret = dw_pcie_iatu_setup(pp);
+		if (ret)
+			return ret;
 	}
 
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
@@ -633,5 +779,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
 
 	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
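dw_pcie_host_init() above now unwinds through pp->ops->host_deinit on every failure path, and dw_pcie_host_deinit() invokes it last, but the ops table itself is declared in pcie-designware.h and is not part of this diff. A sketch of the resulting callback table plus a hypothetical driver hook (foo_pcie_* names are illustrative only):

	/* Sketch: host ops after this series; host_deinit mirrors host_init */
	struct dw_pcie_host_ops {
		int	(*host_init)(struct dw_pcie_rp *pp);
		void	(*host_deinit)(struct dw_pcie_rp *pp);
		int	(*msi_host_init)(struct dw_pcie_rp *pp);
	};

	/* Hypothetical driver usage: release what foo_pcie_host_init() acquired */
	static void foo_pcie_host_deinit(struct dw_pcie_rp *pp)
	{
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct foo_pcie *foo = to_foo_pcie(pci);	/* illustrative helper */

		clk_disable_unprepare(foo->ref_clk);
	}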
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -17,13 +17,11 @@
 #include <linux/platform_device.h>
 #include <linux/resource.h>
 #include <linux/types.h>
-#include <linux/regmap.h>
 
 #include "pcie-designware.h"
 
 struct dw_plat_pcie {
 	struct dw_pcie *pci;
-	struct regmap *regmap;
 	enum dw_pcie_device_mode mode;
 };
 
@@ -31,20 +29,9 @@ struct dw_plat_pcie_of_data {
 	enum dw_pcie_device_mode mode;
 };
 
-static const struct of_device_id dw_plat_pcie_of_match[];
-
 static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
 };
 
-static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
-{
-	return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
-	.start_link = dw_plat_pcie_establish_link,
-};
-
 static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -96,7 +83,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
 				 struct platform_device *pdev)
 {
 	struct dw_pcie *pci = dw_plat_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	int ret;
 
@@ -140,7 +127,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	pci->dev = dev;
-	pci->ops = &dw_pcie_ops;
 
 	dw_plat_pcie->pci = pci;
 	dw_plat_pcie->mode = mode;
@@ -153,20 +139,21 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
 			return -ENODEV;
 
 		ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
-		if (ret < 0)
-			return ret;
+		break;
 	case DW_PCIE_EP_TYPE:
 		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
 			return -ENODEV;
 
 		pci->ep.ops = &pcie_ep_ops;
-		return dw_pcie_ep_init(&pci->ep);
+		ret = dw_pcie_ep_init(&pci->ep);
+		break;
 	default:
 		dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+		ret = -EINVAL;
+		break;
 	}
 
-	return 0;
+	return ret;
 }
 
 static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
@ -8,14 +8,41 @@
|
||||
* Author: Jingoo Han <jg1.han@samsung.com>
|
||||
*/
|
||||
|
||||
#include <linux/align.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include "../../pci.h"
|
||||
#include "pcie-designware.h"
|
||||
|
||||
void dw_pcie_version_detect(struct dw_pcie *pci)
|
||||
{
|
||||
u32 ver;
|
||||
|
||||
/* The content of the CSR is zero on DWC PCIe older than v4.70a */
|
||||
ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
|
||||
if (!ver)
|
||||
return;
|
||||
|
||||
if (pci->version && pci->version != ver)
|
||||
dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
|
||||
pci->version, ver);
|
||||
else
|
||||
pci->version = ver;
|
||||
|
||||
ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
|
||||
|
||||
if (pci->type && pci->type != ver)
|
||||
dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
|
||||
pci->type, ver);
|
||||
else
|
||||
pci->type = ver;
|
||||
}
|
||||
|
||||
/*
|
||||
* These interfaces resemble the pci_find_*capability() interfaces, but these
|
||||
* are for configuring host controllers, which are bridges *to* PCI devices but
|
||||
@ -181,48 +208,61 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
|
||||
dev_err(pci->dev, "write DBI address failed\n");
|
||||
}
|
||||
|
||||
static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
|
||||
static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
|
||||
u32 index)
|
||||
{
|
||||
if (pci->iatu_unroll_enabled)
|
||||
return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
|
||||
|
||||
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
|
||||
return pci->atu_base;
|
||||
}
|
||||
|
||||
static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
|
||||
{
|
||||
void __iomem *base;
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
if (pci->ops && pci->ops->read_dbi)
|
||||
return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
|
||||
base = dw_pcie_select_atu(pci, dir, index);
|
||||
|
||||
ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
|
||||
if (pci->ops && pci->ops->read_dbi)
|
||||
return pci->ops->read_dbi(pci, base, reg, 4);
|
||||
|
||||
ret = dw_pcie_read(base + reg, 4, &val);
|
||||
if (ret)
|
||||
dev_err(pci->dev, "Read ATU address failed\n");
|
||||
|
||||
return val;
|
||||
}

-static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
+			       u32 reg, u32 val)
 {
+	void __iomem *base;
 	int ret;

+	base = dw_pcie_select_atu(pci, dir, index);
+
 	if (pci->ops && pci->ops->write_dbi) {
-		pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
+		pci->ops->write_dbi(pci, base, reg, 4, val);
 		return;
 	}

-	ret = dw_pcie_write(pci->atu_base + reg, 4, val);
+	ret = dw_pcie_write(base + reg, 4, val);
 	if (ret)
 		dev_err(pci->dev, "Write ATU address failed\n");
 }

-static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
 {
-	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
-	return dw_pcie_readl_atu(pci, offset + reg);
+	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
 }

-static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
-				     u32 val)
+static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
+					 u32 val)
 {
-	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
-	dw_pcie_writel_atu(pci, offset + reg, val);
+	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
 }

@@ -266,264 +306,160 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
 	return val | PCIE_ATU_TD;
 }

-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
-					     int index, int type,
-					     u64 cpu_addr, u64 pci_addr,
-					     u64 size)
-{
-	u32 retries, val;
-	u64 limit_addr = cpu_addr + size - 1;
-
-	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
-				 lower_32_bits(cpu_addr));
-	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
-				 upper_32_bits(cpu_addr));
-	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
-				 lower_32_bits(limit_addr));
-	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
-				 upper_32_bits(limit_addr));
-	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
-				 lower_32_bits(pci_addr));
-	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
-				 upper_32_bits(pci_addr));
-	val = type | PCIE_ATU_FUNC_NUM(func_no);
-	val = upper_32_bits(size - 1) ?
-		val | PCIE_ATU_INCREASE_REGION_SIZE : val;
-	if (pci->version == 0x490A)
-		val = dw_pcie_enable_ecrc(val);
-	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
-	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
-				 PCIE_ATU_ENABLE);
-
-	/*
-	 * Make sure ATU enable takes effect before any subsequent config
-	 * and I/O accesses.
-	 */
-	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
-		val = dw_pcie_readl_ob_unroll(pci, index,
-					      PCIE_ATU_UNR_REGION_CTRL2);
-		if (val & PCIE_ATU_ENABLE)
-			return;
-
-		mdelay(LINK_WAIT_IATU);
-	}
-	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-
-static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
-					int index, int type, u64 cpu_addr,
-					u64 pci_addr, u64 size)
+static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+				       int index, int type, u64 cpu_addr,
+				       u64 pci_addr, u64 size)
 {
 	u32 retries, val;
+	u64 limit_addr;

 	if (pci->ops && pci->ops->cpu_addr_fixup)
 		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

-	if (pci->iatu_unroll_enabled) {
-		dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
-						 cpu_addr, pci_addr, size);
-		return;
-	}
+	limit_addr = cpu_addr + size - 1;

-	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
-			   PCIE_ATU_REGION_OUTBOUND | index);
-	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
-			   lower_32_bits(cpu_addr));
-	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
-			   upper_32_bits(cpu_addr));
-	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
-			   lower_32_bits(cpu_addr + size - 1));
-	if (pci->version >= 0x460A)
-		dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT,
-				   upper_32_bits(cpu_addr + size - 1));
-	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
-			   lower_32_bits(pci_addr));
-	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
-			   upper_32_bits(pci_addr));
-	val = type | PCIE_ATU_FUNC_NUM(func_no);
-	val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ?
-		val | PCIE_ATU_INCREASE_REGION_SIZE : val;
-	if (pci->version == 0x490A)
-		val = dw_pcie_enable_ecrc(val);
-	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val);
-	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
-
-	/*
-	 * Make sure ATU enable takes effect before any subsequent config
-	 * and I/O accesses.
-	 */
-	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
-		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
-		if (val & PCIE_ATU_ENABLE)
-			return;
-
-		mdelay(LINK_WAIT_IATU);
-	}
-	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
-			       u64 cpu_addr, u64 pci_addr, u64 size)
-{
-	__dw_pcie_prog_outbound_atu(pci, 0, index, type,
-				    cpu_addr, pci_addr, size);
-}
-
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-				  int type, u64 cpu_addr, u64 pci_addr,
-				  u64 size)
-{
-	__dw_pcie_prog_outbound_atu(pci, func_no, index, type,
-				    cpu_addr, pci_addr, size);
-}
-
-static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
-{
-	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
-	return dw_pcie_readl_atu(pci, offset + reg);
-}
-
-static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
-				     u32 val)
-{
-	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
-	dw_pcie_writel_atu(pci, offset + reg, val);
-}
-
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
-					   int index, int bar, u64 cpu_addr,
-					   enum dw_pcie_as_type as_type)
-{
-	int type;
-	u32 retries, val;
-
-	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
-				 lower_32_bits(cpu_addr));
-	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
-				 upper_32_bits(cpu_addr));
-
-	switch (as_type) {
-	case DW_PCIE_AS_MEM:
-		type = PCIE_ATU_TYPE_MEM;
-		break;
-	case DW_PCIE_AS_IO:
-		type = PCIE_ATU_TYPE_IO;
-		break;
-	default:
+	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
+	    !IS_ALIGNED(cpu_addr, pci->region_align) ||
+	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
 		return -EINVAL;
 	}

-	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
-				 PCIE_ATU_FUNC_NUM(func_no));
-	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
-				 PCIE_ATU_FUNC_NUM_MATCH_EN |
-				 PCIE_ATU_ENABLE |
-				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+			      lower_32_bits(cpu_addr));
+	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+			      upper_32_bits(cpu_addr));
+
+	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+			      lower_32_bits(limit_addr));
+	if (dw_pcie_ver_is_ge(pci, 460A))
+		dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+				      upper_32_bits(limit_addr));
+
+	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
+			      lower_32_bits(pci_addr));
+	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
+			      upper_32_bits(pci_addr));
+
+	val = type | PCIE_ATU_FUNC_NUM(func_no);
+	if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+	    dw_pcie_ver_is_ge(pci, 460A))
+		val |= PCIE_ATU_INCREASE_REGION_SIZE;
+	if (dw_pcie_ver_is(pci, 490A))
+		val = dw_pcie_enable_ecrc(val);
+	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+
+	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);

 	/*
 	 * Make sure ATU enable takes effect before any subsequent config
 	 * and I/O accesses.
 	 */
 	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
-		val = dw_pcie_readl_ib_unroll(pci, index,
-					      PCIE_ATU_UNR_REGION_CTRL2);
+		val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
 		if (val & PCIE_ATU_ENABLE)
 			return 0;

 		mdelay(LINK_WAIT_IATU);
 	}
-	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
-
-	return -EBUSY;
+
+	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+
+	return -ETIMEDOUT;
 }
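Reviewer note: the reworked flag check is what makes the 4GB-boundary case from the changelog concrete. A region can be far smaller than 4GB and still straddle a 4GB boundary, in which case the upper halves of base and limit differ and INCREASE_REGION_SIZE must be set even though the size-based test never fires. A standalone illustration of the arithmetic (addresses made up for the example):

/* Illustration only: why the limit, not the size, decides the flag. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_addr = 0xFFFF0000;	/* just below the 4GB mark */
	uint64_t size = 0x20000;	/* 128KB, nowhere near 4GB */
	uint64_t limit = cpu_addr + size - 1;

	/* Old, size-based test: never true for small regions. */
	printf("upper_32_bits(size - 1) = %u\n", (unsigned)((size - 1) >> 32));
	/* New, limit-based test: true because the window crosses 4GB. */
	printf("crosses 4GB boundary: %d\n", (limit >> 32) > (cpu_addr >> 32));
	return 0;
}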

+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+			      u64 cpu_addr, u64 pci_addr, u64 size)
+{
+	return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+					   cpu_addr, pci_addr, size);
+}
+
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+				 int type, u64 cpu_addr, u64 pci_addr,
+				 u64 size)
+{
+	return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+					   cpu_addr, pci_addr, size);
+}
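Reviewer note: since the outbound helpers now return an int, callers are expected to propagate the error instead of assuming success. A hypothetical call site, purely illustrative (the function name and error message are not taken from this diff):

/* Hypothetical caller: propagate iATU setup failures instead of ignoring them. */
static int example_map_mem_window(struct dw_pcie *pci, u64 cpu_addr,
				  u64 pci_addr, u64 size)
{
	int ret;

	ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
					cpu_addr, pci_addr, size);
	if (ret)
		dev_err(pci->dev, "Failed to map memory window: %d\n", ret);

	return ret;
}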

+static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
+{
+	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
+}
+
+static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
+					 u32 val)
+{
+	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
+}

 int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-			     int bar, u64 cpu_addr,
-			     enum dw_pcie_as_type as_type)
+			     int type, u64 cpu_addr, u8 bar)
 {
-	int type;
 	u32 retries, val;

-	if (pci->iatu_unroll_enabled)
-		return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
-						       cpu_addr, as_type);
-
-	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
-			   index);
-	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
-	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
-
-	switch (as_type) {
-	case DW_PCIE_AS_MEM:
-		type = PCIE_ATU_TYPE_MEM;
-		break;
-	case DW_PCIE_AS_IO:
-		type = PCIE_ATU_TYPE_IO;
-		break;
-	default:
+	if (!IS_ALIGNED(cpu_addr, pci->region_align))
 		return -EINVAL;
-	}

-	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
-			   PCIE_ATU_FUNC_NUM(func_no));
-	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
-			   PCIE_ATU_FUNC_NUM_MATCH_EN |
-			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+			      lower_32_bits(cpu_addr));
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+			      upper_32_bits(cpu_addr));
+
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
+			      PCIE_ATU_FUNC_NUM(func_no));
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
+			      PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
+			      PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

 	/*
 	 * Make sure ATU enable takes effect before any subsequent config
 	 * and I/O accesses.
 	 */
 	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
-		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+		val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
 		if (val & PCIE_ATU_ENABLE)
 			return 0;

 		mdelay(LINK_WAIT_IATU);
 	}

 	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

-	return -EBUSY;
+	return -ETIMEDOUT;
 }
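Reviewer note: with the as_type enum gone, inbound windows take a plain PCIE_ATU_TYPE_* value and the BAR number directly. A hypothetical endpoint-side call mapping BAR0 to a local buffer (names illustrative; BAR_0 assumed to come from the EP framework headers):

/* Hypothetical EP-side usage: map BAR0 inbound to a local DMA buffer. */
static int example_ep_map_bar0(struct dw_pcie *pci, dma_addr_t cpu_addr)
{
	int ret;

	ret = dw_pcie_prog_inbound_atu(pci, 0, 0, PCIE_ATU_TYPE_MEM,
				       cpu_addr, BAR_0);
	if (ret)
		dev_err(pci->dev, "Failed to program inbound window: %d\n", ret);

	return ret;
}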

-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
-			 enum dw_pcie_region_type type)
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
 {
-	int region;
-
-	switch (type) {
-	case DW_PCIE_REGION_INBOUND:
-		region = PCIE_ATU_REGION_INBOUND;
-		break;
-	case DW_PCIE_REGION_OUTBOUND:
-		region = PCIE_ATU_REGION_OUTBOUND;
-		break;
-	default:
-		return;
-	}
-
-	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
-	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
+	dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
 }
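Reviewer note: beyond dropping the enum, this also fixes a subtle wart -- the old path wrote ~PCIE_ATU_ENABLE to CTRL2, which clears the enable bit but sets every other bit in the register; writing plain 0 is the clean disable. Hypothetical usage with the new direction-based signature:

/* Hypothetical teardown: disable outbound region 1 and inbound region 0. */
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, 1);
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, 0);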

 int dw_pcie_wait_for_link(struct dw_pcie *pci)
 {
+	u32 offset, val;
 	int retries;

 	/* Check if the link is up or not */
 	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
-		if (dw_pcie_link_up(pci)) {
-			dev_info(pci->dev, "Link up\n");
-			return 0;
-		}
+		if (dw_pcie_link_up(pci))
+			break;

 		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 	}

-	dev_info(pci->dev, "Phy link never came up\n");
+	if (retries >= LINK_WAIT_MAX_RETRIES) {
+		dev_err(pci->dev, "Phy link never came up\n");
+		return -ETIMEDOUT;
+	}

-	return -ETIMEDOUT;
+	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+	val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+	dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+		 FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+		 FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
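Reviewer note: the new log line pulls Current Link Speed and Negotiated Link Width out of the Link Status register with FIELD_GET(). A standalone illustration of the same decode (the register value is made up; the mask values match the standard PCIe capability layout):

/* Illustration: decoding PCI_EXP_LNKSTA the way the new log line does. */
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKSTA_CLS	0x000f	/* Current Link Speed */
#define PCI_EXP_LNKSTA_NLW	0x03f0	/* Negotiated Link Width */

int main(void)
{
	uint16_t lnksta = 0x1043;	/* made-up readback: Gen3, x4 */

	printf("PCIe Gen.%u x%u link up\n",
	       lnksta & PCI_EXP_LNKSTA_CLS,
	       (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
	return 0;
}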

@@ -534,7 +470,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)
 	if (pci->ops && pci->ops->link_up)
 		return pci->ops->link_up(pci);

-	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
 	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
 		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
 }
@@ -586,95 +522,81 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)

 }

-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
 {
 	u32 val;

 	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
 	if (val == 0xffffffff)
-		return 1;
+		return true;

-	return 0;
-}
-
-static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
-{
-	int max_region, i, ob = 0, ib = 0;
-	u32 val;
-
-	max_region = min((int)pci->atu_size / 512, 256);
-
-	for (i = 0; i < max_region; i++) {
-		dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
-					 0x11110000);
-
-		val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
-		if (val == 0x11110000)
-			ob++;
-		else
-			break;
-	}
-
-	for (i = 0; i < max_region; i++) {
-		dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
-					 0x11110000);
-
-		val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
-		if (val == 0x11110000)
-			ib++;
-		else
-			break;
-	}
-	pci->num_ib_windows = ib;
-	pci->num_ob_windows = ob;
+	return false;
 }

 static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
 {
-	int max_region, i, ob = 0, ib = 0;
-	u32 val;
+	int max_region, ob, ib;
+	u32 val, min, dir;
+	u64 max;

-	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
-	max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+	if (pci->iatu_unroll_enabled) {
+		max_region = min((int)pci->atu_size / 512, 256);
+	} else {
+		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+		max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+	}

-	for (i = 0; i < max_region; i++) {
-		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
-		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
-		val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
-		if (val == 0x11110000)
-			ob++;
-		else
+	for (ob = 0; ob < max_region; ob++) {
+		dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
+		val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
+		if (val != 0x11110000)
 			break;
 	}

-	for (i = 0; i < max_region; i++) {
-		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
-		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
-		val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
-		if (val == 0x11110000)
-			ib++;
-		else
+	for (ib = 0; ib < max_region; ib++) {
+		dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
+		val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
+		if (val != 0x11110000)
 			break;
 	}

-	pci->num_ib_windows = ib;
+	if (ob) {
+		dir = PCIE_ATU_REGION_DIR_OB;
+	} else if (ib) {
+		dir = PCIE_ATU_REGION_DIR_IB;
+	} else {
+		dev_err(pci->dev, "No iATU regions found\n");
+		return;
+	}
+
+	dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
+	min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);
+
+	if (dw_pcie_ver_is_ge(pci, 460A)) {
+		dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
+		max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
+	} else {
+		max = 0;
+	}
+
 	pci->num_ob_windows = ob;
+	pci->num_ib_windows = ib;
+	pci->region_align = 1 << fls(min);
+	pci->region_limit = (max << 32) | (SZ_4G - 1);
 }
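Reviewer note: the size probe works because the low LIMIT bits that the hardware cannot program are hardwired and read back as ones after writing zero, so the first writable bit gives the region granularity. A worked example of the arithmetic with assumed readback values:

/* Illustration: deriving region_align/region_limit from probe readbacks. */
#include <stdint.h>
#include <stdio.h>

static int fls32(uint32_t v)	/* stand-in for the kernel's fls() */
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	/* Assume LIMIT reads back 0xFFFF after writing 0x0: 64KB granules. */
	uint32_t min = 0xFFFF;
	/* Assume UPPER_LIMIT keeps all 32 bits on a >= 4.60a core. */
	uint64_t max = 0xFFFFFFFF;

	printf("region_align = %u KB\n", (1u << fls32(min)) / 1024);
	printf("region_limit = 0x%llx\n",
	       (unsigned long long)((max << 32) | 0xFFFFFFFF));
	return 0;
}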

 void dw_pcie_iatu_detect(struct dw_pcie *pci)
 {
-	struct device *dev = pci->dev;
-	struct platform_device *pdev = to_platform_device(dev);
+	struct platform_device *pdev = to_platform_device(pci->dev);

-	if (pci->version >= 0x480A || (!pci->version &&
-				       dw_pcie_iatu_unroll_enabled(pci))) {
-		pci->iatu_unroll_enabled = true;
+	pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
+	if (pci->iatu_unroll_enabled) {
 		if (!pci->atu_base) {
 			struct resource *res =
 				platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
 			if (res) {
 				pci->atu_size = resource_size(res);
-				pci->atu_base = devm_ioremap_resource(dev, res);
+				pci->atu_base = devm_ioremap_resource(pci->dev, res);
 			}
 			if (!pci->atu_base || IS_ERR(pci->atu_base))
 				pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
@@ -683,23 +605,25 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
 		if (!pci->atu_size)
 			/* Pick a minimal default, enough for 8 in and 8 out windows */
 			pci->atu_size = SZ_4K;
+	} else {
+		pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+		pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+	}

-		dw_pcie_iatu_detect_regions_unroll(pci);
-	} else
-		dw_pcie_iatu_detect_regions(pci);
+	dw_pcie_iatu_detect_regions(pci);

 	dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
 		"enabled" : "disabled");

-	dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
-		 pci->num_ob_windows, pci->num_ib_windows);
+	dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
+		 pci->num_ob_windows, pci->num_ib_windows,
+		 pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
 }

 void dw_pcie_setup(struct dw_pcie *pci)
 {
-	struct device_node *np = pci->dev->of_node;
 	u32 val;
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;

 	if (pci->link_gen > 0)
 		dw_pcie_link_set_max_speed(pci, pci->link_gen);
@@ -726,6 +650,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
 	val |= PORT_LINK_DLL_LINK_EN;
 	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

+	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+		       PCIE_PL_CHK_REG_CHK_REG_START;
+		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+	}
+
 	of_property_read_u32(np, "num-lanes", &pci->num_lanes);
 	if (!pci->num_lanes) {
 		dev_dbg(pci->dev, "Using h/w default number of lanes\n");
@@ -772,11 +703,4 @@ void dw_pcie_setup(struct dw_pcie *pci)
 		break;
 	}
 	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-
-	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
-		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
-		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
-		       PCIE_PL_CHK_REG_CHK_REG_START;
-		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
-	}
 }

@@ -20,6 +20,29 @@
 #include <linux/pci-epc.h>
 #include <linux/pci-epf.h>

+/* DWC PCIe IP-core versions (native support since v4.70a) */
+#define DW_PCIE_VER_365A		0x3336352a
+#define DW_PCIE_VER_460A		0x3436302a
+#define DW_PCIE_VER_470A		0x3437302a
+#define DW_PCIE_VER_480A		0x3438302a
+#define DW_PCIE_VER_490A		0x3439302a
+#define DW_PCIE_VER_520A		0x3532302a
+
+#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
+	((_pci)->version _op DW_PCIE_VER_ ## _ver)
+
+#define dw_pcie_ver_is(_pci, _ver)	__dw_pcie_ver_cmp(_pci, _ver, ==)
+
+#define dw_pcie_ver_is_ge(_pci, _ver)	__dw_pcie_ver_cmp(_pci, _ver, >=)
+
+#define dw_pcie_ver_type_is(_pci, _ver, _type) \
+	(__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+	 __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
+
+#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
+	(__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+	 __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
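Reviewer note: the version constants are just the ASCII bytes the core reports in PORT_LOGIC.PCIE_VERSION_OFF -- 0x3436302a is '4' '6' '0' '*' -- so a plain numeric comparison orders releases correctly. A quick standalone check of the encoding:

/* Illustration: DW_PCIE_VER_460A is the string "460*" packed big-endian. */
#include <stdio.h>

int main(void)
{
	unsigned int ver = 0x3436302a;	/* DW_PCIE_VER_460A */

	printf("%c%c%c%c\n", (ver >> 24) & 0xff, (ver >> 16) & 0xff,
	       (ver >> 8) & 0xff, ver & 0xff);	/* prints 460* */
	printf("4.60a >= 4.60a: %d\n", ver >= 0x3436302a);
	return 0;
}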

 /* Parameters for the waiting for link up routine */
 #define LINK_WAIT_MAX_RETRIES		10
 #define LINK_WAIT_USLEEP_MIN		90000
@@ -77,10 +100,24 @@
 #define PCIE_PORT_MULTI_LANE_CTRL	0x8C0
 #define PORT_MLTI_UPCFG_SUPPORT		BIT(7)

+#define PCIE_VERSION_NUMBER		0x8F8
+#define PCIE_VERSION_TYPE		0x8FC
+
+/*
+ * iATU inbound and outbound windows CSRs. Before the IP-core v4.80a each
+ * iATU region CSRs had been indirectly accessible by means of the dedicated
+ * viewport selector. The iATU/eDMA CSRs space was re-designed in DWC PCIe
+ * v4.80a in a way so the viewport was unrolled into the directly accessible
+ * iATU/eDMA CSRs space.
+ */
 #define PCIE_ATU_VIEWPORT		0x900
-#define PCIE_ATU_REGION_INBOUND		BIT(31)
-#define PCIE_ATU_REGION_OUTBOUND	0
-#define PCIE_ATU_CR1			0x904
+#define PCIE_ATU_REGION_DIR_IB		BIT(31)
+#define PCIE_ATU_REGION_DIR_OB		0
+#define PCIE_ATU_VIEWPORT_BASE		0x904
+#define PCIE_ATU_UNROLL_BASE(dir, index) \
+	(((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
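Reviewer note: PCIE_ATU_UNROLL_BASE() packs the region index and direction into the unrolled CSR offset -- each region owns a 512-byte slot, with BIT(8) selecting the inbound half. A quick standalone check of the arithmetic (DIR_IB/DIR_OB are stand-ins for the real macros):

/* Illustration: unrolled iATU CSR offsets, 512 bytes per region. */
#include <stdio.h>

#define DIR_IB 1	/* stand-in for PCIE_ATU_REGION_DIR_IB */
#define DIR_OB 0	/* stand-in for PCIE_ATU_REGION_DIR_OB */

static unsigned int unroll_base(int dir, unsigned int index)
{
	return (index << 9) | (dir == DIR_IB ? (1u << 8) : 0);
}

int main(void)
{
	printf("OB region 0: 0x%03x\n", unroll_base(DIR_OB, 0));	/* 0x000 */
	printf("IB region 0: 0x%03x\n", unroll_base(DIR_IB, 0));	/* 0x100 */
	printf("OB region 2: 0x%03x\n", unroll_base(DIR_OB, 2));	/* 0x400 */
	printf("IB region 2: 0x%03x\n", unroll_base(DIR_IB, 2));	/* 0x500 */
	return 0;
}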
+#define PCIE_ATU_VIEWPORT_SIZE		0x2C
+#define PCIE_ATU_REGION_CTRL1		0x000
 #define PCIE_ATU_INCREASE_REGION_SIZE	BIT(13)
 #define PCIE_ATU_TYPE_MEM		0x0
 #define PCIE_ATU_TYPE_IO		0x2
@@ -88,19 +125,19 @@
 #define PCIE_ATU_TYPE_CFG1		0x5
 #define PCIE_ATU_TD			BIT(8)
 #define PCIE_ATU_FUNC_NUM(pf)		((pf) << 20)
-#define PCIE_ATU_CR2			0x908
+#define PCIE_ATU_REGION_CTRL2		0x004
 #define PCIE_ATU_ENABLE			BIT(31)
 #define PCIE_ATU_BAR_MODE_ENABLE	BIT(30)
 #define PCIE_ATU_FUNC_NUM_MATCH_EN	BIT(19)
-#define PCIE_ATU_LOWER_BASE		0x90C
-#define PCIE_ATU_UPPER_BASE		0x910
-#define PCIE_ATU_LIMIT			0x914
-#define PCIE_ATU_LOWER_TARGET		0x918
+#define PCIE_ATU_LOWER_BASE		0x008
+#define PCIE_ATU_UPPER_BASE		0x00C
+#define PCIE_ATU_LIMIT			0x010
+#define PCIE_ATU_LOWER_TARGET		0x014
 #define PCIE_ATU_BUS(x)			FIELD_PREP(GENMASK(31, 24), x)
 #define PCIE_ATU_DEV(x)			FIELD_PREP(GENMASK(23, 19), x)
 #define PCIE_ATU_FUNC(x)		FIELD_PREP(GENMASK(18, 16), x)
-#define PCIE_ATU_UPPER_TARGET		0x91C
-#define PCIE_ATU_UPPER_LIMIT		0x924
+#define PCIE_ATU_UPPER_TARGET		0x018
+#define PCIE_ATU_UPPER_LIMIT		0x020

 #define PCIE_MISC_CONTROL_1_OFF		0x8BC
 #define PCIE_DBI_RO_WR_EN		BIT(0)
@@ -117,19 +154,6 @@

 #define PCIE_PL_CHK_REG_ERR_ADDR	0xB28

-/*
- * iATU Unroll-specific register definitions
- * From 4.80 core version the address translation will be made by unroll
- */
-#define PCIE_ATU_UNR_REGION_CTRL1	0x00
-#define PCIE_ATU_UNR_REGION_CTRL2	0x04
-#define PCIE_ATU_UNR_LOWER_BASE		0x08
-#define PCIE_ATU_UNR_UPPER_BASE		0x0C
-#define PCIE_ATU_UNR_LOWER_LIMIT	0x10
-#define PCIE_ATU_UNR_LOWER_TARGET	0x14
-#define PCIE_ATU_UNR_UPPER_TARGET	0x18
-#define PCIE_ATU_UNR_UPPER_LIMIT	0x20
-
 /*
  * The default address offset between dbi_base and atu_base. Root controller
  * drivers are not required to initialize atu_base if the offset matches this
@@ -138,13 +162,6 @@
  */
 #define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)

-/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
-	((region) << 9)
-
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
-	(((region) << 9) | BIT(8))
-
 #define MAX_MSI_IRQS			256
 #define MAX_MSI_IRQS_PER_CTRL		32
 #define MAX_MSI_CTRLS			(MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
@@ -155,16 +172,10 @@
 #define MAX_IATU_IN			256
 #define MAX_IATU_OUT			256

-struct pcie_port;
 struct dw_pcie;
+struct dw_pcie_rp;
 struct dw_pcie_ep;

-enum dw_pcie_region_type {
-	DW_PCIE_REGION_UNKNOWN,
-	DW_PCIE_REGION_INBOUND,
-	DW_PCIE_REGION_OUTBOUND,
-};
-
 enum dw_pcie_device_mode {
 	DW_PCIE_UNKNOWN_TYPE,
 	DW_PCIE_EP_TYPE,
@@ -173,12 +184,14 @@ enum dw_pcie_device_mode {
 };

 struct dw_pcie_host_ops {
-	int (*host_init)(struct pcie_port *pp);
-	int (*msi_host_init)(struct pcie_port *pp);
+	int (*host_init)(struct dw_pcie_rp *pp);
+	void (*host_deinit)(struct dw_pcie_rp *pp);
+	int (*msi_host_init)(struct dw_pcie_rp *pp);
 };

-struct pcie_port {
+struct dw_pcie_rp {
 	bool			has_msi_ctrl:1;
+	bool			cfg0_io_shared:1;
 	u64			cfg0_base;
 	void __iomem		*va_cfg0_base;
 	u32			cfg0_size;
@@ -187,11 +200,11 @@ struct pcie_port {
 	u32			io_size;
 	int			irq;
 	const struct dw_pcie_host_ops *ops;
-	int			msi_irq;
+	int			msi_irq[MAX_MSI_CTRLS];
 	struct irq_domain	*irq_domain;
 	struct irq_domain	*msi_domain;
-	u16			msi_msg;
 	dma_addr_t		msi_data;
+	struct page		*msi_page;
 	struct irq_chip		*msi_irq_chip;
 	u32			num_vectors;
 	u32			irq_mask[MAX_MSI_CTRLS];
@@ -200,12 +213,6 @@ struct pcie_port {
 	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
 };

-enum dw_pcie_as_type {
-	DW_PCIE_AS_UNKNOWN,
-	DW_PCIE_AS_MEM,
-	DW_PCIE_AS_IO,
-};
-
 struct dw_pcie_ep_ops {
 	void	(*ep_init)(struct dw_pcie_ep *ep);
 	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
@@ -261,20 +268,21 @@ struct dw_pcie {
 	struct device		*dev;
 	void __iomem		*dbi_base;
 	void __iomem		*dbi_base2;
-	/* Used when iatu_unroll_enabled is true */
 	void __iomem		*atu_base;
 	size_t			atu_size;
 	u32			num_ib_windows;
 	u32			num_ob_windows;
-	struct pcie_port	pp;
+	u32			region_align;
+	u64			region_limit;
+	struct dw_pcie_rp	pp;
 	struct dw_pcie_ep	ep;
 	const struct dw_pcie_ops *ops;
-	unsigned int		version;
+	u32			version;
+	u32			type;
 	int			num_lanes;
 	int			link_gen;
 	u8			n_fts[2];
 	bool			iatu_unroll_enabled: 1;
-	bool			io_cfg_atu_shared: 1;
 };
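Reviewer note: with the version kept in native format plus a separate type field, detection reduces to two DBI reads of the PORT_LOGIC version registers. The dw_pcie_version_detect() body is not part of this diff; one plausible shape of it, sketched under that assumption, is:

/* Sketch only -- only the declaration appears in this diff, not the body. */
void dw_pcie_version_detect(struct dw_pcie *pci)
{
	u32 ver;

	/* Keep the raw PCIE_VERSION_OFF value; don't re-encode it. */
	ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
	if (ver)
		pci->version = ver;

	ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
	if (ver)
		pci->type = ver;
}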

 #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -282,6 +290,8 @@ struct dw_pcie {
 #define to_dw_pcie_from_ep(endpoint)   \
 		container_of((endpoint), struct dw_pcie, ep)

+void dw_pcie_version_detect(struct dw_pcie *pci);
+
 u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
 u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);

@@ -294,17 +304,13 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
 int dw_pcie_link_up(struct dw_pcie *pci);
 void dw_pcie_upconfig_setup(struct dw_pcie *pci);
 int dw_pcie_wait_for_link(struct dw_pcie *pci);
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
-			       int type, u64 cpu_addr, u64 pci_addr,
-			       u64 size);
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-				  int type, u64 cpu_addr, u64 pci_addr,
-				  u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+			      u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+				 int type, u64 cpu_addr, u64 pci_addr, u64 size);
 int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-			     int bar, u64 cpu_addr,
-			     enum dw_pcie_as_type as_type);
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
-			 enum dw_pcie_region_type type);
+			     int type, u64 cpu_addr, u8 bar);
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
 void dw_pcie_setup(struct dw_pcie *pci);
 void dw_pcie_iatu_detect(struct dw_pcie *pci);

@@ -365,34 +371,49 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
 	dw_pcie_writel_dbi(pci, reg, val);
 }

+static inline int dw_pcie_start_link(struct dw_pcie *pci)
+{
+	if (pci->ops && pci->ops->start_link)
+		return pci->ops->start_link(pci);
+
+	return 0;
+}
+
+static inline void dw_pcie_stop_link(struct dw_pcie *pci)
+{
+	if (pci->ops && pci->ops->stop_link)
+		pci->ops->stop_link(pci);
+}
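Reviewer note: the wrappers keep every caller from open-coding the "is this op implemented" check. A hypothetical bring-up fragment using them (the function name is illustrative, not from this diff):

/* Hypothetical caller: the ops checks now live inside the wrappers. */
static int example_bring_up(struct dw_pcie *pci)
{
	int ret;

	ret = dw_pcie_start_link(pci);	/* no-op if the glue lacks start_link */
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		dw_pcie_stop_link(pci);	/* likewise a no-op without stop_link */

	return ret;
}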

 #ifdef CONFIG_PCIE_DW_HOST
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_setup_rc(struct pcie_port *pp);
-int dw_pcie_host_init(struct pcie_port *pp);
-void dw_pcie_host_deinit(struct pcie_port *pp);
-int dw_pcie_allocate_domains(struct pcie_port *pp);
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
 void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
 				       int where);
 #else
-static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
 {
 	return IRQ_NONE;
 }

-static inline void dw_pcie_setup_rc(struct pcie_port *pp)
-{
-}
-
-static inline int dw_pcie_host_init(struct pcie_port *pp)
+static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
 {
 	return 0;
 }

-static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+	return 0;
+}
+
+static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
 {
 }

-static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
 {
 	return 0;
 }

@@ -186,7 +186,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
 	return 0;
 }

-static int rockchip_pcie_host_init(struct pcie_port *pp)
+static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
@@ -288,7 +288,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rockchip_pcie *rockchip;
-	struct pcie_port *pp;
+	struct dw_pcie_rp *pp;
 	int ret;

 	rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);

@@ -236,7 +236,7 @@ err:
 	return ret;
 }

-static int fu740_pcie_host_init(struct pcie_port *pp)
+static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct fu740_pcie *afp = to_fu740_pcie(pci);

@@ -74,7 +74,7 @@ static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
 	writel(val, histb_pcie->ctrl + reg);
 }

-static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -88,7 +88,7 @@ static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
 	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
 }

-static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -180,7 +180,7 @@ static int histb_pcie_start_link(struct dw_pcie *pci)
 	return 0;
 }

-static int histb_pcie_host_init(struct pcie_port *pp)
+static int histb_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -219,7 +219,7 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie)
 	regulator_disable(hipcie->vpcie);
 }

-static int histb_pcie_host_enable(struct pcie_port *pp)
+static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -297,7 +297,7 @@ static int histb_pcie_probe(struct platform_device *pdev)
 {
 	struct histb_pcie *hipcie;
 	struct dw_pcie *pci;
-	struct pcie_port *pp;
+	struct dw_pcie_rp *pp;
 	struct device_node *np = pdev->dev.of_node;
 	struct device *dev = &pdev->dev;
 	enum of_gpio_flags of_flags;

@@ -58,10 +58,6 @@
 #define BUS_IATU_OFFSET			SZ_256M
 #define RESET_INTERVAL_MS		100

-struct intel_pcie_soc {
-	unsigned int	pcie_ver;
-};
-
 struct intel_pcie {
 	struct dw_pcie		pci;
 	void __iomem		*app_base;
@@ -306,7 +302,11 @@ static int intel_pcie_host_setup(struct intel_pcie *pcie)
 	intel_pcie_ltssm_disable(pcie);
 	intel_pcie_link_setup(pcie);
 	intel_pcie_init_n_fts(pci);
-	dw_pcie_setup_rc(&pci->pp);
+
+	ret = dw_pcie_setup_rc(&pci->pp);
+	if (ret)
+		goto app_init_err;
+
 	dw_pcie_upconfig_setup(pci);

 	intel_pcie_device_rst_deassert(pcie);
@@ -343,7 +343,7 @@ static void __intel_pcie_remove(struct intel_pcie *pcie)
 static int intel_pcie_remove(struct platform_device *pdev)
 {
 	struct intel_pcie *pcie = platform_get_drvdata(pdev);
-	struct pcie_port *pp = &pcie->pci.pp;
+	struct dw_pcie_rp *pp = &pcie->pci.pp;

 	dw_pcie_host_deinit(pp);
 	__intel_pcie_remove(pcie);
@@ -373,7 +373,7 @@ static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
 	return intel_pcie_host_setup(pcie);
 }

-static int intel_pcie_rc_init(struct pcie_port *pp)
+static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
@@ -394,16 +394,11 @@ static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
 	.host_init = intel_pcie_rc_init,
 };

-static const struct intel_pcie_soc pcie_data = {
-	.pcie_ver = 0x520A,
-};
-
 static int intel_pcie_probe(struct platform_device *pdev)
 {
-	const struct intel_pcie_soc *data;
 	struct device *dev = &pdev->dev;
 	struct intel_pcie *pcie;
-	struct pcie_port *pp;
+	struct dw_pcie_rp *pp;
 	struct dw_pcie *pci;
 	int ret;

@@ -424,12 +419,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;

-	data = device_get_match_data(dev);
-	if (!data)
-		return -ENODEV;
-
 	pci->ops = &intel_pcie_ops;
-	pci->version = data->pcie_ver;
 	pp->ops = &intel_pcie_dw_ops;

 	ret = dw_pcie_host_init(pp);
@@ -447,7 +437,7 @@ static const struct dev_pm_ops intel_pcie_pm_ops = {
 };

 static const struct of_device_id of_intel_pcie_match[] = {
-	{ .compatible = "intel,lgm-pcie", .data = &pcie_data },
+	{ .compatible = "intel,lgm-pcie" },
 	{}
 };

@@ -231,7 +231,7 @@ static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
 	struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	u32 val, mask, status;
-	struct pcie_port *pp;
+	struct dw_pcie_rp *pp;

 	/*
 	 * Keem Bay PCIe Controller provides an additional IP logic on top of
@@ -332,13 +332,13 @@ static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
 				      struct platform_device *pdev)
 {
 	struct dw_pcie *pci = &pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	u32 val;
 	int ret;

 	pp->ops = &keembay_pcie_host_ops;
-	pp->msi_irq = -ENODEV;
+	pp->msi_irq[0] = -ENODEV;

 	ret = keembay_pcie_setup_msi_irq(pcie);
 	if (ret)

@@ -620,7 +620,7 @@ static int kirin_pcie_start_link(struct dw_pcie *pci)
 	return 0;
 }

-static int kirin_pcie_host_init(struct pcie_port *pp)
+static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	pp->bridge->ops = &kirin_pci_ops;

@@ -1381,7 +1381,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
 	return 0;
 }

-static int qcom_pcie_host_init(struct pcie_port *pp)
+static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -1564,7 +1564,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 static int qcom_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct pcie_port *pp;
+	struct dw_pcie_rp *pp;
 	struct dw_pcie *pci;
 	struct qcom_pcie *pcie;
 	const struct qcom_pcie_cfg *pcie_cfg;

@@ -85,7 +85,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
 	struct spear13xx_pcie *spear13xx_pcie = arg;
 	struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
 	struct dw_pcie *pci = spear13xx_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	unsigned int status;

 	status = readl(&app_reg->int_sts);
@@ -121,7 +121,7 @@ static int spear13xx_pcie_link_up(struct dw_pcie *pci)
 	return 0;
 }

-static int spear13xx_pcie_host_init(struct pcie_port *pp)
+static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
@@ -155,7 +155,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
 				   struct platform_device *pdev)
 {
 	struct dw_pcie *pci = spear13xx_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	int ret;

@@ -172,7 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
 	}

 	pp->ops = &spear13xx_pcie_host_ops;
-	pp->msi_irq = -ENODEV;
+	pp->msi_irq[0] = -ENODEV;

 	ret = dw_pcie_host_init(pp);
 	if (ret) {

@@ -39,7 +39,8 @@ static int tegra194_acpi_init(struct pci_config_window *cfg)
 static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
 			  u32 val, u32 reg)
 {
-	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+	u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
+		     PCIE_ATU_VIEWPORT_BASE;

 	writel(val, pcie_ecam->iatu_base + offset + reg);
 }
@@ -58,8 +59,8 @@ static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
 		      PCIE_ATU_LIMIT);
 	atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
 		      PCIE_ATU_UPPER_TARGET);
-	atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
-	atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+	atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
+	atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
 }

 static void __iomem *tegra194_map_bus(struct pci_bus *bus,

@@ -311,7 +311,7 @@ struct tegra_pcie_soc {
 	enum dw_pcie_device_mode mode;
 };

-static void apply_bad_link_workaround(struct pcie_port *pp)
+static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
@@ -349,7 +349,7 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
 {
 	struct tegra194_pcie *pcie = arg;
 	struct dw_pcie *pci = &pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	u32 val, tmp;
 	u16 val_w;

@@ -698,7 +698,7 @@ static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; }
 static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
 #endif

-static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
@@ -736,7 +736,7 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
 		   val_w);
 }

-static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
@@ -757,7 +757,7 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
 	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
 }

-static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
@@ -770,7 +770,7 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
 	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
 }

-static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
@@ -851,7 +851,7 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
 	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
 }

-static int tegra194_pcie_host_init(struct pcie_port *pp)
+static int tegra194_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
@@ -916,7 +916,7 @@ static int tegra194_pcie_start_link(struct dw_pcie *pci)
 {
 	u32 val, offset, speed, tmp;
 	struct tegra194_pcie *pcie = to_tegra_pcie(pci);
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	bool retry = true;

 	if (pcie->mode == DW_PCIE_EP_TYPE) {
@@ -1212,7 +1212,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,

 static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
 {
-	struct pcie_port *pp = &pcie->pci.pp;
+	struct dw_pcie_rp *pp = &pcie->pci.pp;
 	struct pci_bus *child, *root_bus = NULL;
 	struct pci_dev *pdev;

@@ -1443,7 +1443,7 @@ static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
 static int tegra_pcie_init_controller(struct tegra194_pcie *pcie)
 {
 	struct dw_pcie *pci = &pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	int ret;

 	ret = tegra_pcie_config_controller(pcie, false);
@@ -1961,7 +1961,7 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct resource *atu_dma_res;
 	struct tegra194_pcie *pcie;
-	struct pcie_port *pp;
+	struct dw_pcie_rp *pp;
 	struct dw_pcie *pci;
 	struct phy **phys;
 	char *name;
@@ -1979,7 +1979,6 @@ static int tegra194_pcie_probe(struct platform_device *pdev)
 	pci->ops = &tegra_dw_pcie_ops;
 	pci->n_fts[0] = N_FTS_VAL;
 	pci->n_fts[1] = FTS_VAL;
-	pci->version = 0x490A;

 	pp = &pci->pp;
 	pp->num_vectors = MAX_MSI_IRQS;
@@ -2262,7 +2261,7 @@ static void tegra194_pcie_shutdown(struct platform_device *pdev)

 	disable_irq(pcie->pci.pp.irq);
 	if (IS_ENABLED(CONFIG_PCI_MSI))
-		disable_irq(pcie->pci.pp.msi_irq);
+		disable_irq(pcie->pci.pp.msi_irq[0]);

 	tegra194_pcie_pme_turnoff(pcie);
 	tegra_pcie_unconfig_controller(pcie);

@@ -171,7 +171,7 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)

 static void uniphier_pcie_irq_mask(struct irq_data *d)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
 	unsigned long flags;
@@ -188,7 +188,7 @@ static void uniphier_pcie_irq_mask(struct irq_data *d)

 static void uniphier_pcie_irq_unmask(struct irq_data *d)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
 	unsigned long flags;
@@ -225,7 +225,7 @@ static const struct irq_domain_ops uniphier_intx_domain_ops = {

 static void uniphier_pcie_irq_handler(struct irq_desc *desc)
 {
-	struct pcie_port *pp = irq_desc_get_handler_data(desc);
+	struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -258,7 +258,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }

-static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
@@ -295,7 +295,7 @@ out_put_node:
 	return ret;
 }

-static int uniphier_pcie_host_init(struct pcie_port *pp)
+static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct uniphier_pcie *pcie = to_uniphier_pcie(pci);

@@ -178,7 +178,7 @@ static void visconti_pcie_stop_link(struct dw_pcie *pci)
  */
 static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
 {
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;

 	return cpu_addr & ~pp->io_base;
 }
@@ -190,7 +190,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 	.stop_link = visconti_pcie_stop_link,
 };

-static int visconti_pcie_host_init(struct pcie_port *pp)
+static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
@@ -278,7 +278,7 @@ static int visconti_add_pcie_port(struct visconti_pcie *pcie,
 				  struct platform_device *pdev)
 {
 	struct dw_pcie *pci = &pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;

 	pp->irq = platform_get_irq_byname(pdev, "intr");
 	if (pp->irq < 0)