2019-05-27 06:55:01 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2011-09-19 17:45:05 +00:00
|
|
|
/*
|
|
|
|
* Support PCI/PCIe on PowerNV platforms
|
|
|
|
*
|
|
|
|
* Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/io.h>
|
2011-09-19 17:45:06 +00:00
|
|
|
#include <linux/msi.h>
|
2013-05-21 03:33:09 +00:00
|
|
|
#include <linux/iommu.h>
|
2018-03-02 09:56:11 +00:00
|
|
|
#include <linux/sched/mm.h>
|
2011-09-19 17:45:05 +00:00
|
|
|
|
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/pci-bridge.h>
|
|
|
|
#include <asm/machdep.h>
|
2013-03-05 21:12:37 +00:00
|
|
|
#include <asm/msi_bitmap.h>
|
2011-09-19 17:45:05 +00:00
|
|
|
#include <asm/ppc-pci.h>
|
2016-05-20 06:41:40 +00:00
|
|
|
#include <asm/pnv-pci.h>
|
2011-09-19 17:45:05 +00:00
|
|
|
#include <asm/opal.h>
|
|
|
|
#include <asm/iommu.h>
|
|
|
|
#include <asm/tce.h>
|
2012-03-15 18:18:00 +00:00
|
|
|
#include <asm/firmware.h>
|
2013-06-20 05:21:15 +00:00
|
|
|
#include <asm/eeh_event.h>
|
|
|
|
#include <asm/eeh.h>
|
2011-09-19 17:45:05 +00:00
|
|
|
|
|
|
|
#include "powernv.h"
|
|
|
|
#include "pci.h"
|
|
|
|
|
2018-03-02 09:56:11 +00:00
|
|
|
static DEFINE_MUTEX(tunnel_mutex);
|
powerpc/powernv: Enable PCI peer-to-peer
P9 has support for PCI peer-to-peer, enabling a device to write in the
MMIO space of another device directly, without interrupting the CPU.
This patch adds support for it on powernv, by adding a new API to be
called by drivers. The pnv_pci_set_p2p(...) call configures an
'initiator', i.e the device which will issue the MMIO operation, and a
'target', i.e. the device on the receiving side.
P9 really only supports MMIO stores for the time being but that's
expected to change in the future, so the API allows to define both
load and store operations.
/* PCI p2p descriptor */
#define OPAL_PCI_P2P_ENABLE 0x1
#define OPAL_PCI_P2P_LOAD 0x2
#define OPAL_PCI_P2P_STORE 0x4
int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target,
u64 desc)
It uses a new OPAL call, as the configuration magic is done on the
PHBs by skiboot.
Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Reviewed-by: Russell Currey <ruscur@russell.cc>
[mpe: Drop unrelated OPAL calls, s/uint64_t/u64/, minor formatting]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-08-04 09:55:14 +00:00
|
|
|
|
2016-05-20 06:41:40 +00:00
|
|
|
/*
 * Resolve the OPAL slot identifier for the device described by @np.
 *
 * Reads the device's config address from its "reg" property, then walks
 * up the device tree to the hosting PHB (IODA2/IODA3 or opencapi PHB)
 * and combines the PHB's "ibm,opal-phbid" with the bdfn into *@id.
 *
 * Returns 0 on success, -ENXIO if a required property is missing,
 * -ENODEV if no supported PHB is found above @np.
 */
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *node;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	/* "reg" holds the config address; extract the bus/dev/fn field */
	bdfn = ((bdfn & 0x00ffff00) >> 8);

	/*
	 * Walk towards the root. of_get_next_parent() drops the reference
	 * on the child while taking one on the parent, so each level is
	 * balanced; the explicit of_node_put() calls below drop the
	 * reference held on the node we stop at. (The previous version
	 * leaked a reference per level and another one on success.)
	 */
	for (node = of_node_get(np); node; node = of_get_next_parent(node)) {
		if (!PCI_DN(node)) {
			of_node_put(node);
			break;
		}

		if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda3-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
			continue;

		ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(node);
			return -ENXIO;
		}

		/* opencapi PHBs are addressed per-PHB, not per-slot */
		if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
			*id = PCI_PHB_SLOT_ID(phbid);
		else
			*id = PCI_SLOT_ID(phbid, bdfn);
		of_node_put(node);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
|
|
|
|
|
2016-05-20 06:41:41 +00:00
|
|
|
/*
 * Fetch a flattened device sub-tree from firmware for @phandle into
 * @buf (at most @len bytes).
 *
 * Returns the firmware result (>= 0, the required/used size) on
 * success, -ENXIO if the OPAL call is unavailable, -EIO on failure.
 */
int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);

	return (rc < OPAL_SUCCESS) ? -EIO : rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
|
|
|
|
|
|
|
|
/*
 * Query firmware for the presence state of the slot identified by @id,
 * storing the result in *@state.
 *
 * Returns 0 on success, -ENXIO if the OPAL call is unavailable,
 * -EIO on firmware failure.
 */
int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);

	return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
|
|
|
|
|
|
|
|
/*
 * Query firmware for the power state of the slot identified by @id,
 * storing the result in *@state.
 *
 * Returns 0 on success, -ENXIO if the OPAL call is unavailable,
 * -EIO on firmware failure.
 */
int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);

	return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
|
|
|
|
|
|
|
|
/*
 * Ask firmware to change the power state of slot @id to @state.
 *
 * The request may complete synchronously or asynchronously. Returns:
 *   0   - completed synchronously
 *   1   - completed asynchronously; if @msg is non-NULL it is filled
 *         with the OPAL completion message
 *   < 0 - error (-ENXIO when the OPAL call is unavailable, -EIO on
 *         firmware failure, or the error from the async token layer)
 */
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg response;
	int tok, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	tok = opal_async_get_token_interruptible();
	if (unlikely(tok < 0))
		return tok;

	rc = opal_pci_set_power_state(tok, id, (uint64_t)&state);
	if (rc != OPAL_ASYNC_COMPLETION) {
		/* Either completed synchronously or failed outright */
		ret = (rc == OPAL_SUCCESS) ? 0 : -EIO;
		goto out;
	}

	/* Asynchronous completion: wait for the firmware message */
	ret = opal_async_wait_response(tok, &response);
	if (ret < 0)
		goto out;

	if (msg) {
		ret = 1;
		memcpy(msg, &response, sizeof(response));
	}

out:
	opal_async_release_token(tok);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
|
|
|
|
|
2017-06-14 04:19:58 +00:00
|
|
|
/* Nicely print the contents of the PE State Tables (PEST). */
|
|
|
|
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
|
|
|
|
{
|
|
|
|
__be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
|
|
|
|
bool dup = false;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < pest_size; i++) {
|
|
|
|
__be64 peA = be64_to_cpu(pestA[i]);
|
|
|
|
__be64 peB = be64_to_cpu(pestB[i]);
|
|
|
|
|
|
|
|
if (peA != prevA || peB != prevB) {
|
|
|
|
if (dup) {
|
|
|
|
pr_info("PE[..%03x] A/B: as above\n", i-1);
|
|
|
|
dup = false;
|
|
|
|
}
|
|
|
|
prevA = peA;
|
|
|
|
prevB = peB;
|
|
|
|
if (peA & PNV_IODA_STOPPED_STATE ||
|
|
|
|
peB & PNV_IODA_STOPPED_STATE)
|
|
|
|
pr_info("PE[%03x] A/B: %016llx %016llx\n",
|
|
|
|
i, peA, peB);
|
|
|
|
} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
|
|
|
|
peB & PNV_IODA_STOPPED_STATE)) {
|
|
|
|
dup = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-22 08:28:45 +00:00
|
|
|
/*
 * Dump the P7IOC-format PHB diagnostic data carried in @common.
 *
 * Each register group is printed only if at least one register in the
 * group is non-zero, to keep the log concise. All fields in the OPAL
 * buffer are big-endian and converted before printing.
 */
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	/* The common header is the leading part of the P7IOC layout */
	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Finally dump the per-PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}
|
|
|
|
|
2013-11-22 08:28:45 +00:00
|
|
|
/*
 * Dump the PHB3-format PHB diagnostic data carried in @common.
 *
 * Mirrors the P7IOC dump, with PHB3-specific FIR registers (nFir*)
 * instead of the P7IOC PLSSR/CSR pair. Register groups are printed
 * only when non-zero; all OPAL buffer fields are big-endian.
 */
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	/* The common header is the leading part of the PHB3 layout */
	data = (struct OpalIoPhb3ErrorData*)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Finally dump the per-PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}
|
|
|
|
|
2017-06-14 04:20:00 +00:00
|
|
|
static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
|
|
|
|
struct OpalIoPhbErrorCommon *common)
|
|
|
|
{
|
|
|
|
struct OpalIoPhb4ErrorData *data;
|
|
|
|
|
|
|
|
data = (struct OpalIoPhb4ErrorData*)common;
|
|
|
|
pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
|
|
|
|
hose->global_number, be32_to_cpu(common->version));
|
|
|
|
if (data->brdgCtl)
|
|
|
|
pr_info("brdgCtl: %08x\n",
|
|
|
|
be32_to_cpu(data->brdgCtl));
|
|
|
|
if (data->deviceStatus || data->slotStatus ||
|
|
|
|
data->linkStatus || data->devCmdStatus ||
|
|
|
|
data->devSecStatus)
|
|
|
|
pr_info("RootSts: %08x %08x %08x %08x %08x\n",
|
|
|
|
be32_to_cpu(data->deviceStatus),
|
|
|
|
be32_to_cpu(data->slotStatus),
|
|
|
|
be32_to_cpu(data->linkStatus),
|
|
|
|
be32_to_cpu(data->devCmdStatus),
|
|
|
|
be32_to_cpu(data->devSecStatus));
|
|
|
|
if (data->rootErrorStatus || data->uncorrErrorStatus ||
|
|
|
|
data->corrErrorStatus)
|
|
|
|
pr_info("RootErrSts: %08x %08x %08x\n",
|
|
|
|
be32_to_cpu(data->rootErrorStatus),
|
|
|
|
be32_to_cpu(data->uncorrErrorStatus),
|
|
|
|
be32_to_cpu(data->corrErrorStatus));
|
|
|
|
if (data->tlpHdr1 || data->tlpHdr2 ||
|
|
|
|
data->tlpHdr3 || data->tlpHdr4)
|
|
|
|
pr_info("RootErrLog: %08x %08x %08x %08x\n",
|
|
|
|
be32_to_cpu(data->tlpHdr1),
|
|
|
|
be32_to_cpu(data->tlpHdr2),
|
|
|
|
be32_to_cpu(data->tlpHdr3),
|
|
|
|
be32_to_cpu(data->tlpHdr4));
|
|
|
|
if (data->sourceId)
|
|
|
|
pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
|
|
|
|
if (data->nFir)
|
|
|
|
pr_info("nFir: %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->nFir),
|
|
|
|
be64_to_cpu(data->nFirMask),
|
|
|
|
be64_to_cpu(data->nFirWOF));
|
|
|
|
if (data->phbPlssr || data->phbCsr)
|
|
|
|
pr_info("PhbSts: %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbPlssr),
|
|
|
|
be64_to_cpu(data->phbCsr));
|
|
|
|
if (data->lemFir)
|
|
|
|
pr_info("Lem: %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->lemFir),
|
|
|
|
be64_to_cpu(data->lemErrorMask),
|
|
|
|
be64_to_cpu(data->lemWOF));
|
|
|
|
if (data->phbErrorStatus)
|
|
|
|
pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbErrorStatus),
|
|
|
|
be64_to_cpu(data->phbFirstErrorStatus),
|
|
|
|
be64_to_cpu(data->phbErrorLog0),
|
|
|
|
be64_to_cpu(data->phbErrorLog1));
|
|
|
|
if (data->phbTxeErrorStatus)
|
|
|
|
pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbTxeErrorStatus),
|
|
|
|
be64_to_cpu(data->phbTxeFirstErrorStatus),
|
|
|
|
be64_to_cpu(data->phbTxeErrorLog0),
|
|
|
|
be64_to_cpu(data->phbTxeErrorLog1));
|
|
|
|
if (data->phbRxeArbErrorStatus)
|
|
|
|
pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbRxeArbErrorStatus),
|
|
|
|
be64_to_cpu(data->phbRxeArbFirstErrorStatus),
|
|
|
|
be64_to_cpu(data->phbRxeArbErrorLog0),
|
|
|
|
be64_to_cpu(data->phbRxeArbErrorLog1));
|
|
|
|
if (data->phbRxeMrgErrorStatus)
|
|
|
|
pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbRxeMrgErrorStatus),
|
|
|
|
be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
|
|
|
|
be64_to_cpu(data->phbRxeMrgErrorLog0),
|
|
|
|
be64_to_cpu(data->phbRxeMrgErrorLog1));
|
|
|
|
if (data->phbRxeTceErrorStatus)
|
|
|
|
pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbRxeTceErrorStatus),
|
|
|
|
be64_to_cpu(data->phbRxeTceFirstErrorStatus),
|
|
|
|
be64_to_cpu(data->phbRxeTceErrorLog0),
|
|
|
|
be64_to_cpu(data->phbRxeTceErrorLog1));
|
|
|
|
|
|
|
|
if (data->phbPblErrorStatus)
|
|
|
|
pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbPblErrorStatus),
|
|
|
|
be64_to_cpu(data->phbPblFirstErrorStatus),
|
|
|
|
be64_to_cpu(data->phbPblErrorLog0),
|
|
|
|
be64_to_cpu(data->phbPblErrorLog1));
|
|
|
|
if (data->phbPcieDlpErrorStatus)
|
|
|
|
pr_info("PcieDlp: %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbPcieDlpErrorLog1),
|
|
|
|
be64_to_cpu(data->phbPcieDlpErrorLog2),
|
|
|
|
be64_to_cpu(data->phbPcieDlpErrorStatus));
|
|
|
|
if (data->phbRegbErrorStatus)
|
|
|
|
pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
|
|
|
|
be64_to_cpu(data->phbRegbErrorStatus),
|
|
|
|
be64_to_cpu(data->phbRegbFirstErrorStatus),
|
|
|
|
be64_to_cpu(data->phbRegbErrorLog0),
|
|
|
|
be64_to_cpu(data->phbRegbErrorLog1));
|
|
|
|
|
|
|
|
|
|
|
|
pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
|
|
|
|
}
|
|
|
|
|
2013-11-22 08:28:45 +00:00
|
|
|
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
|
|
|
|
unsigned char *log_buff)
|
|
|
|
{
|
|
|
|
struct OpalIoPhbErrorCommon *common;
|
|
|
|
|
|
|
|
if (!hose || !log_buff)
|
|
|
|
return;
|
|
|
|
|
|
|
|
common = (struct OpalIoPhbErrorCommon *)log_buff;
|
2014-06-09 08:58:51 +00:00
|
|
|
switch (be32_to_cpu(common->ioType)) {
|
2013-11-22 08:28:45 +00:00
|
|
|
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
|
|
|
|
pnv_pci_dump_p7ioc_diag_data(hose, common);
|
|
|
|
break;
|
|
|
|
case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
|
|
|
|
pnv_pci_dump_phb3_diag_data(hose, common);
|
2011-11-29 18:22:53 +00:00
|
|
|
break;
|
2017-06-14 04:20:00 +00:00
|
|
|
case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
|
|
|
|
pnv_pci_dump_phb4_diag_data(hose, common);
|
|
|
|
break;
|
2011-11-29 18:22:53 +00:00
|
|
|
default:
|
2013-11-22 08:28:45 +00:00
|
|
|
pr_warn("%s: Unrecognized ioType %d\n",
|
2014-06-09 08:58:51 +00:00
|
|
|
__func__, be32_to_cpu(common->ioType));
|
2011-11-29 18:22:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Handle an EEH freeze condition on @pe_no: snapshot the PHB diag-data,
 * try to clear the frozen state, and dump the diag-data if clearing
 * failed. Runs under phb->lock so the shared diag buffer and the
 * OPAL calls are serialised.
 */
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If PHB supports compound PE, to handle it */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		/* No compound-PE support: clear the freeze directly via OPAL */
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}
|
|
|
|
|
2015-03-17 05:15:03 +00:00
|
|
|
/*
 * After a config-space access, check whether the device's PE is in an
 * EEH frozen state and, if so, kick off freeze handling (clear + diag
 * dump) via pnv_pci_handle_eeh_config().
 */
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not
	 * setup that yet. So all ER errors should be mapped to
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

	/*
	 * Fetch frozen state. If the PHB support compound PE,
	 * we need handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If PHB supports compound PE, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
|
|
|
|
|
2015-03-17 05:15:03 +00:00
|
|
|
/*
 * Read @size bytes at @where from the config space of the device
 * described by @pdn, via the firmware (OPAL) config accessors.
 *
 * On OPAL failure *val is set to all-ones of the access width (the
 * PCI "no device" pattern) and PCIBIOS_SUCCESSFUL is still returned;
 * EEH failure detection happens in the callers.
 */
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	/* OPAL addresses config space by (bus << 8) | devfn */
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		/* OPAL returns the raw big-endian register value */
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}
|
|
|
|
|
2015-03-17 05:15:03 +00:00
|
|
|
/*
 * Write @val (@size bytes wide) at offset @where into the config space
 * of the device described by @pdn, via the firmware (OPAL) config
 * accessors. OPAL call results are not checked; failures surface
 * through the EEH machinery instead.
 */
int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	/* OPAL addresses config space by (bus << 8) | devfn */
	u32 config_addr = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);

	if (size == 1)
		opal_pci_config_write_byte(phb->opal_id, config_addr,
					   where, val);
	else if (size == 2)
		opal_pci_config_write_half_word(phb->opal_id, config_addr,
						where, val);
	else if (size == 4)
		opal_pci_config_write_word(phb->opal_id, config_addr,
					   where, val);
	else
		return PCIBIOS_FUNC_NOT_SUPPORTED;

	return PCIBIOS_SUCCESSFUL;
}
|
|
|
|
|
2021-05-18 20:40:41 +00:00
|
|
|
#ifdef CONFIG_EEH
|
2015-03-17 05:15:03 +00:00
|
|
|
/*
 * Decide whether a config-space access to @pdn should be allowed.
 *
 * Returns false while the device's PE has config access blocked
 * (e.g. during PE reset) or after the device has been permanently
 * removed by EEH, so the PCI core sees it as absent.
 */
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
|
|
|
|
#else
|
2015-03-17 05:15:03 +00:00
|
|
|
/*
 * !CONFIG_EEH stub: without EEH support there is never a reason to
 * block config space access, so always report the device as usable.
 *
 * Fix: the original declaration had no return type (implicit int),
 * which is invalid in modern C; callers use the result as a boolean,
 * so declare it bool to match the CONFIG_EEH variant.
 */
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
|
2014-04-24 08:00:12 +00:00
|
|
|
#endif /* CONFIG_EEH */
|
2011-09-19 17:45:05 +00:00
|
|
|
|
2013-06-27 05:46:48 +00:00
|
|
|
/*
 * pci_ops read accessor for PowerNV PHBs.
 *
 * Looks up the pci_dn for the device, performs the config read via the
 * pci_dn helpers and, on EEH-capable PHBs, screens an all-ones result
 * against a possible frozen PE before reporting it back to the core.
 */
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	/* Present all-ones data if we bail out before the actual read. */
	*val = 0xFFFFFFFF;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn || !pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);

	phb = pdn->phb->private_data;
	if ((phb->flags & PNV_PHB_FLAG_EEH) && pdn->edev) {
		/*
		 * All-ones data may mean a frozen PE rather than real
		 * register content; let EEH decide.
		 */
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * pci_ops write accessor for PowerNV PHBs.
 *
 * Mirrors pnv_pci_read_config(): resolve the pci_dn, perform the write,
 * and on PHBs without native EEH support poke the EEH state explicitly
 * afterwards.
 */
static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn || !pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);

	/* Without PHB-level EEH, check for a freeze after each access. */
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}
|
|
|
|
|
2011-09-19 17:45:05 +00:00
|
|
|
struct pci_ops pnv_pci_ops = {
|
2013-06-27 05:46:48 +00:00
|
|
|
.read = pnv_pci_read_config,
|
2011-09-19 17:45:05 +00:00
|
|
|
.write = pnv_pci_write_config,
|
|
|
|
};
|
|
|
|
|
2015-06-05 06:35:09 +00:00
|
|
|
struct iommu_table *pnv_pci_table_alloc(int nid)
|
|
|
|
{
|
|
|
|
struct iommu_table *tbl;
|
|
|
|
|
|
|
|
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
|
2017-03-27 08:27:37 +00:00
|
|
|
if (!tbl)
|
|
|
|
return NULL;
|
|
|
|
|
2015-06-05 06:35:09 +00:00
|
|
|
INIT_LIST_HEAD_RCU(&tbl->it_group_list);
|
2017-03-22 04:21:50 +00:00
|
|
|
kref_init(&tbl->it_kref);
|
2015-06-05 06:35:09 +00:00
|
|
|
|
|
|
|
return tbl;
|
|
|
|
}
|
|
|
|
|
2018-03-02 09:56:11 +00:00
|
|
|
struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
struct pci_controller *hose = pci_bus_to_host(dev->bus);
|
|
|
|
|
|
|
|
return of_node_get(hose->dn);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(pnv_pci_get_phb_node);
|
|
|
|
|
|
|
|
/*
 * Claim (@enable != 0) or release (@enable == 0) the PBCQ tunnel BAR of
 * the PHB hosting @dev.
 *
 * Only one device per PHB can use atomics; ownership is first-come,
 * first-served, and the BAR may only be released by passing the same
 * address it was claimed with. Returns 0 on success or a negative errno.
 */
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	__be64 raw;
	u64 current_bar;
	int ret;

	/* Both the get and set OPAL calls must be available. */
	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	mutex_lock(&tunnel_mutex);

	ret = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &raw);
	if (ret != OPAL_SUCCESS) {
		ret = -EIO;
		goto unlock;
	}
	current_bar = be64_to_cpu(raw);

	if (enable) {
		/*
		 * First-come, first-served: an already-set BAR may only
		 * be "re-claimed" with the identical address.
		 */
		if (current_bar) {
			ret = (current_bar == addr) ? 0 : -EBUSY;
			goto unlock;
		}
	} else {
		/* Only the current owner may release the tunnel BAR. */
		if (current_bar != addr) {
			ret = -EPERM;
			goto unlock;
		}
		addr = 0x0ULL;
	}

	ret = opal_error_code(opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr));
unlock:
	mutex_unlock(&tunnel_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
|
|
|
|
|
2013-05-10 06:59:18 +00:00
|
|
|
void pnv_pci_shutdown(void)
|
|
|
|
{
|
|
|
|
struct pci_controller *hose;
|
|
|
|
|
2015-05-27 06:06:59 +00:00
|
|
|
list_for_each_entry(hose, &hose_list, list_node)
|
|
|
|
if (hose->controller_ops.shutdown)
|
|
|
|
hose->controller_ops.shutdown(hose);
|
2013-05-10 06:59:18 +00:00
|
|
|
}
|
|
|
|
|
2013-04-25 19:20:57 +00:00
|
|
|
/* Fixup wrong class code in p7ioc and p8 root complex */
|
2012-12-21 22:04:10 +00:00
|
|
|
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
|
2011-11-06 18:56:00 +00:00
|
|
|
{
|
2022-02-14 11:41:08 +00:00
|
|
|
dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
|
2011-11-06 18:56:00 +00:00
|
|
|
}
|
|
|
|
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
|
|
|
|
|
2011-09-19 17:45:05 +00:00
|
|
|
/*
 * Probe and initialise all PowerNV PHBs described in the device tree,
 * then hook PCI DMA up to the IOMMU.
 */
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* Without OPAL (e.g. in sim) there is nothing to probe. */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

#ifdef CONFIG_PCIEPORTBUS
	/*
	 * On PowerNV PCIe devices are (currently) managed in cooperation
	 * with firmware, and enough assumptions are baked into firmware
	 * and the platform code that letting the generic portbus services
	 * bind is unwise. Until that is reworked, keep the portbus driver
	 * out of the way: AER events already arrive via EEH, pciehp cannot
	 * work without further kernel changes (and portbus binding breaks
	 * pnv_php), and the remaining services still need integration
	 * thought.
	 */
	pcie_ports_disabled = true;
#endif

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub")
		pnv_pci_init_ioda_hub(np);

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}
|
2013-11-21 06:43:14 +00:00
|
|
|
|
2018-12-19 08:52:21 +00:00
|
|
|
static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
|
|
|
|
unsigned long action, void *data)
|
|
|
|
{
|
|
|
|
struct device *dev = data;
|
|
|
|
|
|
|
|
switch (action) {
|
|
|
|
case BUS_NOTIFY_DEL_DEVICE:
|
|
|
|
iommu_del_device(dev);
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct notifier_block pnv_tce_iommu_bus_nb = {
|
|
|
|
.notifier_call = pnv_tce_iommu_bus_notifier,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int __init pnv_tce_iommu_bus_notifier_init(void)
|
|
|
|
{
|
|
|
|
bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);
|