/*
 * This file implements the platform-dependent EEH operations for the
 * powernv platform, which was created to provide full hypervisor
 * support.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/ppc-pci.h>

#include "powernv.h"
#include "pci.h"

/* Set once the OPAL event notifier below has been registered */
static bool pnv_eeh_nb_init = false;

/**
 * pnv_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on powernv.
 */
static int pnv_eeh_init(void)
{
        struct pci_controller *hose;
        struct pnv_phb *phb;

        /* We require OPALv3 */
        if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
                pr_warn("%s: OPALv3 is required!\n", __func__);
                return -EINVAL;
        }

        /* Set probe mode */
        eeh_add_flag(EEH_PROBE_MODE_DEV);

        /*
         * P7IOC blocks PCI config access to a frozen PE, but PHB3
         * doesn't. So we have to selectively enable I/O prior to
         * collecting the error log.
         */
        list_for_each_entry(hose, &hose_list, list_node) {
                phb = hose->private_data;

                if (phb->model == PNV_PHB_MODEL_P7IOC)
                        eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

                /*
                 * PE#0 should be regarded as valid by the EEH core
                 * if it's not the reserved one. Currently, we have
                 * the reserved PE#0 for PHB3 and PE#127 for P7IOC.
                 * So PE#0 should be regarded as valid for P7IOC.
                 */
                if (phb->ioda.reserved_pe != 0)
                        eeh_add_flag(EEH_VALID_PE_ZERO);

                break;
        }

        return 0;
}
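
/*
 * Note: with EEH_PROBE_MODE_DEV set above, EEH devices are probed from
 * the PCI devices themselves via pnv_eeh_dev_probe() below rather than
 * from the device tree (pnv_eeh_ops.of_probe is left NULL).
 */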

static int pnv_eeh_event(struct notifier_block *nb,
                         unsigned long events, void *change)
{
        uint64_t changed_evts = (uint64_t)change;

        /*
         * Simply send a special EEH event if EEH has been enabled.
         * Otherwise, clear the pending PCI error event so that it
         * isn't left behind in case EEH is enabled later.
         */
        if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
            !(events & OPAL_EVENT_PCI_ERROR))
                return 0;

        if (eeh_enabled())
                eeh_send_failure_event(NULL);
        else
                opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

        return 0;
}

static struct notifier_block pnv_eeh_nb = {
        .notifier_call  = pnv_eeh_event,
        .next           = NULL,
        .priority       = 0
};
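
/*
 * pnv_eeh_nb is registered with the OPAL notifier in pnv_eeh_post_init().
 * Once registered, every OPAL_EVENT_PCI_ERROR notification is forwarded
 * to the EEH core as a failure event, or acknowledged and cleared while
 * EEH is still disabled.
 */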

#ifdef CONFIG_DEBUG_FS
static ssize_t pnv_eeh_ei_write(struct file *filp,
                                const char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct pci_controller *hose = filp->private_data;
        struct eeh_dev *edev;
        struct eeh_pe *pe;
        int pe_no, type, func;
        unsigned long addr, mask;
        char buf[50];
        int ret;

        if (!eeh_ops || !eeh_ops->err_inject)
                return -ENXIO;

        /* Copy over argument buffer */
        ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
        if (!ret)
                return -EFAULT;

        /* Retrieve parameters */
        ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
                     &pe_no, &type, &func, &addr, &mask);
        if (ret != 5)
                return -EINVAL;

        /* Retrieve PE */
        edev = kzalloc(sizeof(*edev), GFP_KERNEL);
        if (!edev)
                return -ENOMEM;
        edev->phb = hose;
        edev->pe_config_addr = pe_no;
        pe = eeh_pe_get(edev);
        kfree(edev);
        if (!pe)
                return -ENODEV;

        /* Do error injection */
        ret = eeh_ops->err_inject(pe, type, func, addr, mask);
        return ret < 0 ? ret : count;
}

static const struct file_operations pnv_eeh_ei_fops = {
        .open   = simple_open,
        .llseek = no_llseek,
        .write  = pnv_eeh_ei_write,
};
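
/*
 * Usage sketch for the "err_injct" debugfs file created in
 * pnv_eeh_post_init(): write "pe_no:type:func:addr:mask" with all fields
 * in hex, and the request is handed to eeh_ops->err_inject(). Assuming
 * the PHB debugfs directory is /sys/kernel/debug/powerpc/PCI0000, an
 * injection against PE#2 might look like:
 *
 *   echo "2:0:0:0:0" > /sys/kernel/debug/powerpc/PCI0000/err_injct
 *
 * The exact directory name and the meaning of type/func/addr/mask are
 * platform and firmware specific (see pnv_eeh_err_inject() below).
 */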

/*
 * The helpers below poke PHB registers directly through the MMIO mapping
 * at phb->regs; the offsets correspond to the outbound (0xD10) and
 * inbound A/B (0xD90/0xE10) error injection registers exposed through
 * the debugfs attributes defined further down.
 */
static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        out_be64(phb->regs + offset, val);
        return 0;
}

static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        *val = in_be64(phb->regs + offset);
        return 0;
}

static int pnv_eeh_outb_dbgfs_set(void *data, u64 val)
{
        return pnv_eeh_dbgfs_set(data, 0xD10, val);
}

static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val)
{
        return pnv_eeh_dbgfs_get(data, 0xD10, val);
}

static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val)
{
        return pnv_eeh_dbgfs_set(data, 0xD90, val);
}

static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val)
{
        return pnv_eeh_dbgfs_get(data, 0xD90, val);
}

static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val)
{
        return pnv_eeh_dbgfs_set(data, 0xE10, val);
}

static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val)
{
        return pnv_eeh_dbgfs_get(data, 0xE10, val);
}

DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get,
                        pnv_eeh_outb_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get,
                        pnv_eeh_inbA_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get,
                        pnv_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */

/**
 * pnv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When this
 * function is called, the EEH PEs and devices should have been built.
 * Once the I/O cache has been set up as well, EEH is ready to provide
 * service.
 */
static int pnv_eeh_post_init(void)
{
        struct pci_controller *hose;
        struct pnv_phb *phb;
        int ret = 0;

        /* Register OPAL event notifier */
        if (!pnv_eeh_nb_init) {
                ret = opal_notifier_register(&pnv_eeh_nb);
                if (ret) {
                        pr_warn("%s: Can't register OPAL event notifier (%d)\n",
                                __func__, ret);
                        return ret;
                }

                pnv_eeh_nb_init = true;
        }

        list_for_each_entry(hose, &hose_list, list_node) {
                phb = hose->private_data;

                /*
                 * If EEH is enabled, we're going to rely on it.
                 * Otherwise, we fall back to the conventional
                 * mechanism of clearing frozen PEs during PCI
                 * config accesses.
                 */
                if (eeh_enabled())
                        phb->flags |= PNV_PHB_FLAG_EEH;
                else
                        phb->flags &= ~PNV_PHB_FLAG_EEH;

                /* Create debugfs entries */
#ifdef CONFIG_DEBUG_FS
                if (phb->has_dbgfs || !phb->dbgfs)
                        continue;

                phb->has_dbgfs = 1;
                debugfs_create_file("err_injct", 0200,
                                    phb->dbgfs, hose,
                                    &pnv_eeh_ei_fops);

                debugfs_create_file("err_injct_outbound", 0600,
                                    phb->dbgfs, hose,
                                    &pnv_eeh_outb_dbgfs_ops);
                debugfs_create_file("err_injct_inboundA", 0600,
                                    phb->dbgfs, hose,
                                    &pnv_eeh_inbA_dbgfs_ops);
                debugfs_create_file("err_injct_inboundB", 0600,
                                    phb->dbgfs, hose,
                                    &pnv_eeh_inbB_dbgfs_ops);
#endif /* CONFIG_DEBUG_FS */
        }

        return ret;
}

/**
 * pnv_eeh_dev_probe - Do probe on PCI device
 * @dev: PCI device
 * @flag: unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if they support EEH. This function is
 * introduced for that purpose. By default, EEH is enabled on all PCI
 * devices, so we only need to do the necessary initialization on the
 * corresponding EEH device and create the PE accordingly.
 *
 * Note that it's unsafe to retrieve the EEH device through the
 * corresponding PCI device. During PCI device hotplug, which might have
 * been triggered by the EEH core, the binding between the EEH device
 * and the PCI device isn't built yet.
 */
static int pnv_eeh_dev_probe(struct pci_dev *dev, void *flag)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct device_node *dn = pci_device_to_OF_node(dev);
        struct eeh_dev *edev = of_node_to_eeh_dev(dn);
        int ret;

        /*
         * The root bridge doesn't have any subordinate PCI devices and
         * we don't have an OF node for it, so it's not reasonable to
         * continue probing in that case.
         */
        if (!dn || !edev || edev->pe)
                return 0;

        /* Skip for PCI-ISA bridge */
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
                return 0;

        /* Initialize eeh device */
        edev->class_code = dev->class;
        edev->mode &= 0xFFFFFF00;
        if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
                edev->mode |= EEH_DEV_BRIDGE;
        edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (pci_is_pcie(dev)) {
                edev->pcie_cap = pci_pcie_cap(dev);

                if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
                        edev->mode |= EEH_DEV_ROOT_PORT;
                else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
                        edev->mode |= EEH_DEV_DS_PORT;

                edev->aer_cap = pci_find_ext_capability(dev,
                                                        PCI_EXT_CAP_ID_ERR);
        }

        edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
        edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);

        /* Create PE */
        ret = eeh_add_to_parent_pe(edev);
        if (ret) {
                pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n",
                        __func__, pci_name(dev), ret);
                return ret;
        }

        /*
         * If the PE contains any one of the following adapters, the
         * PCI config space can't be accessed when dumping the EEH log.
         * Otherwise, we will run into a fenced PHB caused by a shortage
         * of outbound credits in the adapter. The PCI config access
         * should be blocked until PE reset. MMIO access is certainly
         * dropped by hardware. In order to drop PCI config requests,
         * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
         * will be checked in the backend for PE state retrieval. If
         * the PE becomes frozen for the first time and the flag has
         * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
         * that PE to block its config space.
         *
         * Broadcom Austin 4-port NICs (14e4:1657)
         * Broadcom Shiner 2-port 10G NICs (14e4:168e)
         */
        if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
            (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
                edev->pe->state |= EEH_PE_CFG_RESTRICTED;

        /*
         * Cache the PE primary bus, which can't be fetched when
         * full hotplug is in progress. In that case, all child
         * PCI devices of the PE are expected to be removed prior
         * to the PE reset.
         */
        if (!edev->pe->bus)
                edev->pe->bus = dev->bus;

        /*
         * Enable EEH explicitly so that we will do the EEH check
         * while accessing I/O stuff.
         */
        eeh_add_flag(EEH_ENABLED);

        /* Save memory bars */
        eeh_save_bars(edev);

        return 0;
}

/**
 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;
        int ret = -EEXIST;

        /*
         * All we need to do is pass it down to the hardware
         * implementation to handle it.
         */
        if (phb->eeh_ops && phb->eeh_ops->set_option)
                ret = phb->eeh_ops->set_option(pe, option);

        return ret;
}

/**
 * pnv_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the PE address according to the given traditional
 * PCI BDF (Bus/Device/Function) address.
 */
static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
{
        return pe->addr;
}
/**
 * pnv_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: delay while PE state is temporarily unavailable
 *
 * Retrieve the state of the specified PE. On an IODA-compatible
 * platform it should be retrieved from the IODA table, so we prefer
 * passing it down to the hardware implementation to handle.
 */
static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
{
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;
        int ret = EEH_STATE_NOT_SUPPORT;

        if (phb->eeh_ops && phb->eeh_ops->get_state) {
                ret = phb->eeh_ops->get_state(pe);

                /*
                 * If the PE state is temporarily unavailable,
                 * inform the EEH core to delay for the default
                 * period (1 second).
                 */
                if (delay) {
                        *delay = 0;
                        if (ret & EEH_STATE_UNAVAILABLE)
                                *delay = 1000;
                }
        }

        return ret;
}

/**
 * pnv_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;
        int ret = -EEXIST;

        if (phb->eeh_ops && phb->eeh_ops->reset)
                ret = phb->eeh_ops->reset(pe, option);

        return ret;
}

/**
 * pnv_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
        int ret;
        int mwait;

        while (1) {
                ret = pnv_eeh_get_state(pe, &mwait);

                /*
                 * If the PE's state is temporarily unavailable,
                 * we have to wait for the specified time. Otherwise,
                 * the PE's state will be returned immediately.
                 */
                if (ret != EEH_STATE_UNAVAILABLE)
                        return ret;

                max_wait -= mwait;
                if (max_wait <= 0) {
                        pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
                                __func__, pe->addr, max_wait);
                        return EEH_STATE_NOT_SUPPORT;
                }

                msleep(mwait);
        }

        return EEH_STATE_NOT_SUPPORT;
}

/**
 * pnv_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE.
 */
static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
                           char *drv_log, unsigned long len)
{
        if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
                pnv_pci_dump_phb_diag_data(pe->phb, pe->data);

        return 0;
}

/**
 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
 * again.
 */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;
        int ret = 0;

        if (phb->eeh_ops && phb->eeh_ops->configure_bridge)
                ret = phb->eeh_ops->configure_bridge(pe);

        return ret;
}

/**
 * pnv_eeh_err_inject - Inject specified error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: specific error function
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject the specified error, which is
 * determined by @type and @func, to the indicated PE for testing
 * purposes.
 */
static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
                              unsigned long addr, unsigned long mask)
{
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;
        s64 rc;

        /* Sanity check on error type */
        if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
            type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
                pr_warn("%s: Invalid error type %d\n",
                        __func__, type);
                return -ERANGE;
        }

        if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
            func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
                pr_warn("%s: Invalid error function %d\n",
                        __func__, func);
                return -ERANGE;
        }

        /* Does firmware support error injection? */
        if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
                pr_warn("%s: Firmware doesn't support error injection\n",
                        __func__);
                return -ENXIO;
        }

        /* Do error injection */
        rc = opal_pci_err_inject(phb->opal_id, pe->addr,
                                 type, func, addr, mask);
        if (rc != OPAL_SUCCESS) {
                pr_warn("%s: Failure %lld injecting error %d-%d to PHB#%x-PE#%x\n",
                        __func__, rc, type, func,
                        hose->global_number, pe->addr);
                return -EIO;
        }

        return 0;
}
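
/*
 * Note: pnv_eeh_err_inject() is the handler behind eeh_ops->err_inject,
 * and therefore also the backend reached from the debugfs "err_injct"
 * writer above; the sanity checks on the OPAL injection type and
 * function range live here rather than in the debugfs code.
 */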

static inline bool pnv_eeh_cfg_blocked(struct device_node *dn)
{
        struct eeh_dev *edev = of_node_to_eeh_dev(dn);

        if (!edev || !edev->pe)
                return false;

        if (edev->pe->state & EEH_PE_CFG_BLOCKED)
                return true;

        return false;
}

static int pnv_eeh_read_config(struct device_node *dn,
                               int where, int size, u32 *val)
{
        if (pnv_eeh_cfg_blocked(dn)) {
                *val = 0xFFFFFFFF;
                return PCIBIOS_SET_FAILED;
        }

        return pnv_pci_cfg_read(dn, where, size, val);
}

static int pnv_eeh_write_config(struct device_node *dn,
                                int where, int size, u32 val)
{
        if (pnv_eeh_cfg_blocked(dn))
                return PCIBIOS_SET_FAILED;

        return pnv_pci_cfg_write(dn, where, size, val);
}
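
/*
 * Both config accessors above consult pnv_eeh_cfg_blocked(): once a PE
 * carries EEH_PE_CFG_BLOCKED, config writes are dropped and config reads
 * return all ones, which is what software would see from the isolated
 * hardware anyway.
 */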

/**
 * pnv_eeh_next_error - Retrieve next EEH error to handle
 * @pe: Affected PE
 *
 * Using the OPAL API, retrieve the next EEH error for the EEH core
 * to handle.
 */
static int pnv_eeh_next_error(struct eeh_pe **pe)
{
        struct pci_controller *hose;
        struct pnv_phb *phb = NULL;

        list_for_each_entry(hose, &hose_list, list_node) {
                phb = hose->private_data;
                break;
        }

        if (phb && phb->eeh_ops->next_error)
                return phb->eeh_ops->next_error(pe);

        return -EEXIST;
}

static int pnv_eeh_restore_config(struct device_node *dn)
{
        struct eeh_dev *edev = of_node_to_eeh_dev(dn);
        struct pnv_phb *phb;
        s64 ret;

        if (!edev)
                return -EEXIST;

        phb = edev->phb->private_data;
        ret = opal_pci_reinit(phb->opal_id,
                              OPAL_REINIT_PCI_DEV, edev->config_addr);
        if (ret) {
                pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
                        __func__, edev->config_addr, ret);
                return -EIO;
        }

        return 0;
}

static struct eeh_ops pnv_eeh_ops = {
        .name                   = "powernv",
        .init                   = pnv_eeh_init,
        .post_init              = pnv_eeh_post_init,
        .of_probe               = NULL,
        .dev_probe              = pnv_eeh_dev_probe,
        .set_option             = pnv_eeh_set_option,
        .get_pe_addr            = pnv_eeh_get_pe_addr,
        .get_state              = pnv_eeh_get_state,
        .reset                  = pnv_eeh_reset,
        .wait_state             = pnv_eeh_wait_state,
        .get_log                = pnv_eeh_get_log,
        .configure_bridge       = pnv_eeh_configure_bridge,
        .err_inject             = pnv_eeh_err_inject,
        .read_config            = pnv_eeh_read_config,
        .write_config           = pnv_eeh_write_config,
        .next_error             = pnv_eeh_next_error,
        .restore_config         = pnv_eeh_restore_config
};

/**
 * eeh_powernv_init - Register platform dependent EEH operations
 *
 * EEH initialization on powernv platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_powernv_init(void)
{
        int ret = -EINVAL;

        eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
        ret = eeh_ops_register(&pnv_eeh_ops);
        if (!ret)
                pr_info("EEH: PowerNV platform initialized\n");
        else
                pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);

        return ret;
}
machine_early_initcall(powernv, eeh_powernv_init);