linux/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
Igor Russkikh 90869ddfef net: aquantia: Implement pci shutdown callback
We should close the link and stop all NIC operations during shutdown.
On some systems a graceful reboot never closes the NIC interface on its
own, but only signals PCI device shutdown. Without an explicit handler,
the NIC RX rings kept DMA-ing data into their prepared buffers while the
CPU had already rebooted, which caused memory corruption on soft reboot.
A sketch of the resulting shutdown path is included after the sign-offs
below.

Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-22 12:02:49 -04:00
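
For context, the new .shutdown hook below (aq_pci_shutdown) calls aq_nic_shutdown() to quiesce the NIC before the PCI device is disabled. The following is only a minimal sketch of what such a helper has to do, assuming the driver's existing aq_nic_stop() and aq_nic_deinit() primitives and standard rtnl locking; the real implementation lives in aq_nic.c and may differ in detail.

/* Hedged sketch only; see aq_nic.c for the actual aq_nic_shutdown(). */
void aq_nic_shutdown(struct aq_nic_s *self)
{
        if (!self->ndev)
                return;

        rtnl_lock();

        /* Detach and stop the data path so the RX rings stop DMA-ing into
         * buffers the rebooting kernel is about to reuse.
         */
        netif_device_detach(self->ndev);
        if (netif_running(self->ndev))
                aq_nic_stop(self);

        /* Drop the link and deinitialize the hardware. */
        aq_nic_deinit(self);

        rtnl_unlock();
}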


/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */
/* File aq_pci_func.c: Definition of PCI functions. */
#include <linux/interrupt.h>
#include <linux/module.h>
#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
static const struct pci_device_id aq_pci_tbl[] = {
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },
        {}
};
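
/* Board table: maps a device ID plus hardware revision to the matching
 * hw_atl ops and capability structures. AQ_HWREV_ANY entries match any
 * revision not claimed by an earlier, revision-specific entry.
 */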
static const struct aq_board_revision_s hw_atl_boards[] = {
        { AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
        { AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
        { AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
        { AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
        { AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },
        { AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
        { AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
        { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
        { AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
        { AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },
        { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
        { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
        { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
        { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
        { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
        { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
        { AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
        { AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
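
/* Pick the hw ops/caps for this PCI device and revision from hw_atl_boards. */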
static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
                                     const struct aq_hw_ops **ops,
                                     const struct aq_hw_caps_s **caps)
{
        int i = 0;

        if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
                if (hw_atl_boards[i].devid == pdev->device &&
                    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
                     hw_atl_boards[i].revision == pdev->revision)) {
                        *ops = hw_atl_boards[i].ops;
                        *caps = hw_atl_boards[i].caps;
                        break;
                }
        }

        if (i == ARRAY_SIZE(hw_atl_boards))
                return -EINVAL;

        return 0;
}
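
/* Basic PCI bring-up: prefer a 64-bit DMA mask, fall back to 32-bit,
 * then reserve the MMIO regions and enable bus mastering.
 */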
int aq_pci_func_init(struct pci_dev *pdev)
{
        int err = 0;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        }
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (!err)
                        err = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
        }
        if (err != 0) {
                err = -ENOSR;
                goto err_exit;
        }

        err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
        if (err < 0)
                goto err_exit;

        pci_set_master(pdev);

        return 0;

err_exit:
        return err;
}
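
/* Request the IRQ for vector i: MSI/MSI-X vectors get a dedicated handler,
 * legacy INTx uses a shared one. MSI-X vectors also get an affinity hint.
 */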
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
                          char *name, void *aq_vec, cpumask_t *affinity_mask)
{
        struct pci_dev *pdev = self->pdev;
        int err = 0;

        if (pdev->msix_enabled || pdev->msi_enabled)
                err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
                                  name, aq_vec);
        else
                err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
                                  IRQF_SHARED, name, aq_vec);

        if (err >= 0) {
                self->msix_entry_mask |= (1 << i);
                self->aq_vec[i] = aq_vec;

                if (pdev->msix_enabled)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i),
                                              affinity_mask);
        }
        return err;
}
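
/* Free every IRQ recorded in msix_entry_mask, clearing any affinity hint. */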
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
        struct pci_dev *pdev = self->pdev;
        unsigned int i = 0U;

        for (i = 32U; i--;) {
                if (!((1U << i) & self->msix_entry_mask))
                        continue;

                if (pdev->msix_enabled)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
                free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
                self->msix_entry_mask &= ~(1U << i);
        }
}
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
        if (self->pdev->msix_enabled)
                return AQ_HW_IRQ_MSIX;
        if (self->pdev->msi_enabled)
                return AQ_HW_IRQ_MSI;

        return AQ_HW_IRQ_LEGACY;
}
static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
        pci_free_irq_vectors(self->pdev);
}
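
/* Probe: enable the device, map the first memory BAR, allocate interrupt
 * vectors (MSI-X preferred, MSI/legacy as fallback) and register the
 * net_device.
 */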
static int aq_pci_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pci_id)
{
        struct aq_nic_s *self = NULL;
        int err = 0;
        struct net_device *ndev;
        resource_size_t mmio_pa;
        u32 bar;
        u32 numvecs;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = aq_pci_func_init(pdev);
        if (err)
                goto err_pci_func;

        ndev = aq_ndev_alloc();
        if (!ndev) {
                err = -ENOMEM;
                goto err_ndev;
        }

        self = netdev_priv(ndev);
        self->pdev = pdev;
        SET_NETDEV_DEV(ndev, &pdev->dev);
        pci_set_drvdata(pdev, self);

        err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
                                        &aq_nic_get_cfg(self)->aq_hw_caps);
        if (err)
                goto err_ioremap;

        self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
        if (!self->aq_hw) {
                err = -ENOMEM;
                goto err_ioremap;
        }
        self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);

        for (bar = 0; bar < 4; ++bar) {
                if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
                        resource_size_t reg_sz;

                        mmio_pa = pci_resource_start(pdev, bar);
                        if (mmio_pa == 0U) {
                                err = -EIO;
                                goto err_free_aq_hw;
                        }

                        reg_sz = pci_resource_len(pdev, bar);
                        if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
                                err = -EIO;
                                goto err_free_aq_hw;
                        }

                        self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
                        if (!self->aq_hw->mmio) {
                                err = -EIO;
                                goto err_free_aq_hw;
                        }
                        break;
                }
        }

        if (bar == 4) {
                err = -EIO;
                goto err_free_aq_hw;
        }

        numvecs = min((u8)AQ_CFG_VECS_DEF,
                      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
        numvecs = min(numvecs, num_online_cpus());
        /*enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
        err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
                                    PCI_IRQ_MSIX);
        if (err < 0) {
                err = pci_alloc_irq_vectors(self->pdev, 1, 1,
                                            PCI_IRQ_MSI | PCI_IRQ_LEGACY);
                if (err < 0)
                        goto err_hwinit;
        }
#endif

        /* net device init */
        aq_nic_cfg_start(self);
        aq_nic_ndev_init(self);
        err = aq_nic_ndev_register(self);
        if (err < 0)
                goto err_register;

        return 0;
err_register:
        aq_nic_free_vectors(self);
        aq_pci_free_irq_vectors(self);
err_hwinit:
        iounmap(self->aq_hw->mmio);
err_free_aq_hw:
        kfree(self->aq_hw);
err_ioremap:
        free_netdev(ndev);
err_ndev:
        pci_release_regions(pdev);
err_pci_func:
        pci_disable_device(pdev);
        return err;
}
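
/* Remove: unregister the netdev and release everything probe allocated. */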
static void aq_pci_remove(struct pci_dev *pdev)
{
        struct aq_nic_s *self = pci_get_drvdata(pdev);

        if (self->ndev) {
                if (self->ndev->reg_state == NETREG_REGISTERED)
                        unregister_netdev(self->ndev);
                aq_nic_free_vectors(self);
                aq_pci_free_irq_vectors(self);
                iounmap(self->aq_hw->mmio);
                kfree(self->aq_hw);
                pci_release_regions(pdev);
                free_netdev(self->ndev);
        }

        pci_disable_device(pdev);
}
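
/* Shutdown callback added by this change: quiesce the NIC so the RX rings
 * stop DMA-ing into memory before the machine reboots, then put the device
 * into D3hot when powering off.
 */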
static void aq_pci_shutdown(struct pci_dev *pdev)
{
        struct aq_nic_s *self = pci_get_drvdata(pdev);

        aq_nic_shutdown(self);

        pci_disable_device(pdev);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}
static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
{
        struct aq_nic_s *self = pci_get_drvdata(pdev);

        return aq_nic_change_pm_state(self, &pm_msg);
}

static int aq_pci_resume(struct pci_dev *pdev)
{
        struct aq_nic_s *self = pci_get_drvdata(pdev);
        pm_message_t pm_msg = PMSG_RESTORE;

        return aq_nic_change_pm_state(self, &pm_msg);
}
static struct pci_driver aq_pci_ops = {
        .name = AQ_CFG_DRV_NAME,
        .id_table = aq_pci_tbl,
        .probe = aq_pci_probe,
        .remove = aq_pci_remove,
        .suspend = aq_pci_suspend,
        .resume = aq_pci_resume,
        .shutdown = aq_pci_shutdown,
};

module_pci_driver(aq_pci_ops);