Merge branch 'there-are-some-bugfix-for-the-hns3-ethernet-driver'
Jijie Shao says:

====================
There are some bugfixes for the HNS3 ethernet driver.

ChangeLog:
v2 -> v3:
  - Rewrite the commit log of 'net: hns3: add sync command to sync io-pgtable' to add a more verbose explanation, suggested by Paolo.
  - Add a Fixes tag for the hardware issue, suggested by Paolo and Simon Horman.
  v2: https://lore.kernel.org/all/20241018101059.1718375-1-shaojijie@huawei.com/

v1 -> v2:
  - Pass IRQF_NO_AUTOEN to request_irq(), suggested by Jakub.
  - Rewrite the commit logs of 'net: hns3: default enable tx bounce buffer when smmu enabled' and 'net: hns3: add sync command to sync io-pgtable'.
  v1: https://lore.kernel.org/all/20241011094521.3008298-1-shaojijie@huawei.com/
====================

Link: https://patch.msgid.link/20241025092938.2912958-1-shaojijie@huawei.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit d80a309130
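The IRQF_NO_AUTOEN change mentioned in the changelog follows a common kernel pattern: request the IRQ with auto-enable suppressed so the handler cannot fire before the state it touches is initialised, then call enable_irq() explicitly once setup is complete. The sketch below only illustrates that general pattern; the demo_* names are placeholders and not part of the driver (the driver's real change is in hclge_misc_irq_init() and hclge_init_ae_dev() in the diff that follows).

#include <linux/interrupt.h>

/* Hypothetical device context; the real driver keeps its vector in struct hclge_dev. */
struct demo_dev {
	int misc_irq;
};

static irqreturn_t demo_misc_irq_handle(int irq, void *data)
{
	/* service the miscellaneous event source */
	return IRQ_HANDLED;
}

static int demo_misc_irq_init(struct demo_dev *ddev)
{
	int ret;

	/* IRQF_NO_AUTOEN leaves the line disabled after request_irq(),
	 * so no interrupt can arrive before initialisation has finished.
	 */
	ret = request_irq(ddev->misc_irq, demo_misc_irq_handle,
			  IRQF_NO_AUTOEN, "demo-misc", ddev);
	if (ret)
		return ret;

	/* ... finish setting up everything the handler relies on ... */

	enable_irq(ddev->misc_irq);
	return 0;
}

On teardown the pattern is mirrored with synchronize_irq() followed by disable_irq() before the vector is freed, as the hclge_uninit_ae_dev() hunk below does.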
@@ -1293,8 +1293,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
		/* save the buffer addr until the last read operation */
		*save_buf = read_buf;
	}

	/* get data ready for the first time to read */
	/* get data ready for the first time to read */
	if (!*ppos) {
		ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
					read_buf, hns3_dbg_cmd[index].buf_len);
		if (ret)
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

@@ -380,6 +381,24 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
#define HNS3_INVALID_PTYPE \
		ARRAY_SIZE(hns3_rx_ptype_tbl)

static void hns3_dma_map_sync(struct device *dev, unsigned long iova)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_iotlb_gather iotlb_gather;
	size_t granule;

	if (!domain || !iommu_is_dma_domain(domain))
		return;

	granule = 1 << __ffs(domain->pgsize_bitmap);
	iova = ALIGN_DOWN(iova, granule);
	iotlb_gather.start = iova;
	iotlb_gather.end = iova + granule - 1;
	iotlb_gather.pgsize = granule;

	iommu_iotlb_sync(domain, &iotlb_gather);
}

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

@@ -1032,6 +1051,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_tx_spare *tx_spare;
	struct page *page;
	dma_addr_t dma;

@@ -1073,6 +1094,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
	tx_spare->buf = page_address(page);
	tx_spare->len = PAGE_SIZE << order;
	ring->tx_spare = tx_spare;
	ring->tx_copybreak = priv->tx_copybreak;
	return;

dma_mapping_error:

@@ -1724,7 +1746,9 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
				  unsigned int type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hnae3_handle *handle = ring->tqp->handle;
	struct device *dev = ring_to_dev(ring);
	struct hnae3_ae_dev *ae_dev;
	unsigned int size;
	dma_addr_t dma;

@@ -1756,6 +1780,13 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
		return -ENOMEM;
	}

	/* Add a SYNC command to sync io-pgtable to avoid errors in pgtable
	 * prefetch
	 */
	ae_dev = hns3_get_ae_dev(handle);
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hns3_dma_map_sync(dev, dma);

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
@@ -2452,7 +2483,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
		return ret;
	}

	netdev->features = features;
	return 0;
}

@@ -4868,6 +4898,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
	devm_kfree(&pdev->dev, priv->tqp_vector);
}

static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
{
#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
#define HNS3_MAX_PACKET_SIZE (64 * 1024)

	struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
	struct hnae3_handle *handle = priv->ae_handle;

	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return;

	if (!(domain && iommu_is_dma_domain(domain)))
		return;

	priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
	priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;

	if (priv->tx_copybreak < priv->min_tx_copybreak)
		priv->tx_copybreak = priv->min_tx_copybreak;
	if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
		handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
}

static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			      unsigned int ring_type)
{

@@ -5101,6 +5155,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
	int i, j;
	int ret;

	hns3_update_tx_spare_buf_config(priv);
	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(&priv->ring[i]);
		if (ret) {

@@ -5305,6 +5360,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
	priv->min_tx_copybreak = 0;
	priv->min_tx_spare_buf_size = 0;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
@@ -596,6 +596,8 @@ struct hns3_nic_priv {
	struct hns3_enet_coalesce rx_coal;
	u32 tx_copybreak;
	u32 rx_copybreak;
	u32 min_tx_copybreak;
	u32 min_tx_spare_buf_size;
};

union l3_hdr_info {
@@ -1933,6 +1933,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
	return ret;
}

static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (copybreak < priv->min_tx_copybreak) {
		netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
			   copybreak, priv->min_tx_copybreak);
		return -EINVAL;
	}
	return 0;
}

static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (buf_size < priv->min_tx_spare_buf_size) {
		netdev_err(netdev,
			   "tx spare buf size %u should be no less than %u!\n",
			   buf_size, priv->min_tx_spare_buf_size);
		return -EINVAL;
	}
	return 0;
}

static int hns3_set_tunable(struct net_device *netdev,
			    const struct ethtool_tunable *tuna,
			    const void *data)

@@ -1949,6 +1974,10 @@ static int hns3_set_tunable(struct net_device *netdev,

	switch (tuna->id) {
	case ETHTOOL_TX_COPYBREAK:
		ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
		if (ret)
			return ret;

		priv->tx_copybreak = *(u32 *)data;

		for (i = 0; i < h->kinfo.num_tqps; i++)

@@ -1963,6 +1992,10 @@ static int hns3_set_tunable(struct net_device *netdev,

		break;
	case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
		ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
		if (ret)
			return ret;

		old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
		new_tx_spare_buf_size = *(u32 *)data;
		netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -3584,6 +3585,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
	return ret;
}

static void hclge_set_reset_pending(struct hclge_dev *hdev,
				    enum hnae3_reset_type reset_type)
{
	/* When an incorrect reset type is executed, the get_reset_level
	 * function generates the HNAE3_NONE_RESET flag. As a result, this
	 * type does not need to be pending.
	 */
	if (reset_type != HNAE3_NONE_RESET)
		set_bit(reset_type, &hdev->reset_pending);
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
	 */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;

@@ -3614,7 +3626,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
@@ -3769,7 +3781,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
		 HCLGE_NAME, pci_name(hdev->pdev));
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, hdev->misc_vector.name, hdev);
			  IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4062,7 +4074,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
		hclge_reset_task_schedule(hdev);
		break;
	default:

@@ -4096,6 +4108,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	clear_bit(HNAE3_NONE_RESET, addr);

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

@@ -4237,7 +4251,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
		return false;
	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->rst_stats.reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_set_reset_pending(hdev, hdev->reset_type);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%u)\n",
			 hdev->rst_stats.reset_fail_cnt);

@@ -4480,8 +4494,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
#define HCLGE_SUPPORT_RESET_TYPE \
	(BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
	 BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))

	struct hclge_dev *hdev = ae_dev->priv;

	if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
		/* To prevent reset triggered by hclge_reset_event */
		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
		dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
			 rst_type);
		return;
	}

	set_bit(rst_type, &hdev->default_reset_request);
}

@@ -11891,9 +11917,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)

	hclge_init_rxd_adv_layout(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_init_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,

@@ -11906,6 +11929,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	/* Enable MISC vector(vector0) */
	enable_irq(hdev->misc_vector.vector_irq);
	hclge_enable_vector(&hdev->misc_vector, true);

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

@@ -12311,7 +12338,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	disable_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
@@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
	struct hclge_dev *hdev = vport->back;
	struct hclge_ptp *ptp = hdev->ptp;

	if (!ptp)
		return false;

	if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
	    test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
		ptp->tx_skipped++;
@@ -510,9 +510,9 @@ out:
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4

	struct hnae3_queue *tqp;
	int i, j, reg_num;
	int data_num_sum;
	u32 *reg = data;

@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	for (j = 0; j < kinfo->num_tqps; j++) {
		reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
		tqp = kinfo->tqp[j];
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
			*reg++ = readl_relaxed(tqp->io_base -
					       HCLGE_TQP_REG_OFFSET +
					       ring_reg_addr_list[i]);
	}
	data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;

@@ -1395,6 +1395,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
	return ret;
}

static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
				      enum hnae3_reset_type reset_type)
{
	/* When an incorrect reset type is executed, the get_reset_level
	 * function generates the HNAE3_NONE_RESET flag. As a result, this
	 * type does not need to be pending.
	 */
	if (reset_type != HNAE3_NONE_RESET)
		set_bit(reset_type, &hdev->reset_pending);
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1544,7 +1555,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
		 hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclgevf_set_reset_pending(hdev, hdev->reset_type);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);

@@ -1664,6 +1675,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	clear_bit(HNAE3_NONE_RESET, addr);

	return rst_level;
}

@@ -1673,14 +1686,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
		 hdev->reset_level);

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

@@ -1691,8 +1705,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
#define HCLGEVF_SUPPORT_RESET_TYPE \
	(BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
	 BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
	 BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))

	struct hclgevf_dev *hdev = ae_dev->priv;

	if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
		/* To prevent reset triggered by hclge_reset_event */
		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
		dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
			 rst_type);
		return;
	}
	set_bit(rst_type, &hdev->default_reset_request);
}

@@ -1849,14 +1875,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
	 */
	if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
		/* prepare for full reset of stack + pcie interface */
		set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
		hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);

		/* "defer" schedule the reset task again */
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
	} else {
		hdev->reset_attempts++;

		set_bit(hdev->reset_level, &hdev->reset_pending);
		hclgevf_set_reset_pending(hdev, hdev->reset_level);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
	}
	hclgevf_reset_task_schedule(hdev);

@@ -1979,7 +2005,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);

@@ -2289,6 +2315,7 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);

	mutex_init(&hdev->mbx_resp.mbx_mutex);
	sema_init(&hdev->reset_sem, 1);

@@ -2988,7 +3015,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
		 HCLGEVF_DRIVER_NAME);

	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);

	return 0;

@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
		      void *data)
{
#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4

	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *tqp;
	int i, j, reg_um;
	u32 *reg = data;

@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
	reg_um = ARRAY_SIZE(ring_reg_addr_list);
	for (j = 0; j < hdev->num_tqps; j++) {
		reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
		tqp = &hdev->htqp[j].q;
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  HCLGEVF_RING_REG_OFFSET * j);
			*reg++ = readl_relaxed(tqp->io_base -
					       HCLGEVF_TQP_REG_OFFSET +
					       ring_reg_addr_list[i]);
	}

	reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);