Merge branch 'hns3-next'
Salil Mehta says:

====================
Bug fixes and some minor changes to HNS3 driver

This patch-set presents some fixes and minor changes to the HNS3 Ethernet Driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 35edb56e94
@@ -3085,7 +3085,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	priv->dev = &pdev->dev;
 	priv->netdev = netdev;
 	priv->ae_handle = handle;
-	priv->ae_handle->reset_level = HNAE3_NONE_RESET;
 	priv->ae_handle->last_reset_time = jiffies;
 	priv->tx_timeout_count = 0;
 
@@ -3106,6 +3105,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	/* Carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
+	if (handle->flags & HNAE3_SUPPORT_VF)
+		handle->reset_level = HNAE3_VF_RESET;
+	else
+		handle->reset_level = HNAE3_FUNC_RESET;
+
 	ret = hns3_get_ring_config(priv);
 	if (ret) {
 		ret = -ENOMEM;
@@ -3396,7 +3400,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 	struct net_device *ndev = kinfo->netdev;
 
 	if (!netif_running(ndev))
-		return -EIO;
+		return 0;
 
 	return hns3_nic_net_stop(ndev);
 }
@@ -3436,10 +3440,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 	/* Carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
-	ret = hns3_get_ring_config(priv);
-	if (ret)
-		return ret;
-
 	ret = hns3_nic_init_vector_data(priv);
 	if (ret)
 		return ret;
@@ -3471,10 +3471,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 	if (ret)
 		netdev_err(netdev, "uninit ring error\n");
 
-	hns3_put_ring_config(priv);
-
-	priv->ring_data = NULL;
-
 	hns3_uninit_mac_addr(netdev);
 
 	return ret;
@@ -119,8 +119,8 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
 		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
 				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
 				HCLGE_NIC_CMQ_ENABLE);
-		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
 		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
 	} else {
 		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
 				lower_32_bits(dma));
@@ -129,8 +129,8 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
 		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
 				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
 				HCLGE_NIC_CMQ_ENABLE);
-		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
 		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
 	}
 }
 
@@ -2495,7 +2495,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	u32 cmdq_src_reg;
 
 	/* fetch the events from their corresponding regs */
-	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
 
 	/* Assumption: If by any chance reset and mailbox events are reported
@@ -2819,16 +2819,17 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
 
 static void hclge_reset(struct hclge_dev *hdev)
 {
-	/* perform reset of the stack & ae device for a client */
+	struct hnae3_handle *handle;
 
+	/* perform reset of the stack & ae device for a client */
+	handle = &hdev->vport[0].nic;
+	rtnl_lock();
 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
 
 	if (!hclge_reset_wait(hdev)) {
-		rtnl_lock();
 		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
 		hclge_reset_ae_dev(hdev->ae_dev);
 		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
-		rtnl_unlock();
 
 		hclge_clear_reset_cause(hdev);
 	} else {
@@ -2838,6 +2839,8 @@ static void hclge_reset(struct hclge_dev *hdev)
 	}
 
 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+	handle->last_reset_time = jiffies;
+	rtnl_unlock();
 }
 
 static void hclge_reset_event(struct hnae3_handle *handle)
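
Read together, the two hunks above widen the rtnl_lock scope in hclge_reset: the lock is now taken before the client is notified down and released only after the client is back up and handle->last_reset_time has been stamped, instead of only wrapping the uninit/reset/init step. A condensed sketch of how the function reads once both hunks are applied, reconstructed from the context lines shown here; the else branch and everything outside these hunks is elided:

static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;

	/* perform reset of the stack & ae device for a client */
	handle = &hdev->vport[0].nic;
	rtnl_lock();
	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	if (!hclge_reset_wait(hdev)) {
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);

		hclge_clear_reset_cause(hdev);
	} else {
		/* retry/reschedule path, not shown in the hunks above */
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	handle->last_reset_time = jiffies;
	rtnl_unlock();
}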
@@ -2850,8 +2853,13 @@ static void hclge_reset_event(struct hnae3_handle *handle)
 	 * know this if last reset request did not occur very recently (watchdog
 	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
 	 * In case of new request we reset the "reset level" to PF reset.
+	 * And if it is a repeat reset request of the most recent one then we
+	 * want to make sure we throttle the reset request. Therefore, we will
+	 * not allow it again before 3*HZ times.
 	 */
-	if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
+	if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+		return;
+	else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
 		handle->reset_level = HNAE3_FUNC_RESET;
 
 	dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
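
The expanded comment above spells out two windows measured against handle->last_reset_time: a repeat request arriving within 3*HZ jiffies of the last reset is dropped, while a request arriving after the 4 * 5*HZ watchdog window is treated as new and restarted at function-reset level. A minimal sketch of just that decision using the standard jiffies helpers; it relies only on the hnae3_handle fields visible in the hunk (last_reset_time, reset_level), and the helper name and bool return convention are illustrative, not part of the driver:

#include <linux/jiffies.h>	/* jiffies, HZ, time_before(), time_after() */
#include <linux/types.h>	/* bool */

/* Sketch: should an incoming reset request be acted upon right now? */
static bool reset_request_allowed(struct hnae3_handle *handle)
{
	/* repeat of a very recent request: throttle it */
	if (time_before(jiffies, handle->last_reset_time + 3 * HZ))
		return false;

	/* no reset activity for longer than the watchdog window: treat it as
	 * a fresh request and fall back to the lowest (function) reset level
	 */
	if (time_after(jiffies, handle->last_reset_time + 4 * 5 * HZ))
		handle->reset_level = HNAE3_FUNC_RESET;

	return true;
}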
@@ -2863,8 +2871,6 @@ static void hclge_reset_event(struct hnae3_handle *handle)
 
 	if (handle->reset_level < HNAE3_GLOBAL_RESET)
 		handle->reset_level++;
-
-	handle->last_reset_time = jiffies;
 }
 
 static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -89,6 +89,7 @@
 
 /* Reset related Registers */
 #define HCLGE_MISC_RESET_STS_REG	0x20700
+#define HCLGE_MISC_VECTOR_INT_STS	0x20800
 #define HCLGE_GLOBAL_RESET_REG		0x20A00
 #define HCLGE_GLOBAL_RESET_BIT		0x0
 #define HCLGE_CORE_RESET_BIT		0x1
@@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
 	}
 }
 
-/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
+ * from mailbox message
  * msg[0]: opcode
  * msg[1]: <not relevant to this function>
  * msg[2]: ring_num
  * msg[3]: first ring type (TX|RX)
  * msg[4]: first tqp id
- * msg[5] ~ msg[14]: other ring type and tqp id
+ * msg[5]: first int_gl idx
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
  */
 static int hclge_get_ring_chain_from_mbx(
 			struct hclge_mbx_vf_to_pf_cmd *req,
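
The rewritten comment documents the new message layout: msg[0] carries the opcode, msg[2] the ring count, msg[3]..msg[5] the first ring's type, tqp id and int_gl idx, and every further ring contributes a (type, tqp id, int_gl idx) triple. A rough sketch of walking that variable part, reusing the two offset/stride macros that appear in the hunks below; the loop bounds, the include, the helper name and the assumption that the ring type sits at offset 0 of each triple are inferred from the comment rather than shown in the diff:

#include <linux/printk.h>
#include "hclge_mbx.h"	/* struct hclge_mbx_vf_to_pf_cmd and the two macros */

/* Sketch: dump the per-ring triples of a VF->PF ring-to-vector map message. */
static void dump_ring_map_msg(const struct hclge_mbx_vf_to_pf_cmd *req)
{
	int ring_num = req->msg[2];	/* msg[2]: ring_num */
	int i;

	/* the first ring lives in the fixed part: msg[3]/msg[4]/msg[5] */
	pr_debug("ring 0: type %u tqp %u int_gl %u\n",
		 req->msg[3], req->msg[4], req->msg[5]);

	for (i = 1; i < ring_num; i++) {
		int base = HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			   HCLGE_MBX_RING_MAP_BASIC_MSG_NUM;

		pr_debug("ring %d: type %u tqp %u int_gl %u\n", i,
			 req->msg[base], req->msg[base + 1],
			 req->msg[base + 2]);
	}
}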
@@ -131,8 +133,8 @@ static int hclge_get_ring_chain_from_mbx(
 	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
 	ring_chain->tqp_index =
 			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
-	hnae3_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
-			HCLGE_INT_GL_IDX_S,
+	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+			HNAE3_RING_GL_IDX_S,
 			req->msg[5]);
 
 	cur_chain = ring_chain;
@@ -151,8 +153,8 @@ static int hclge_get_ring_chain_from_mbx(
 				[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
 				HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
 
-		hnae3_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
-				HCLGE_INT_GL_IDX_S,
+		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+				HNAE3_RING_GL_IDX_S,
 				req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
 				HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
 