Merge branch 'hns3-cleanups'
Guangbin Huang says:

====================
net: hns3: add some cleanups

This series includes some cleanups for the HNS3 ethernet driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit e31a8cf502
@@ -38,9 +38,8 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
 	},
 };
 
-static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, unsigned int cmd);
-static int hns3_dbg_common_file_init(struct hnae3_handle *handle,
-				     unsigned int cmd);
+static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
 
 static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
 	{
@@ -5063,6 +5063,24 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
 	hns3_set_cq_period_mode(priv, rx_mode, false);
 }
 
+static void hns3_state_init(struct hnae3_handle *handle)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+	struct net_device *netdev = handle->kinfo.netdev;
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+
+	if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
+		set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
+
+	if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
+		set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
+}
+
 static int hns3_client_init(struct hnae3_handle *handle)
 {
 	struct pci_dev *pdev = handle->pdev;
@@ -5166,16 +5184,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
 
 	netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
 
-	if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
-		set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
-
-	if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
-		set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
-
-	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
-
-	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
-		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+	hns3_state_init(handle);
 
 	ret = register_netdev(netdev);
 	if (ret) {
@@ -1017,16 +1017,6 @@ struct hclge_common_lb_cmd {
 
 #define HCLGE_TYPE_CRQ 0
 #define HCLGE_TYPE_CSQ 1
-#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
-#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c
-#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
 
 /* this bit indicates that the driver is ready for hardware reset */
 #define HCLGE_NIC_SW_RST_RDY_B 16
@@ -1201,6 +1191,19 @@ struct hclge_dev_specs_1_cmd {
 	u8 rsv1[18];
 };
 
+/* mac speed type defined in firmware command */
+enum HCLGE_FIRMWARE_MAC_SPEED {
+	HCLGE_FW_MAC_SPEED_1G,
+	HCLGE_FW_MAC_SPEED_10G,
+	HCLGE_FW_MAC_SPEED_25G,
+	HCLGE_FW_MAC_SPEED_40G,
+	HCLGE_FW_MAC_SPEED_50G,
+	HCLGE_FW_MAC_SPEED_100G,
+	HCLGE_FW_MAC_SPEED_10M,
+	HCLGE_FW_MAC_SPEED_100M,
+	HCLGE_FW_MAC_SPEED_200G,
+};
+
 #define HCLGE_PHY_LINK_SETTING_BD_NUM 2
 
 struct hclge_phy_link_ksetting_0_cmd {
@@ -92,23 +92,23 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
 
 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
 
-static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
-					 HCLGE_CMDQ_TX_ADDR_H_REG,
-					 HCLGE_CMDQ_TX_DEPTH_REG,
-					 HCLGE_CMDQ_TX_TAIL_REG,
-					 HCLGE_CMDQ_TX_HEAD_REG,
-					 HCLGE_CMDQ_RX_ADDR_L_REG,
-					 HCLGE_CMDQ_RX_ADDR_H_REG,
-					 HCLGE_CMDQ_RX_DEPTH_REG,
-					 HCLGE_CMDQ_RX_TAIL_REG,
-					 HCLGE_CMDQ_RX_HEAD_REG,
+static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
+					 HCLGE_NIC_CSQ_BASEADDR_H_REG,
+					 HCLGE_NIC_CSQ_DEPTH_REG,
+					 HCLGE_NIC_CSQ_TAIL_REG,
+					 HCLGE_NIC_CSQ_HEAD_REG,
+					 HCLGE_NIC_CRQ_BASEADDR_L_REG,
+					 HCLGE_NIC_CRQ_BASEADDR_H_REG,
+					 HCLGE_NIC_CRQ_DEPTH_REG,
+					 HCLGE_NIC_CRQ_TAIL_REG,
+					 HCLGE_NIC_CRQ_HEAD_REG,
 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
 					 HCLGE_CMDQ_INTR_STS_REG,
 					 HCLGE_CMDQ_INTR_EN_REG,
 					 HCLGE_CMDQ_INTR_GEN_REG};
 
 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
-					   HCLGE_VECTOR0_OTER_EN_REG,
+					   HCLGE_PF_OTHER_INT_REG,
 					   HCLGE_MISC_RESET_STS_REG,
 					   HCLGE_MISC_VECTOR_INT_STS,
 					   HCLGE_GLOBAL_RESET_REG,
@@ -959,31 +959,31 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
 {
 	switch (speed_cmd) {
-	case 6:
+	case HCLGE_FW_MAC_SPEED_10M:
 		*speed = HCLGE_MAC_SPEED_10M;
 		break;
-	case 7:
+	case HCLGE_FW_MAC_SPEED_100M:
 		*speed = HCLGE_MAC_SPEED_100M;
 		break;
-	case 0:
+	case HCLGE_FW_MAC_SPEED_1G:
 		*speed = HCLGE_MAC_SPEED_1G;
 		break;
-	case 1:
+	case HCLGE_FW_MAC_SPEED_10G:
 		*speed = HCLGE_MAC_SPEED_10G;
 		break;
-	case 2:
+	case HCLGE_FW_MAC_SPEED_25G:
 		*speed = HCLGE_MAC_SPEED_25G;
 		break;
-	case 3:
+	case HCLGE_FW_MAC_SPEED_40G:
 		*speed = HCLGE_MAC_SPEED_40G;
 		break;
-	case 4:
+	case HCLGE_FW_MAC_SPEED_50G:
 		*speed = HCLGE_MAC_SPEED_50G;
 		break;
-	case 5:
+	case HCLGE_FW_MAC_SPEED_100G:
 		*speed = HCLGE_MAC_SPEED_100G;
 		break;
-	case 8:
+	case HCLGE_FW_MAC_SPEED_200G:
 		*speed = HCLGE_MAC_SPEED_200G;
 		break;
 	default:
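The hunk above and the one below both replace magic firmware speed codes with the new HCLGE_FIRMWARE_MAC_SPEED enum, so the same code-to-speed mapping is spelled out by name on both the parse and configure paths. As a minimal illustrative sketch only (not part of the patch; the table and the helper name are hypothetical), the mapping used by hclge_parse_speed() could equally be written as a lookup table indexed by the enum:

/* Illustrative sketch: table-driven equivalent of the switch in
 * hclge_parse_speed(). The helper name is hypothetical; the actual
 * patch keeps the switch statement shown above.
 */
static const u32 hclge_fw_to_mac_speed[] = {
	[HCLGE_FW_MAC_SPEED_1G]   = HCLGE_MAC_SPEED_1G,
	[HCLGE_FW_MAC_SPEED_10G]  = HCLGE_MAC_SPEED_10G,
	[HCLGE_FW_MAC_SPEED_25G]  = HCLGE_MAC_SPEED_25G,
	[HCLGE_FW_MAC_SPEED_40G]  = HCLGE_MAC_SPEED_40G,
	[HCLGE_FW_MAC_SPEED_50G]  = HCLGE_MAC_SPEED_50G,
	[HCLGE_FW_MAC_SPEED_100G] = HCLGE_MAC_SPEED_100G,
	[HCLGE_FW_MAC_SPEED_10M]  = HCLGE_MAC_SPEED_10M,
	[HCLGE_FW_MAC_SPEED_100M] = HCLGE_MAC_SPEED_100M,
	[HCLGE_FW_MAC_SPEED_200G] = HCLGE_MAC_SPEED_200G,
};

static int hclge_fw_speed_to_mac_speed(u8 speed_cmd, u32 *speed)
{
	/* reject firmware codes outside the known enum range */
	if (speed_cmd >= ARRAY_SIZE(hclge_fw_to_mac_speed))
		return -EINVAL;

	*speed = hclge_fw_to_mac_speed[speed_cmd];
	return 0;
}

Either form keeps the firmware encoding in one place; the switch form used by the patch avoids a separate table and matches the existing style of the driver.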
@@ -2582,39 +2582,39 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
 	switch (speed) {
 	case HCLGE_MAC_SPEED_10M:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 6);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
 		break;
 	case HCLGE_MAC_SPEED_100M:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 7);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
 		break;
 	case HCLGE_MAC_SPEED_1G:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 0);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
 		break;
 	case HCLGE_MAC_SPEED_10G:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 1);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
 		break;
 	case HCLGE_MAC_SPEED_25G:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 2);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
 		break;
 	case HCLGE_MAC_SPEED_40G:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 3);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
 		break;
 	case HCLGE_MAC_SPEED_50G:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 4);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
 		break;
 	case HCLGE_MAC_SPEED_100G:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 5);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
 		break;
 	case HCLGE_MAC_SPEED_200G:
 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-				HCLGE_CFG_SPEED_S, 8);
+				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
 		break;
 	default:
 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
@@ -38,22 +38,22 @@
 #define HCLGE_VECTOR_REG_OFFSET_H 0x1000
 #define HCLGE_VECTOR_VF_OFFSET 0x100000
 
-#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
-#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
-#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
-#define HCLGE_CMDQ_TX_TAIL_REG 0x27010
-#define HCLGE_CMDQ_TX_HEAD_REG 0x27014
-#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
-#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
-#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
-#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
-#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C
+#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
 
 #define HCLGE_CMDQ_INTR_STS_REG 0x27104
 #define HCLGE_CMDQ_INTR_EN_REG 0x27108
 #define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
 
 /* bar registers for common func */
-#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
 #define HCLGE_GRO_EN_REG 0x28000
 #define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008
 
@@ -266,16 +266,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
 
 #define HCLGEVF_TYPE_CRQ 0
 #define HCLGEVF_TYPE_CSQ 1
-#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
-#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c
-#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
 
 /* this bit indicates that the driver is ready for hardware reset */
 #define HCLGEVF_NIC_SW_RST_RDY_B 16
@@ -40,16 +40,16 @@ static const u8 hclgevf_hash_key[] = {
 
 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
 
-static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
-					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
-					 HCLGEVF_CMDQ_TX_DEPTH_REG,
-					 HCLGEVF_CMDQ_TX_TAIL_REG,
-					 HCLGEVF_CMDQ_TX_HEAD_REG,
-					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
-					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
-					 HCLGEVF_CMDQ_RX_DEPTH_REG,
-					 HCLGEVF_CMDQ_RX_TAIL_REG,
-					 HCLGEVF_CMDQ_RX_HEAD_REG,
+static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG,
+					 HCLGEVF_NIC_CSQ_BASEADDR_H_REG,
+					 HCLGEVF_NIC_CSQ_DEPTH_REG,
+					 HCLGEVF_NIC_CSQ_TAIL_REG,
+					 HCLGEVF_NIC_CSQ_HEAD_REG,
+					 HCLGEVF_NIC_CRQ_BASEADDR_L_REG,
+					 HCLGEVF_NIC_CRQ_BASEADDR_H_REG,
+					 HCLGEVF_NIC_CRQ_DEPTH_REG,
+					 HCLGEVF_NIC_CRQ_TAIL_REG,
+					 HCLGEVF_NIC_CRQ_HEAD_REG,
 					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
 					 HCLGEVF_VECTOR0_CMDQ_STATE_REG,
 					 HCLGEVF_CMDQ_INTR_EN_REG,
@@ -1963,7 +1963,7 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
 	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
 	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
-		 hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
+		 hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG));
 	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
 		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
 	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
@@ -33,16 +33,17 @@
 #define HCLGEVF_VECTOR_VF_OFFSET 0x100000
 
 /* bar registers for cmdq */
-#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000
-#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004
-#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008
-#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010
-#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014
-#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018
-#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C
-#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020
-#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024
-#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C
+#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
 
 #define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
 #define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
 
@@ -317,7 +318,6 @@ struct hclgevf_dev {
 
 	struct hclgevf_mac_table_cfg mac_table;
 
-	bool mbx_event_pending;
 	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
 	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
 
@@ -155,18 +155,66 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
 	return tail == hw->cmq.crq.next_to_use;
 }
 
+static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
+					struct hclge_mbx_pf_to_vf_cmd *req)
+{
+	struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
+
+	if (resp->received_resp)
+		dev_warn(&hdev->pdev->dev,
+			 "VF mbx resp flag not clear(%u)\n",
+			 req->msg.vf_mbx_msg_code);
+
+	resp->origin_mbx_msg =
+		(req->msg.vf_mbx_msg_code << 16);
+	resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
+	resp->resp_status =
+		hclgevf_resp_to_errno(req->msg.resp_status);
+	memcpy(resp->additional_info, req->msg.resp_data,
+	       HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
+	if (req->match_id) {
+		/* If match_id is not zero, it means PF support match_id.
+		 * if the match_id is right, VF get the right response, or
+		 * ignore the response. and driver will clear hdev->mbx_resp
+		 * when send next message which need response.
+		 */
+		if (req->match_id == resp->match_id)
+			resp->received_resp = true;
+	} else {
+		resp->received_resp = true;
+	}
+}
+
+static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
+				   struct hclge_mbx_pf_to_vf_cmd *req)
+{
+	/* we will drop the async msg if we find ARQ as full
+	 * and continue with next message
+	 */
+	if (atomic_read(&hdev->arq.count) >=
+	    HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+		dev_warn(&hdev->pdev->dev,
+			 "Async Q full, dropping msg(%u)\n",
+			 req->msg.code);
+		return;
+	}
+
+	/* tail the async message in arq */
+	memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
+	       HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
+	hclge_mbx_tail_ptr_move_arq(hdev->arq);
+	atomic_inc(&hdev->arq.count);
+
+	hclgevf_mbx_task_schedule(hdev);
+}
+
 void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 {
-	struct hclgevf_mbx_resp_status *resp;
 	struct hclge_mbx_pf_to_vf_cmd *req;
 	struct hclgevf_cmq_ring *crq;
 	struct hclgevf_desc *desc;
-	u16 *msg_q;
 	u16 flag;
-	u8 *temp;
-	int i;
 
-	resp = &hdev->mbx_resp;
 	crq = &hdev->hw.cmq.crq;
 
 	while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
@@ -200,69 +248,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		 */
 		switch (req->msg.code) {
 		case HCLGE_MBX_PF_VF_RESP:
-			if (resp->received_resp)
-				dev_warn(&hdev->pdev->dev,
-					 "VF mbx resp flag not clear(%u)\n",
-					 req->msg.vf_mbx_msg_code);
-			resp->received_resp = true;
-
-			resp->origin_mbx_msg =
-					(req->msg.vf_mbx_msg_code << 16);
-			resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
-			resp->resp_status =
-				hclgevf_resp_to_errno(req->msg.resp_status);
-
-			temp = (u8 *)req->msg.resp_data;
-			for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
-				resp->additional_info[i] = *temp;
-				temp++;
-			}
-
-			/* If match_id is not zero, it means PF support
-			 * match_id. If the match_id is right, VF get the
-			 * right response, otherwise ignore the response.
-			 * Driver will clear hdev->mbx_resp when send
-			 * next message which need response.
-			 */
-			if (req->match_id) {
-				if (req->match_id == resp->match_id)
-					resp->received_resp = true;
-			} else {
-				resp->received_resp = true;
-			}
+			hclgevf_handle_mbx_response(hdev, req);
 			break;
 		case HCLGE_MBX_LINK_STAT_CHANGE:
 		case HCLGE_MBX_ASSERTING_RESET:
 		case HCLGE_MBX_LINK_STAT_MODE:
 		case HCLGE_MBX_PUSH_VLAN_INFO:
 		case HCLGE_MBX_PUSH_PROMISC_INFO:
-			/* set this mbx event as pending. This is required as we
-			 * might loose interrupt event when mbx task is busy
-			 * handling. This shall be cleared when mbx task just
-			 * enters handling state.
-			 */
-			hdev->mbx_event_pending = true;
-
-			/* we will drop the async msg if we find ARQ as full
-			 * and continue with next message
-			 */
-			if (atomic_read(&hdev->arq.count) >=
-			    HCLGE_MBX_MAX_ARQ_MSG_NUM) {
-				dev_warn(&hdev->pdev->dev,
-					 "Async Q full, dropping msg(%u)\n",
-					 req->msg.code);
-				break;
-			}
-
-			/* tail the async message in arq */
-			msg_q = hdev->arq.msg_q[hdev->arq.tail];
-			memcpy(&msg_q[0], &req->msg,
-			       HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
-			hclge_mbx_tail_ptr_move_arq(hdev->arq);
-			atomic_inc(&hdev->arq.count);
-
-			hclgevf_mbx_task_schedule(hdev);
-
+			hclgevf_handle_mbx_msg(hdev, req);
 			break;
 		default:
 			dev_err(&hdev->pdev->dev,
@@ -298,11 +291,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 	u8 flag;
 	u8 idx;
 
-	/* we can safely clear it now as we are at start of the async message
-	 * processing
-	 */
-	hdev->mbx_event_pending = false;
-
 	tail = hdev->arq.tail;
 
 	/* process all the async queue messages */