Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: misc updates for -net-next

This series includes some misc updates for the HNS3 ethernet driver.

[patch 1] adds a frame end (FE) bit check before calling hns3_add_frag().
[patch 2] removes an unnecessary lock.
[patch 3] adds a little optimization for CMDQ uninitialization.
[patch 4] refactors the dump of FD tcams.
[patch 5] implements ndo_features_check ops.
[patch 6] adds VF VLAN information to the output of "ip link show".
[patch 7] adds a debug print.
[patch 8] changes when the misc interrupt status is printed while
handling a hardware error event.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2019-12-20 21:20:39 -08:00 (commit 994baea289)
7 changed files with 130 additions and 72 deletions

drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

@@ -1556,6 +1556,37 @@ static int hns3_nic_set_features(struct net_device *netdev,
 	return 0;
 }
 
+static netdev_features_t hns3_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+#define HNS3_MAX_HDR_LEN	480U
+#define HNS3_MAX_L4_HDR_LEN	60U
+
+	size_t len;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return features;
+
+	if (skb->encapsulation)
+		len = skb_inner_transport_header(skb) - skb->data;
+	else
+		len = skb_transport_header(skb) - skb->data;
+
+	/* Assume L4 is 60 bytes, as TCP is the only protocol with a
+	 * flexible value, and its max len is 60 bytes.
+	 */
+	len += HNS3_MAX_L4_HDR_LEN;
+
+	/* Hardware only supports checksum on the skb with a max header
+	 * len of 480 bytes.
+	 */
+	if (len > HNS3_MAX_HDR_LEN)
+		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+	return features;
+}
+
 static void hns3_nic_get_stats64(struct net_device *netdev,
 				 struct rtnl_link_stats64 *stats)
 {
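Note on how this hook is consumed: the core stack calls ndo_features_check per skb on the transmit path (from netif_skb_features()) and masks the returned bits into the effective feature set, so a driver can veto checksum/GSO offload for individual packets its hardware cannot handle. A self-contained userspace sketch of the policy the new function encodes (constants taken from the patch; the helper name is hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define MAX_HDR_LEN	480U	/* hardware header-length limit */
#define MAX_L4_HDR_LEN	60U	/* worst case: TCP with full options */

/* Mirror of the check above: offload only if the parsed L2/L3 header
 * length plus a worst-case L4 header fits the hardware limit.
 */
static bool hw_can_offload(unsigned int l2l3_hdr_len)
{
	return l2l3_hdr_len + MAX_L4_HDR_LEN <= MAX_HDR_LEN;
}

int main(void)
{
	printf("hdr 100: %s\n", hw_can_offload(100) ? "offload" : "software fallback");
	printf("hdr 440: %s\n", hw_can_offload(440) ? "offload" : "software fallback");
	return 0;
}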
@@ -1970,6 +2001,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
 	.ndo_do_ioctl = hns3_nic_do_ioctl,
 	.ndo_change_mtu = hns3_nic_change_mtu,
 	.ndo_set_features = hns3_nic_set_features,
+	.ndo_features_check = hns3_features_check,
 	.ndo_get_stats64 = hns3_nic_get_stats64,
 	.ndo_setup_tc = hns3_nic_setup_tc,
 	.ndo_set_rx_mode = hns3_nic_set_rx_mode,
@@ -2788,7 +2820,6 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 			  unsigned char *va)
 {
-#define HNS3_NEED_ADD_FRAG	1
 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
 	struct net_device *netdev = ring_to_netdev(ring);
 	struct sk_buff *skb;
@@ -2832,33 +2863,19 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 				 desc_cb);
 	ring_ptr_move_fw(ring, next_to_clean);
 
-	return HNS3_NEED_ADD_FRAG;
+	return 0;
 }
 
-static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
-			 bool pending)
+static int hns3_add_frag(struct hns3_enet_ring *ring)
 {
 	struct sk_buff *skb = ring->skb;
 	struct sk_buff *head_skb = skb;
 	struct sk_buff *new_skb;
 	struct hns3_desc_cb *desc_cb;
-	struct hns3_desc *pre_desc;
+	struct hns3_desc *desc;
 	u32 bd_base_info;
-	int pre_bd;
-
-	/* if there is pending bd, the SW param next_to_clean has moved
-	 * to next and the next is NULL
-	 */
-	if (pending) {
-		pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
-			 ring->desc_num;
-		pre_desc = &ring->desc[pre_bd];
-		bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
-	} else {
-		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-	}
-
-	while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
+	do {
 		desc = &ring->desc[ring->next_to_clean];
 		desc_cb = &ring->desc_cb[ring->next_to_clean];
 		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2895,7 +2912,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
 		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
 		ring_ptr_move_fw(ring, next_to_clean);
 		ring->pending_buf++;
-	}
+	} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
 
 	return 0;
 }
@@ -3063,28 +3080,23 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
 		if (ret < 0) /* alloc buffer fail */
 			return ret;
-		if (ret > 0) { /* need add frag */
-			ret = hns3_add_frag(ring, desc, false);
+		if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
+			ret = hns3_add_frag(ring);
 			if (ret)
 				return ret;
-
-			/* As the head data may be changed when GRO enable, copy
-			 * the head data in after other data rx completed
-			 */
-			memcpy(skb->data, ring->va,
-			       ALIGN(ring->pull_len, sizeof(long)));
 		}
 	} else {
-		ret = hns3_add_frag(ring, desc, true);
+		ret = hns3_add_frag(ring);
 		if (ret)
 			return ret;
+	}
 
-		/* As the head data may be changed when GRO enable, copy
-		 * the head data in after other data rx completed
-		 */
-		if (skb->len > HNS3_RX_HEAD_SIZE)
-			memcpy(skb->data, ring->va,
-			       ALIGN(ring->pull_len, sizeof(long)));
-	}
+	/* As the head data may be changed when GRO enable, copy
+	 * the head data in after other data rx completed
+	 */
+	if (skb->len > HNS3_RX_HEAD_SIZE)
+		memcpy(skb->data, ring->va,
+		       ALIGN(ring->pull_len, sizeof(long)));
 
 	ret = hns3_handle_bdinfo(ring, skb);
 	if (unlikely(ret)) {
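Why the refactor is safe: an RX frame spans one or more buffer descriptors, and only the last BD of a frame carries the FE (frame end) bit in bd_base_info. Once the caller checks FE before calling hns3_add_frag(), the function can simply consume BDs in a do/while until it sees FE, with no pending/pre_desc bookkeeping. A standalone sketch of that termination rule (the bit position is illustrative, not the hardware value):

#include <stdint.h>
#include <stdio.h>

#define RXD_FE_B	3	/* illustrative bit index */

/* Count the BDs a frame occupies: consume descriptors until one has
 * the frame-end bit set, mirroring the new do/while loop shape.
 */
static int frame_bd_count(const uint32_t *bd_base_info, int ring_size)
{
	int n = 0;

	do {
		if (n == ring_size)
			return -1;	/* no FE bit found: malformed chain */
		n++;
	} while (!(bd_base_info[n - 1] & (1u << RXD_FE_B)));

	return n;
}

int main(void)
{
	uint32_t ring[] = { 0, 0, 1u << RXD_FE_B, 0 };

	printf("frame spans %d BDs\n", frame_bd_count(ring, 4));	/* 3 */
	return 0;
}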
@@ -3590,7 +3602,12 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
 			continue;
 
-		hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
+		/* Since the mapping can be overwritten, when we fail to get
+		 * the chain between vector and ring, we should go on to deal
+		 * with the remaining options.
+		 */
+		if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+			dev_warn(priv->dev, "failed to get ring chain\n");
 
 		h->ae_algo->ops->unmap_ring_from_vector(h,
 			tqp_vector->vector_irq, &vector_ring_chain);

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c

@@ -479,19 +479,6 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
 	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
 }
 
-static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
-{
-	spin_lock(&ring->lock);
-	hclge_free_cmd_desc(ring);
-	spin_unlock(&ring->lock);
-}
-
-static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
-{
-	hclge_destroy_queue(&hw->cmq.csq);
-	hclge_destroy_queue(&hw->cmq.crq);
-}
-
 void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
 	spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -501,5 +488,6 @@ void hclge_cmd_uninit(struct hclge_dev *hdev)
 	spin_unlock(&hdev->hw.cmq.crq.lock);
 	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 
-	hclge_destroy_cmd_queue(&hdev->hw);
+	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
+	hclge_free_cmd_desc(&hdev->hw.cmq.crq);
 }
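Why dropping the per-ring lock is safe here: hclge_cmd_uninit() first quiesces the command queue with both cmq locks held (the VF counterpart at the end of this page shows the same pattern: set the CMD_DISABLE state bit, then clear the queue registers), so by the time the descriptors are freed no submitter can still reach them. A userspace sketch of this quiesce-then-free pattern (types and names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct cmq_ring {
	pthread_mutex_t lock;
	void *desc;
	bool disabled;
};

/* Mark the ring unusable under its lock, then free lockless: once
 * 'disabled' is observed, no submit path touches 'desc' again.
 */
static void cmq_ring_uninit(struct cmq_ring *ring)
{
	pthread_mutex_lock(&ring->lock);
	ring->disabled = true;
	pthread_mutex_unlock(&ring->lock);

	free(ring->desc);	/* no lock needed: ring is quiesced */
	ring->desc = NULL;
}

int main(void)
{
	struct cmq_ring ring = { PTHREAD_MUTEX_INITIALIZER, malloc(64), false };

	cmq_ring_uninit(&ring);
	return 0;
}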

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c

@@ -886,8 +886,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
 	}
 }
 
-static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
-				   bool sel_x, u32 loc)
+static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+				  bool sel_x, u32 loc)
 {
 	struct hclge_fd_tcam_config_1_cmd *req1;
 	struct hclge_fd_tcam_config_2_cmd *req2;
@@ -912,7 +912,7 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
 	if (ret)
-		return;
+		return ret;
 
 	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
 		 sel_x ? "x" : "y", loc);
@@ -931,16 +931,76 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
 	req = (u32 *)req3->tcam_data;
 	for (i = 0; i < 5; i++)
 		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+	return ret;
+}
+
+static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
+{
+	struct hclge_fd_rule *rule;
+	struct hlist_node *node;
+	int cnt = 0;
+
+	spin_lock_bh(&hdev->fd_rule_lock);
+	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+		rule_locs[cnt] = rule->location;
+		cnt++;
+	}
+	spin_unlock_bh(&hdev->fd_rule_lock);
+
+	if (cnt != hdev->hclge_fd_rule_num)
+		return -EINVAL;
+
+	return cnt;
 }
 
 static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
 {
-	u32 i;
+	int i, ret, rule_cnt;
+	u16 *rule_locs;
+
+	if (!hnae3_dev_fd_supported(hdev)) {
+		dev_err(&hdev->pdev->dev,
+			"Only FD-supported dev supports dump fd tcam\n");
+		return;
+	}
+
+	if (!hdev->hclge_fd_rule_num ||
+	    !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+		return;
+
+	rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+			    sizeof(u16), GFP_KERNEL);
+	if (!rule_locs)
+		return;
 
-	for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
-		hclge_dbg_fd_tcam_read(hdev, 0, true, i);
-		hclge_dbg_fd_tcam_read(hdev, 0, false, i);
+	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
+	if (rule_cnt <= 0) {
+		dev_err(&hdev->pdev->dev,
+			"failed to get rule number, ret = %d\n", rule_cnt);
+		kfree(rule_locs);
+		return;
+	}
+
+	for (i = 0; i < rule_cnt; i++) {
+		ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key x, ret = %d\n", ret);
+			kfree(rule_locs);
+			return;
+		}
+
+		ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key y, ret = %d\n", ret);
+			kfree(rule_locs);
+			return;
+		}
 	}
+
+	kfree(rule_locs);
 }
 
 void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
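The dump is now two-phase: snapshot the rule locations into a heap buffer while holding fd_rule_lock, then issue the slow firmware reads per location with the lock dropped, so the rule list is never held across hclge_cmd_send(). A standalone sketch of that pattern (list and types simplified):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rule_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned short live_rule_locs[] = { 2, 7, 31 };	/* stand-in rule list */
static int live_rule_num = 3;

/* Phase 1: copy the volatile list under its lock. */
static int snapshot_rules(unsigned short *out, int max)
{
	int i, cnt = 0;

	pthread_mutex_lock(&rule_lock);
	for (i = 0; i < live_rule_num && cnt < max; i++)
		out[cnt++] = live_rule_locs[i];
	pthread_mutex_unlock(&rule_lock);

	return cnt;
}

int main(void)
{
	unsigned short locs[8];
	int i, cnt = snapshot_rules(locs, 8);

	/* Phase 2: slow per-rule work, no lock held. */
	for (i = 0; i < cnt; i++)
		printf("dump tcam entry at location %u\n", locs[i]);
	return 0;
}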

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c

@@ -2944,6 +2944,9 @@ static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
 	ivf->trusted = vport->vf_info.trusted;
 	ivf->min_tx_rate = 0;
 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
+	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
 
 	return 0;
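These three assignments are what patch 6 surfaces to userspace: ndo_get_vf_config() fills struct ifla_vf_info, and the rtnetlink core serializes its vlan/qos/vlan_proto fields into the IFLA_VF_VLAN attributes that "ip link show" prints per VF. For reference, the receiving structure (from include/linux/if_link.h of this era, quoted from memory and abridged to the relevant fields):

struct ifla_vf_info {
	__u32 vf;
	__u8 mac[32];
	__u32 vlan;		/* now taken from port_base_vlan_cfg */
	__u32 qos;
	__u32 max_tx_rate;
	__u32 trusted;
	__be16 vlan_proto;	/* network byte order, hence the htons() */
};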
@@ -3002,8 +3005,6 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 
 	/* check for vector0 msix event source */
 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
-		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
-			 msix_src_reg);
 		*clearval = msix_src_reg;
 		return HCLGE_VECTOR0_EVENT_ERR;
 	}
@@ -3502,10 +3503,15 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
 	/* first, resolve any unknown reset type to the known type(s) */
 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
+					HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+
 		/* we will intentionally ignore any errors from this function
 		 * as we will end up in *some* reset request in any case
 		 */
-		hclge_handle_hw_msix_error(hdev, addr);
+		if (hclge_handle_hw_msix_error(hdev, addr))
+			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
+				 msix_sts_reg);
+
 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
 		/* We deferred the clearing of the error event which caused
 		 * interrupt since it was not possible to do that in
@@ -7534,7 +7540,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
 	struct hclge_vport *vport;
 	int i;
 
-	mutex_lock(&hdev->vport_cfg_mutex);
 	for (i = 0; i < hdev->num_alloc_vport; i++) {
 		vport = &hdev->vport[i];
 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
@@ -7547,7 +7552,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
 			kfree(mac);
 		}
 	}
-	mutex_unlock(&hdev->vport_cfg_mutex);
 }
 
 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
@@ -8308,7 +8312,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
 	struct hclge_vport *vport;
 	int i;
 
-	mutex_lock(&hdev->vport_cfg_mutex);
 	for (i = 0; i < hdev->num_alloc_vport; i++) {
 		vport = &hdev->vport[i];
 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -8316,7 +8319,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
 			kfree(vlan);
 		}
 	}
-	mutex_unlock(&hdev->vport_cfg_mutex);
 }
 
 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
@@ -8328,7 +8330,6 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
 	u16 state, vlan_id;
 	int i;
 
-	mutex_lock(&hdev->vport_cfg_mutex);
 	for (i = 0; i < hdev->num_alloc_vport; i++) {
 		vport = &hdev->vport[i];
 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
@@ -8354,8 +8355,6 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
 			break;
 		}
 	}
-
-	mutex_unlock(&hdev->vport_cfg_mutex);
 }
 
 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -9390,7 +9389,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
 
 	mutex_init(&hdev->vport_lock);
-	mutex_init(&hdev->vport_cfg_mutex);
 	spin_lock_init(&hdev->fd_rule_lock);
 
 	ret = hclge_pci_init(hdev);
@@ -9943,7 +9941,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	mutex_destroy(&hdev->vport_lock);
 	hclge_uninit_vport_mac_table(hdev);
 	hclge_uninit_vport_vlan_table(hdev);
-	mutex_destroy(&hdev->vport_cfg_mutex);
 
 	ae_dev->priv = NULL;
 }

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h

@@ -821,8 +821,6 @@ struct hclge_dev {
 	u16 share_umv_size;
 	struct mutex umv_mutex; /* protect share_umv_size */
 
-	struct mutex vport_cfg_mutex; /* Protect stored vf table */
-
 	DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
 		      HCLGE_MAC_TNL_LOG_SIZE);

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c

@@ -797,13 +797,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 			hclge_get_link_mode(vport, req);
 			break;
 		case HCLGE_MBX_GET_VF_FLR_STATUS:
-			mutex_lock(&hdev->vport_cfg_mutex);
 			hclge_rm_vport_all_mac_table(vport, true,
 						     HCLGE_MAC_ADDR_UC);
 			hclge_rm_vport_all_mac_table(vport, true,
 						     HCLGE_MAC_ADDR_MC);
 			hclge_rm_vport_all_vlan_table(vport, true);
-			mutex_unlock(&hdev->vport_cfg_mutex);
 			break;
 		case HCLGE_MBX_GET_MEDIA_TYPE:
 			ret = hclge_get_vf_media_type(vport, req);

drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c

@@ -443,7 +443,7 @@ void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
 {
 	spin_lock_bh(&hdev->hw.cmq.csq.lock);
 	spin_lock(&hdev->hw.cmq.crq.lock);
-	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
 	hclgevf_cmd_uninit_regs(&hdev->hw);
 	spin_unlock(&hdev->hw.cmq.crq.lock);
 	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
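This one-line VF fix deserves a note: uninit must set HCLGEVF_STATE_CMD_DISABLE, not clear it, so that any command submitted during or after teardown is refused rather than touching a dead queue. A self-contained sketch of the invariant (C11 atomics standing in for the kernel's bitops; names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool cmd_disabled;

/* Submit path: bail out once the disable flag is visible. */
static int cmd_send(const char *what)
{
	if (atomic_load(&cmd_disabled)) {
		fprintf(stderr, "cmq disabled, refusing '%s'\n", what);
		return -1;
	}
	printf("sent '%s'\n", what);
	return 0;
}

/* Teardown: raise the flag (the bug was clearing it instead). */
static void cmd_uninit(void)
{
	atomic_store(&cmd_disabled, true);
}

int main(void)
{
	cmd_send("query status");	/* succeeds */
	cmd_uninit();
	cmd_send("query status");	/* refused */
	return 0;
}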