diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1fedefd499ac..ac61c9352535 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -116,6 +116,9 @@ enum board_idx { BCM57508, BCM57504, BCM57502, + BCM57508_NPAR, + BCM57504_NPAR, + BCM57502_NPAR, BCM58802, BCM58804, BCM58808, @@ -161,6 +164,9 @@ static const struct { [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, + [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, + [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, + [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, @@ -209,6 +215,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, + { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV @@ -828,16 +840,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, return 0; } -static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, - u32 agg_bufs) +static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + u16 cp_cons, u16 curr) +{ + struct rx_agg_cmp *agg; + + cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + return agg; +} + +static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 agg_id, u16 curr) +{ + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; + + return &tpa_info->agg_arr[curr]; +} + +static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, + u16 start, u32 agg_bufs, bool tpa) { struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt *bp = bnapi->bp; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; u16 sw_prod = rxr->rx_sw_agg_prod; + bool p5_tpa = false; u32 i; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) + p5_tpa = true; + for (i = 0; i < agg_bufs; i++) { u16 cons; struct rx_agg_cmp *agg; @@ -845,8 +882,10 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, struct rx_bd *prod_bd; struct page *page; - agg = (struct rx_agg_cmp *) - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); + else + agg = bnxt_get_agg(bp, cpr, idx, start + i); cons = agg->rx_agg_cmp_opaque; __clear_bit(cons, rxr->rx_agg_bmap); @@ -874,7 +913,6 @@ 
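The reworked bnxt_reuse_rx_agg_bufs() above no longer walks the completion ring directly; it asks bnxt_get_agg() or bnxt_get_tpa_agg_p5() for the i-th aggregation descriptor depending on the chip. Below is a minimal userspace model of those two lookup paths. The ring math is a simplified stand-in for the driver's ADV_RAW_CMP()/RING_CMP()/CP_RING()/CP_IDX() macros, and the struct names and ring size are illustrative, not the driver's.

#include <stdint.h>

#define RING_SZ		256	/* assumption: power-of-2 CP ring */
#define MAX_FRAGS	17	/* stand-in for MAX_SKB_FRAGS */

struct agg_desc { uint32_t opaque, len_flags_type; };

struct tpa_slot {
	struct agg_desc agg_arr[MAX_FRAGS];
	uint8_t agg_count;
};

/* Legacy chips: the agg descriptors trail the RX completion in the CP
 * ring, so the i-th one sits (idx + curr) modulo the ring size. */
static struct agg_desc *get_agg_ring(struct agg_desc *cp_ring, uint16_t idx,
				     uint16_t curr)
{
	return &cp_ring[(idx + curr) & (RING_SZ - 1)];
}

/* P5 TPA: the descriptors were buffered earlier (by the TPA_AGG handler)
 * into the aggregation's private array, so "idx" is the software agg_id. */
static struct agg_desc *get_agg_tpa(struct tpa_slot *rx_tpa, uint16_t idx,
				    uint16_t curr)
{
	return &rx_tpa[idx].agg_arr[curr];
}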
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons, prod = NEXT_RX_AGG(prod); sw_prod = NEXT_RX_AGG(sw_prod); - cp_cons = NEXT_CMP(cp_cons); } rxr->rx_agg_prod = prod; rxr->rx_sw_agg_prod = sw_prod; @@ -957,15 +995,19 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - struct sk_buff *skb, u16 cp_cons, - u32 agg_bufs) + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) { struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; + bool p5_tpa = false; u32 i; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) + p5_tpa = true; + for (i = 0; i < agg_bufs; i++) { u16 cons, frag_len; struct rx_agg_cmp *agg; @@ -973,8 +1015,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct page *page; dma_addr_t mapping; - agg = (struct rx_agg_cmp *) - &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); + else + agg = bnxt_get_agg(bp, cpr, idx, i); cons = agg->rx_agg_cmp_opaque; frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; @@ -1008,7 +1052,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, * allocated already. */ rxr->rx_agg_prod = prod; - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i); + bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); return NULL; } @@ -1021,7 +1065,6 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, skb->truesize += PAGE_SIZE; prod = NEXT_RX_AGG(prod); - cp_cons = NEXT_CMP(cp_cons); } rxr->rx_agg_prod = prod; return skb; @@ -1081,9 +1124,10 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { struct rx_tpa_end_cmp *tpa_end = cmp; - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_AGG_BUFS) >> - RX_TPA_END_CMP_AGG_BUFS_SHIFT; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return 0; + + agg_bufs = TPA_END_AGG_BUFS(tpa_end); } if (agg_bufs) { @@ -1120,26 +1164,60 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) rxr->rx_next_cons = 0xffff; } +static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + u16 idx = agg_id & MAX_TPA_P5_MASK; + + if (test_bit(idx, map->agg_idx_bmap)) + idx = find_first_zero_bit(map->agg_idx_bmap, + BNXT_AGG_IDX_BMAP_SIZE); + __set_bit(idx, map->agg_idx_bmap); + map->agg_id_tbl[agg_id] = idx; + return idx; +} + +static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + __clear_bit(idx, map->agg_idx_bmap); +} + +static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + return map->agg_id_tbl[agg_id]; +} + static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, struct rx_tpa_start_cmp *tpa_start, struct rx_tpa_start_cmp_ext *tpa_start1) { - u8 agg_id = TPA_START_AGG_ID(tpa_start); - u16 cons, prod; - struct bnxt_tpa_info *tpa_info; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct bnxt_tpa_info *tpa_info; + u16 cons, prod, agg_id; struct rx_bd *prod_bd; dma_addr_t mapping; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + agg_id = TPA_START_AGG_ID_P5(tpa_start); + agg_id = bnxt_alloc_agg_idx(rxr, agg_id); + } else { + agg_id = 
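The three helpers added above (bnxt_alloc_agg_idx/free/lookup) exist because P5 hardware hands out a 16-bit aggregation ID while the driver keeps at most bp->max_tpa (up to 256) rx_tpa slots. A runnable userspace sketch of the same hash-then-scan scheme follows; test_bit_()/set_bit_() here are local helpers rather than the kernel's, and the fallback scan is written to cover all MAX_TPA_P5 bits for clarity.

#include <stdint.h>

#define MAX_TPA_P5	256
#define MAX_TPA_P5_MASK	(MAX_TPA_P5 - 1)
#define LONG_BITS	(8 * sizeof(unsigned long))

struct idx_map {
	uint16_t agg_id_tbl[1024];	/* hardware agg_id -> sw index */
	unsigned long bmap[MAX_TPA_P5 / LONG_BITS];	/* indices in use */
};

static int test_bit_(const unsigned long *b, unsigned int n)
{ return (b[n / LONG_BITS] >> (n % LONG_BITS)) & 1; }
static void set_bit_(unsigned long *b, unsigned int n)
{ b[n / LONG_BITS] |= 1UL << (n % LONG_BITS); }
static void clear_bit_(unsigned long *b, unsigned int n)
{ b[n / LONG_BITS] &= ~(1UL << (n % LONG_BITS)); }

/* TPA start: hash the 16-bit agg_id down to its low 8 bits; on collision
 * fall back to the first free index, then record the mapping. */
static uint16_t alloc_agg_idx(struct idx_map *map, uint16_t agg_id)
{
	uint16_t idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit_(map->bmap, idx)) {
		idx = 0;
		/* a free index should exist: at most max_tpa
		 * aggregations are in flight at once */
		while (idx < MAX_TPA_P5 - 1 && test_bit_(map->bmap, idx))
			idx++;
	}
	set_bit_(map->bmap, idx);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

/* TPA end: release the index; the table entry stays valid until the
 * next TPA start reuses that agg_id (mirrors bnxt_free_agg_idx()). */
static void free_agg_idx(struct idx_map *map, uint16_t idx)
{ clear_bit_(map->bmap, idx); }

static uint16_t lookup_agg_idx(const struct idx_map *map, uint16_t agg_id)
{ return map->agg_id_tbl[agg_id]; }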
TPA_START_AGG_ID(tpa_start); + } cons = tpa_start->rx_tpa_start_cmp_opaque; prod = rxr->rx_prod; cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf = &rxr->rx_buf_ring[prod]; tpa_info = &rxr->rx_tpa[agg_id]; - if (unlikely(cons != rxr->rx_next_cons)) { - netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", - cons, rxr->rx_next_cons); + if (unlikely(cons != rxr->rx_next_cons || + TPA_START_ERROR(tpa_start))) { + netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", + cons, rxr->rx_next_cons, + TPA_START_ERROR_CODE(tpa_start1)); bnxt_sched_reset(bp, rxr); return; } @@ -1184,6 +1262,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); + tpa_info->agg_count = 0; rxr->rx_prod = NEXT_RX(prod); cons = NEXT_RX(cons); @@ -1195,13 +1274,37 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, cons_rx_buf->data = NULL; } -static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons, - u32 agg_bufs) +static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); } +#ifdef CONFIG_INET +static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) +{ + struct udphdr *uh = NULL; + + if (ip_proto == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } +} +#endif + static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, int payload_off, int tcp_ts, struct sk_buff *skb) @@ -1259,28 +1362,39 @@ static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, } if (inner_mac_off) { /* tunnel */ - struct udphdr *uh = NULL; __be16 proto = *((__be16 *)(skb->data + outer_ip_off - ETH_HLEN - 2)); - if (proto == htons(ETH_P_IP)) { - struct iphdr *iph = (struct iphdr *)skb->data; + bnxt_gro_tunnel(skb, proto); + } +#endif + return skb; +} - if (iph->protocol == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; +static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, + int payload_off, int tcp_ts, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + u16 outer_ip_off, inner_ip_off, inner_mac_off; + u32 hdr_info = tpa_info->hdr_info; + int iphdr_len, nw_off; - if (iph->nexthdr == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } - if (uh) { - if (uh->check) - skb_shinfo(skb)->gso_type |= - SKB_GSO_UDP_TUNNEL_CSUM; - else - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; - } + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); + + nw_off = inner_ip_off - ETH_HLEN; + skb_set_network_header(skb, nw_off); + iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
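bnxt_gro_tunnel() above consolidates the UDP-tunnel detection that was previously duplicated in each GRO handler. Here is a self-contained model of the decision it makes: locate the UDP header directly behind the outer IP header (no IPv4 options are expected at this point) and pick the checksummed or plain UDP-tunnel GSO type by whether the outer checksum field is nonzero. The struct layouts are local stand-ins for the kernel's iphdr/udphdr, and the IPv6 branch is elided.

#include <stdint.h>
#include <arpa/inet.h>		/* htons() */

#define ETH_P_IP_	0x0800
#define IPPROTO_UDP_	17

struct ip4 {
	uint8_t  ver_ihl, tos;
	uint16_t tot_len, id, frag_off;
	uint8_t  ttl, protocol;
	uint16_t check;
	uint32_t saddr, daddr;
};

struct udp4 { uint16_t source, dest, len, check; };

enum gso_type { GSO_NONE, GSO_UDP_TUNNEL, GSO_UDP_TUNNEL_CSUM };

static enum gso_type classify_tunnel(const uint8_t *l3, uint16_t proto)
{
	const struct udp4 *uh = NULL;

	if (proto == htons(ETH_P_IP_)) {
		const struct ip4 *iph = (const struct ip4 *)l3;

		if (iph->protocol == IPPROTO_UDP_)
			uh = (const struct udp4 *)(iph + 1);
	}
	/* IPv6 branch elided: same test against nexthdr == IPPROTO_UDP */
	if (!uh)
		return GSO_NONE;
	return uh->check ? GSO_UDP_TUNNEL_CSUM : GSO_UDP_TUNNEL;
}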
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr); + skb_set_transport_header(skb, nw_off + iphdr_len); + + if (inner_mac_off) { /* tunnel */ + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - + ETH_HLEN - 2)); + + bnxt_gro_tunnel(skb, proto); } #endif return skb; @@ -1327,28 +1441,8 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, return NULL; } - if (nw_off) { /* tunnel */ - struct udphdr *uh = NULL; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = (struct iphdr *)skb->data; - - if (iph->protocol == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } else { - struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; - - if (iph->nexthdr == IPPROTO_UDP) - uh = (struct udphdr *)(iph + 1); - } - if (uh) { - if (uh->check) - skb_shinfo(skb)->gso_type |= - SKB_GSO_UDP_TUNNEL_CSUM; - else - skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; - } - } + if (nw_off) /* tunnel */ + bnxt_gro_tunnel(skb, skb->protocol); #endif return skb; } @@ -1371,9 +1465,10 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, skb_shinfo(skb)->gso_size = le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); skb_shinfo(skb)->gso_type = tpa_info->gso_type; - payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_PAYLOAD_OFFSET) >> - RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; + if (bp->flags & BNXT_FLAG_CHIP_P5) + payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); + else + payload_off = TPA_END_PAYLOAD_OFF(tpa_end); skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); if (likely(skb)) tcp_gro_complete(skb); @@ -1401,14 +1496,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, { struct bnxt_napi *bnapi = cpr->bnapi; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - u8 agg_id = TPA_END_AGG_ID(tpa_end); u8 *data_ptr, agg_bufs; - u16 cp_cons = RING_CMP(*raw_cons); unsigned int len; struct bnxt_tpa_info *tpa_info; dma_addr_t mapping; struct sk_buff *skb; + u16 idx = 0, agg_id; void *data; + bool gro; if (unlikely(bnapi->in_reset)) { int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); @@ -1418,26 +1513,43 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return NULL; } - tpa_info = &rxr->rx_tpa[agg_id]; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + agg_id = TPA_END_AGG_ID_P5(tpa_end); + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); + tpa_info = &rxr->rx_tpa[agg_id]; + if (unlikely(agg_bufs != tpa_info->agg_count)) { + netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", + agg_bufs, tpa_info->agg_count); + agg_bufs = tpa_info->agg_count; + } + tpa_info->agg_count = 0; + *event |= BNXT_AGG_EVENT; + bnxt_free_agg_idx(rxr, agg_id); + idx = agg_id; + gro = !!(bp->flags & BNXT_FLAG_GRO); + } else { + agg_id = TPA_END_AGG_ID(tpa_end); + agg_bufs = TPA_END_AGG_BUFS(tpa_end); + tpa_info = &rxr->rx_tpa[agg_id]; + idx = RING_CMP(*raw_cons); + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) + return ERR_PTR(-EBUSY); + + *event |= BNXT_AGG_EVENT; + idx = NEXT_CMP(idx); + } + gro = !!TPA_END_GRO(tpa_end); + } data = tpa_info->data; data_ptr = tpa_info->data_ptr; prefetch(data_ptr); len = tpa_info->len; mapping = tpa_info->mapping; - agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT; - - if (agg_bufs) { - if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) - return ERR_PTR(-EBUSY); - - *event |= BNXT_AGG_EVENT; - cp_cons = NEXT_CMP(cp_cons); - } - if (unlikely(agg_bufs > MAX_SKB_FRAGS 
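Two chip-dependent details in the bnxt_tpa_end() rework above: the payload offset moves from the misc_v1 word of the first TPA-end completion to the dup_acks word of the second one on P5, and P5 completions carry no per-aggregation GRO flag, so the driver falls back to the global BNXT_FLAG_GRO. A little-endian-host sketch of the offset extraction (le32_to_cpu() dropped for brevity); the mask happens to be the same in both layouts per the defines later in bnxt.h:

#include <stdint.h>

#define PAYLOAD_OFF_MASK	(0xffu << 16)
#define PAYLOAD_OFF_SHIFT	16

static uint32_t tpa_end_payload_off(int chip_p5, uint32_t misc_v1,
				    uint32_t dup_acks)
{
	uint32_t word = chip_p5 ? dup_acks : misc_v1;

	return (word & PAYLOAD_OFF_MASK) >> PAYLOAD_OFF_SHIFT;
}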
|| TPA_END_ERRORS(tpa_end1))) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); if (agg_bufs > MAX_SKB_FRAGS) netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", agg_bufs, (int)MAX_SKB_FRAGS); @@ -1447,7 +1559,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } } else { @@ -1456,7 +1568,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } @@ -1471,7 +1583,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { kfree(data); - bnxt_abort_tpa(cpr, cp_cons, agg_bufs); + bnxt_abort_tpa(cpr, idx, agg_bufs); return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1479,7 +1591,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). */ return NULL; @@ -1508,12 +1620,24 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; } - if (TPA_END_GRO(tpa_end)) + if (gro) skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); return skb; } +static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + struct rx_agg_cmp *rx_agg) +{ + u16 agg_id = TPA_AGG_AGG_ID(rx_agg); + struct bnxt_tpa_info *tpa_info; + + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + tpa_info = &rxr->rx_tpa[agg_id]; + BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); + tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; +} + static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, struct sk_buff *skb) { @@ -1555,6 +1679,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rxcmp = (struct rx_cmp *) &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cmp_type = RX_CMP_TYPE(rxcmp); + + if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { + bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); + goto next_rx_no_prod_no_len; + } + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); cp_cons = RING_CMP(tmp_raw_cons); rxcmp1 = (struct rx_cmp_ext *) @@ -1563,8 +1694,6 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) return -EBUSY; - cmp_type = RX_CMP_TYPE(rxcmp); - prod = rxr->rx_prod; if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) { @@ -1623,7 +1752,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, + false); rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { @@ -1646,7 +1776,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, + agg_bufs, false); rc = -ENOMEM; goto next_rx; } @@ -1666,7 +1797,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs); + skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, 
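bnxt_tpa_agg() above is the P5 buffering step that makes the earlier agg_arr lookups possible: TPA_AGG completions (new type 22) arrive interleaved on the completion ring and are copied aside until TPA end. A runnable model, with an assert standing in for the driver's BUG_ON():

#include <assert.h>
#include <stdint.h>

#define MAX_FRAGS 17		/* stand-in for MAX_SKB_FRAGS */

struct agg_cmp { uint32_t len_flags_type, opaque, v, unused; };

struct tpa_info {
	struct agg_cmp agg_arr[MAX_FRAGS];
	uint8_t agg_count;
};

/* Copy the 16-byte completion into the aggregation's private array; the
 * descriptors are replayed later by bnxt_rx_pages() or, on error, by
 * bnxt_reuse_rx_agg_bufs(). */
static void tpa_agg(struct tpa_info *tpa_info, const struct agg_cmp *rx_agg)
{
	assert(tpa_info->agg_count < MAX_FRAGS);
	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}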
agg_bufs, false); if (!skb) { rc = -ENOMEM; goto next_rx; @@ -2325,10 +2456,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_tpa_idx_map *map; int j; if (rxr->rx_tpa) { - for (j = 0; j < MAX_TPA; j++) { + for (j = 0; j < bp->max_tpa; j++) { struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[j]; u8 *data = tpa_info->data; @@ -2395,6 +2527,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) __free_page(rxr->rx_page); rxr->rx_page = NULL; } + map = rxr->rx_tpa_idx_map; + if (map) + memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); } } @@ -2483,6 +2618,61 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) return 0; } +static void bnxt_free_tpa_info(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + kfree(rxr->rx_tpa_idx_map); + rxr->rx_tpa_idx_map = NULL; + if (rxr->rx_tpa) { + kfree(rxr->rx_tpa[0].agg_arr); + rxr->rx_tpa[0].agg_arr = NULL; + } + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; + } +} + +static int bnxt_alloc_tpa_info(struct bnxt *bp) +{ + int i, j, total_aggs = 0; + + bp->max_tpa = MAX_TPA; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + if (!bp->max_tpa_v2) + return 0; + bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); + total_aggs = bp->max_tpa * MAX_SKB_FRAGS; + } + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct rx_agg_cmp *agg; + + rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + return -ENOMEM; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + continue; + agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); + rxr->rx_tpa[0].agg_arr = agg; + if (!agg) + return -ENOMEM; + for (j = 1; j < bp->max_tpa; j++) + rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), + GFP_KERNEL); + if (!rxr->rx_tpa_idx_map) + return -ENOMEM; + } + return 0; +} + static void bnxt_free_rx_rings(struct bnxt *bp) { int i; @@ -2490,6 +2680,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp) if (!bp->rx_ring) return; + bnxt_free_tpa_info(bp); for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; @@ -2503,9 +2694,6 @@ static void bnxt_free_rx_rings(struct bnxt *bp) page_pool_destroy(rxr->page_pool); rxr->page_pool = NULL; - kfree(rxr->rx_tpa); - rxr->rx_tpa = NULL; - kfree(rxr->rx_agg_bmap); rxr->rx_agg_bmap = NULL; @@ -2539,7 +2727,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, static int bnxt_alloc_rx_rings(struct bnxt *bp) { - int i, rc, agg_rings = 0, tpa_rings = 0; + int i, rc = 0, agg_rings = 0; if (!bp->rx_ring) return -ENOMEM; @@ -2547,9 +2735,6 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) if (bp->flags & BNXT_FLAG_AGG_RINGS) agg_rings = 1; - if (bp->flags & BNXT_FLAG_TPA) - tpa_rings = 1; - for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; @@ -2591,17 +2776,11 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); if (!rxr->rx_agg_bmap) return -ENOMEM; - - if (tpa_rings) { - rxr->rx_tpa = kcalloc(MAX_TPA, - sizeof(struct bnxt_tpa_info), - GFP_KERNEL); - if (!rxr->rx_tpa) - return -ENOMEM; - } } } - return 0; + if (bp->flags & BNXT_FLAG_TPA) + rc = bnxt_alloc_tpa_info(bp); + return rc; } 
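bnxt_alloc_tpa_info() above makes a single allocation of max_tpa * MAX_SKB_FRAGS aggregation entries and points every TPA slot into it, which is why bnxt_free_tpa_info() frees only rx_tpa[0].agg_arr. A userspace model of that carving, with calloc()/free() in place of kcalloc()/kfree():

#include <stdint.h>
#include <stdlib.h>

#define MAX_FRAGS 17		/* stand-in for MAX_SKB_FRAGS */

struct agg_cmp { uint32_t w[4]; };
struct tpa_info { struct agg_cmp *agg_arr; /* ... */ };

static int alloc_tpa_info(struct tpa_info *rx_tpa, int max_tpa)
{
	struct agg_cmp *agg;

	agg = calloc((size_t)max_tpa * MAX_FRAGS, sizeof(*agg));
	if (!agg)
		return -1;
	for (int j = 0; j < max_tpa; j++)
		rx_tpa[j].agg_arr = agg + j * MAX_FRAGS;
	return 0;
}

static void free_tpa_info(struct tpa_info *rx_tpa)
{
	free(rx_tpa[0].agg_arr);	/* one allocation backs every slot */
	rx_tpa[0].agg_arr = NULL;
}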
static void bnxt_free_tx_rings(struct bnxt *bp) @@ -2953,7 +3132,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) u8 *data; dma_addr_t mapping; - for (i = 0; i < MAX_TPA; i++) { + for (i = 0; i < bp->max_tpa; i++) { data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); if (!data) @@ -3468,7 +3647,7 @@ static void bnxt_free_ring_stats(struct bnxt *bp) if (!bp->bnapi) return; - size = sizeof(struct ctx_hw_stats); + size = bp->hw_ring_stats_size; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; @@ -3487,7 +3666,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) u32 size, i; struct pci_dev *pdev = bp->pdev; - size = sizeof(struct ctx_hw_stats); + size = bp->hw_ring_stats_size; for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; @@ -4414,6 +4593,7 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; struct hwrm_vnic_tpa_cfg_input req = {0}; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) @@ -4453,9 +4633,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) nsegs = (MAX_SKB_FRAGS - n) / n; } - segs = ilog2(nsegs); + if (bp->flags & BNXT_FLAG_CHIP_P5) { + segs = MAX_TPA_SEGS_P5; + max_aggs = bp->max_tpa; + } else { + segs = ilog2(nsegs); + } req.max_agg_segs = cpu_to_le16(segs); - req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX); + req.max_aggs = cpu_to_le16(max_aggs); req.min_agg_len = cpu_to_le32(512); } @@ -4815,6 +5000,12 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (flags & VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; + bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); + if (bp->max_tpa_v2) + bp->hw_ring_stats_size = + sizeof(struct ctx_hw_stats_ext); + else + bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -6012,6 +6203,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); mutex_lock(&bp->hwrm_cmd_lock); @@ -9292,7 +9484,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (changes & BNXT_FLAG_TPA) { update_tpa = true; if ((bp->flags & BNXT_FLAG_TPA) == 0 || - (flags & BNXT_FLAG_TPA) == 0) + (flags & BNXT_FLAG_TPA) == 0 || + (bp->flags & BNXT_FLAG_CHIP_P5)) re_init = true; } @@ -9302,9 +9495,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (flags != bp->flags) { u32 old_flags = bp->flags; - bp->flags = flags; - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + bp->flags = flags; if (update_tpa) bnxt_set_ring_params(bp); return rc; @@ -9312,12 +9504,14 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (re_init) { bnxt_close_nic(bp, false, false); + bp->flags = flags; if (update_tpa) bnxt_set_ring_params(bp); return bnxt_open_nic(bp, false, false); } if (update_tpa) { + bp->flags = flags; rc = bnxt_set_tpa(bp, (flags & BNXT_FLAG_TPA) ? 
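The bnxt_hwrm_vnic_set_tpa() change above sends different aggregation limits per generation: legacy chips keep the ilog2(nsegs) segment encoding with the firmware-defined max_aggs ceiling, while P5 requests MAX_TPA_SEGS_P5 (0x3f) segments and the driver's own max_tpa. A sketch of just that parameter selection; the VNIC_TPA_CFG ceiling value below is an assumption, and nsegs is taken to be >= 1:

#include <stdint.h>

#define MAX_TPA_SEGS_P5			0x3f
#define VNIC_TPA_CFG_MAX_AGGS_MAX	7	/* assumption: fw ceiling */

static void tpa_cfg_params(int chip_p5, unsigned int nsegs, uint16_t max_tpa,
			   uint16_t *max_agg_segs, uint16_t *max_aggs)
{
	if (chip_p5) {
		*max_agg_segs = MAX_TPA_SEGS_P5;
		*max_aggs = max_tpa;
		return;
	}
	*max_agg_segs = 0;
	while (nsegs >>= 1)		/* ilog2(nsegs), nsegs >= 1 */
		(*max_agg_segs)++;
	*max_aggs = VNIC_TPA_CFG_MAX_AGGS_MAX;
}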
true : false); @@ -9714,6 +9908,68 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +static int bnxt_fw_init_one_p1(struct bnxt *bp) +{ + int rc; + + bp->fw_cap = 0; + rc = bnxt_hwrm_ver_get(bp); + if (rc) + return rc; + + if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { + rc = bnxt_alloc_kong_hwrm_resources(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; + } + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { + rc = bnxt_alloc_hwrm_short_cmd_req(bp); + if (rc) + return rc; + } + rc = bnxt_hwrm_func_reset(bp); + if (rc) + return -ENODEV; + + bnxt_hwrm_fw_set_time(bp); + return 0; +} + +static int bnxt_fw_init_one_p2(struct bnxt *bp) +{ + int rc; + + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", + rc); + return -ENODEV; + } + + rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", + rc); + + rc = bnxt_hwrm_func_drv_rgtr(bp); + if (rc) + return -ENODEV; + + rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); + if (rc) + return -ENODEV; + + bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_vnic_qcaps(bp); + bnxt_hwrm_port_led_qcaps(bp); + bnxt_ethtool_init(bp); + bnxt_dcb_init(bp); + return 0; +} + static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) { int rc; @@ -10669,32 +10925,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err_pci_clean; mutex_init(&bp->hwrm_cmd_lock); - rc = bnxt_hwrm_ver_get(bp); + + rc = bnxt_fw_init_one_p1(bp); if (rc) goto init_err_pci_clean; - if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { - rc = bnxt_alloc_kong_hwrm_resources(bp); - if (rc) - bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; - } - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { - rc = bnxt_alloc_hwrm_short_cmd_req(bp); - if (rc) - goto init_err_pci_clean; - } - if (BNXT_CHIP_P5(bp)) bp->flags |= BNXT_FLAG_CHIP_P5; - rc = bnxt_hwrm_func_reset(bp); + rc = bnxt_fw_init_one_p2(bp); if (rc) goto init_err_pci_clean; - bnxt_hwrm_fw_set_time(bp); - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | @@ -10732,41 +10974,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->gro_func = bnxt_gro_func_5730x; if (BNXT_CHIP_P4(bp)) bp->gro_func = bnxt_gro_func_5731x; + else if (BNXT_CHIP_P5(bp)) + bp->gro_func = bnxt_gro_func_5750x; } if (!BNXT_CHIP_P4_PLUS(bp)) bp->flags |= BNXT_FLAG_DOUBLE_DB; - rc = bnxt_hwrm_func_drv_rgtr(bp); - if (rc) - goto init_err_pci_clean; - - rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); - if (rc) - goto init_err_pci_clean; - bp->ulp_probe = bnxt_ulp_probe; - rc = bnxt_hwrm_queue_qportcfg(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", - rc); - rc = -1; - goto init_err_pci_clean; - } - /* Get the MAX capabilities for this function */ - rc = bnxt_hwrm_func_qcaps(bp); - if (rc) { - netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", - rc); - rc = -1; - goto init_err_pci_clean; - } - - rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); - if (rc) - netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", - rc); - rc = bnxt_init_mac_addr(bp); if (rc) { dev_err(&pdev->dev, "Unable to initialize mac address.\n"); @@ -10780,11 +10995,6 @@ 
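bnxt_fw_init_one_p1()/p2() above split the probe-time firmware bring-up into two phases, seemingly so the sequence can be invoked as a unit outside of probe later on: phase 1 covers everything needed before the chip generation is acted on (version query, command channels, function reset), phase 2 the capability queries and driver registration. The stubs below exist only to make the ordering compile; they model the call sequence in the refactored bnxt_init_one(), nothing more:

struct dev_state { unsigned int flags; };
#define FLAG_CHIP_P5 0x1u

static int fw_init_one_p1(struct dev_state *bp) { (void)bp; return 0; }
static int fw_init_one_p2(struct dev_state *bp) { (void)bp; return 0; }
static int chip_is_p5(const struct dev_state *bp) { (void)bp; return 1; }

static int fw_init_one(struct dev_state *bp)
{
	int rc;

	rc = fw_init_one_p1(bp);	/* ver_get, short cmds, func_reset */
	if (rc)
		return rc;
	if (chip_is_p5(bp))		/* chip_num known after ver_get */
		bp->flags |= FLAG_CHIP_P5;
	return fw_init_one_p2(bp);	/* qcaps, drv_rgtr, vnic_qcaps... */
}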
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; } - bnxt_hwrm_func_qcfg(bp); - bnxt_hwrm_vnic_qcaps(bp); - bnxt_hwrm_port_led_qcaps(bp); - bnxt_ethtool_init(bp); - bnxt_dcb_init(bp); /* MTU range: 60 - FW defined max */ dev->min_mtu = ETH_ZLEN; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 16694b704d15..e3262089b751 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -113,6 +113,7 @@ struct tx_cmp { #define CMP_TYPE_RX_AGG_CMP 18 #define CMP_TYPE_RX_L2_TPA_START_CMP 19 #define CMP_TYPE_RX_L2_TPA_END_CMP 21 + #define CMP_TYPE_RX_TPA_AGG_CMP 22 #define CMP_TYPE_STATUS_CMP 32 #define CMP_TYPE_REMOTE_DRIVER_REQ 34 #define CMP_TYPE_REMOTE_DRIVER_RESP 36 @@ -263,14 +264,21 @@ struct rx_agg_cmp { u32 rx_agg_cmp_opaque; __le32 rx_agg_cmp_v; #define RX_AGG_CMP_V (1 << 0) + #define RX_AGG_CMP_AGG_ID (0xffff << 16) + #define RX_AGG_CMP_AGG_ID_SHIFT 16 __le32 rx_agg_cmp_unused; }; +#define TPA_AGG_AGG_ID(rx_agg) \ + ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \ + RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT) + struct rx_tpa_start_cmp { __le32 rx_tpa_start_cmp_len_flags_type; #define RX_TPA_START_CMP_TYPE (0x3f << 0) #define RX_TPA_START_CMP_FLAGS (0x3ff << 6) #define RX_TPA_START_CMP_FLAGS_SHIFT 6 + #define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6) #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7) #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) @@ -278,6 +286,7 @@ struct rx_tpa_start_cmp { #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11) #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12) #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12 #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12) @@ -291,6 +300,8 @@ struct rx_tpa_start_cmp { #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9 #define RX_TPA_START_CMP_AGG_ID (0x7f << 25) #define RX_TPA_START_CMP_AGG_ID_SHIFT 25 + #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16 __le32 rx_tpa_start_cmp_rss_hash; }; @@ -308,6 +319,14 @@ struct rx_tpa_start_cmp { ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT) +#define TPA_START_AGG_ID_P5(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5) + +#define TPA_START_ERROR(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR)) + struct rx_tpa_start_cmp_ext { __le32 rx_tpa_start_cmp_flags2; #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0) @@ -315,10 +334,20 @@ struct rx_tpa_start_cmp_ext { #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8) + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9) + #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10) + #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10 + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16) + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16 __le32 rx_tpa_start_cmp_metadata; __le32 rx_tpa_start_cmp_cfa_code_v2; #define RX_TPA_START_CMP_V2 (0x1 << 0) + 
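The new TPA_AGG_AGG_ID() above follows the same accessor pattern as the rest of bnxt.h: convert the little-endian completion word to host order, mask, shift. For reference, an equivalent in plain C; le32toh() from glibc's endian.h stands in for le32_to_cpu(), and the struct is a local mirror of rx_agg_cmp:

#include <endian.h>		/* le32toh(), glibc/BSD */
#include <stdint.h>

#define AGG_ID_MASK	(0xffffu << 16)
#define AGG_ID_SHIFT	16

struct rx_agg_cmp_m {
	uint32_t len_flags_type;
	uint32_t opaque;
	uint32_t v;		/* valid bit, plus (P5) agg_id in 31:16 */
	uint32_t unused;
};

static uint16_t tpa_agg_agg_id(const struct rx_agg_cmp_m *rx_agg)
{
	return (le32toh(rx_agg->v) & AGG_ID_MASK) >> AGG_ID_SHIFT;
}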
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1 + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 __le32 rx_tpa_start_cmp_hdr_info; @@ -332,6 +361,11 @@ struct rx_tpa_start_cmp_ext { (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \ cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE))) +#define TPA_START_ERROR_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT) + struct rx_tpa_end_cmp { __le32 rx_tpa_end_cmp_len_flags_type; #define RX_TPA_END_CMP_TYPE (0x3f << 0) @@ -361,6 +395,8 @@ struct rx_tpa_end_cmp { #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 #define RX_TPA_END_CMP_AGG_ID (0x7f << 25) #define RX_TPA_END_CMP_AGG_ID_SHIFT 25 + #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16) + #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16 __le32 rx_tpa_end_cmp_tsdelta; #define RX_TPA_END_GRO_TS (0x1 << 31) @@ -370,6 +406,18 @@ struct rx_tpa_end_cmp { ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT) +#define TPA_END_AGG_ID_P5(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5) + +#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT) + +#define TPA_END_AGG_BUFS(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT) + #define TPA_END_TPA_SEGS(rx_tpa_end) \ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT) @@ -389,6 +437,10 @@ struct rx_tpa_end_cmp { struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_dup_acks; #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16 + #define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24) + #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24 __le32 rx_tpa_end_cmp_seg_len; #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0) @@ -396,7 +448,13 @@ struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_errors_v2; #define RX_TPA_END_CMP_V2 (0x1 << 0) #define RX_TPA_END_CMP_ERRORS (0x3 << 1) + #define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1) #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) u32 rx_tpa_end_cmp_start_opaque; }; @@ -405,6 +463,15 @@ struct rx_tpa_end_cmp_ext { ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ cpu_to_le32(RX_TPA_END_CMP_ERRORS)) +#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5) + +#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + 
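TPA_START_ERROR_CODE() above pulls the new buffer-error code out of bits 3:1 of the cfa_code_v2 word; it is what the reworked bnxt_tpa_start() now prints before scheduling a reset. The same decode in isolation, on a host-order word, with the named values from the defines:

#include <stdint.h>

enum tpa_start_err {
	TPA_START_ERR_NO_BUFFER		= 0x0,
	TPA_START_ERR_BAD_FORMAT	= 0x3,
	TPA_START_ERR_FLUSH		= 0x5,
};

static unsigned int tpa_start_error_code(uint32_t cfa_code_v2)
{
	return (cfa_code_v2 & (0x7u << 1)) >> 1;	/* bits 3:1 */
}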
RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5) + struct nqe_cn { __le16 type; #define NQ_CN_TYPE_MASK 0x3fUL @@ -487,6 +554,9 @@ struct nqe_cn { #define BNXT_DEFAULT_TX_RING_SIZE 511 #define MAX_TPA 64 +#define MAX_TPA_P5 256 +#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1) +#define MAX_TPA_SEGS_P5 0x3f #if (BNXT_PAGE_SHIFT == 16) #define MAX_RX_PAGES 1 @@ -768,6 +838,15 @@ struct bnxt_tpa_info { ((hdr_info) & 0x1ff) u16 cfa_code; /* cfa_code in TPA start compl */ + u8 agg_count; + struct rx_agg_cmp *agg_arr; +}; + +#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG) + +struct bnxt_tpa_idx_map { + u16 agg_id_tbl[1024]; + unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE]; }; struct bnxt_rx_ring_info { @@ -797,6 +876,7 @@ struct bnxt_rx_ring_info { dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; struct bnxt_tpa_info *rx_tpa; + struct bnxt_tpa_idx_map *rx_tpa_idx_map; struct bnxt_ring_struct rx_ring_struct; struct bnxt_ring_struct rx_agg_ring_struct; @@ -1282,7 +1362,9 @@ struct bnxt { #define CHIP_NUM_5745X 0xd730 -#define CHIP_NUM_57500 0x1750 +#define CHIP_NUM_57508 0x1750 +#define CHIP_NUM_57504 0x1751 +#define CHIP_NUM_57502 0x1752 #define CHIP_NUM_58802 0xd802 #define CHIP_NUM_58804 0xd804 @@ -1379,12 +1461,14 @@ struct bnxt { #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \ - !(bp->flags & BNXT_FLAG_CHIP_P5) && \ - !is_kdump_kernel()) + (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \ + (bp)->max_tpa_v2) && !is_kdump_kernel()) /* Chip class phase 5 */ #define BNXT_CHIP_P5(bp) \ - ((bp)->chip_num == CHIP_NUM_57500) + ((bp)->chip_num == CHIP_NUM_57508 || \ + (bp)->chip_num == CHIP_NUM_57504 || \ + (bp)->chip_num == CHIP_NUM_57502) /* Chip class phase 4.x */ #define BNXT_CHIP_P4(bp) \ @@ -1414,6 +1498,8 @@ struct bnxt { u16, void *, u8 *, dma_addr_t, unsigned int); + u16 max_tpa_v2; + u16 max_tpa; u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ u16 rx_offset; @@ -1525,6 +1611,7 @@ struct bnxt { int hw_port_stats_size; u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; + u16 hw_ring_stats_size; u8 pri2cos[8]; u8 pri2cos_valid; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index c7ee63d69679..3a3d8a9be5ed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -137,7 +137,44 @@ reset_coalesce: return rc; } -#define BNXT_NUM_STATS 22 +static const char * const bnxt_ring_stats_str[] = { + "rx_ucast_packets", + "rx_mcast_packets", + "rx_bcast_packets", + "rx_discards", + "rx_drops", + "rx_ucast_bytes", + "rx_mcast_bytes", + "rx_bcast_bytes", + "tx_ucast_packets", + "tx_mcast_packets", + "tx_bcast_packets", + "tx_discards", + "tx_drops", + "tx_ucast_bytes", + "tx_mcast_bytes", + "tx_bcast_bytes", +}; + +static const char * const bnxt_ring_tpa_stats_str[] = { + "tpa_packets", + "tpa_bytes", + "tpa_events", + "tpa_aborts", +}; + +static const char * const bnxt_ring_tpa2_stats_str[] = { + "rx_tpa_eligible_pkt", + "rx_tpa_eligible_bytes", + "rx_tpa_pkt", + "rx_tpa_bytes", + "rx_tpa_errors", +}; + +static const char * const bnxt_ring_sw_stats_str[] = { + "rx_l4_csum_errors", + "missed_irqs", +}; #define BNXT_RX_STATS_ENTRY(counter) \ { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } @@ -207,6 +244,20 @@ reset_coalesce: BNXT_TX_STATS_EXT_COS_ENTRY(6), \ 
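Back in bnxt.h, the reworked BNXT_SUPPORTS_TPA() gates TPA on P5 by whether firmware reported a nonzero max_aggs_supported (saved as max_tpa_v2 in bnxt_hwrm_vnic_qcaps()). The predicate, spelled out as a plain function:

#include <stdbool.h>
#include <stdint.h>

static bool supports_tpa(bool nitro_a0, bool chip_p5, uint16_t max_tpa_v2,
			 bool is_kdump)
{
	/* legacy chips: always (outside kdump); P5: only with fw support */
	return !nitro_a0 && (!chip_p5 || max_tpa_v2 != 0) && !is_kdump;
}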
BNXT_TX_STATS_EXT_COS_ENTRY(7) \ +#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ + BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ + BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) + +#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7) + #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ __stringify(counter##_pri##n) } @@ -352,6 +403,7 @@ static const struct { BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold), BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, }; static const struct { @@ -417,9 +469,29 @@ static const struct { ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) #define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr) +static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) +{ + if (BNXT_SUPPORTS_TPA(bp)) { + if (bp->max_tpa_v2) + return ARRAY_SIZE(bnxt_ring_tpa2_stats_str); + return ARRAY_SIZE(bnxt_ring_tpa_stats_str); + } + return 0; +} + +static int bnxt_get_num_ring_stats(struct bnxt *bp) +{ + int num_stats; + + num_stats = ARRAY_SIZE(bnxt_ring_stats_str) + + ARRAY_SIZE(bnxt_ring_sw_stats_str) + + bnxt_get_num_tpa_ring_stats(bp); + return num_stats * bp->cp_nr_rings; +} + static int bnxt_get_num_stats(struct bnxt *bp) { - int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + int num_stats = bnxt_get_num_ring_stats(bp); num_stats += BNXT_NUM_SW_FUNC_STATS; @@ -460,10 +532,11 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; + u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) + + bnxt_get_num_tpa_ring_stats(bp); if (!bp->bnapi) { - j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS; + j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; goto skip_ring_stats; } @@ -551,56 +624,39 @@ skip_ring_stats: static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnxt *bp = netdev_priv(dev); - u32 i; + static const char * const *str; + u32 i, j, num_str; switch (stringset) { - /* The number of strings must match BNXT_NUM_STATS defined above. 
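bnxt_get_num_ring_stats() above replaces the fixed BNXT_NUM_STATS (22) with a count derived from the string tables, so the TPA block can differ per chip. The arithmetic, using the table sizes from this file:

#define N_RING_STATS	16	/* bnxt_ring_stats_str */
#define N_TPA_STATS	4	/* bnxt_ring_tpa_stats_str */
#define N_TPA2_STATS	5	/* bnxt_ring_tpa2_stats_str */
#define N_SW_STATS	2	/* bnxt_ring_sw_stats_str */

static int num_ring_stats(int tpa_supported, int max_tpa_v2, int cp_nr_rings)
{
	int tpa = 0;

	if (tpa_supported)
		tpa = max_tpa_v2 ? N_TPA2_STATS : N_TPA_STATS;
	/* legacy case: 16 + 4 + 2 = 22, matching the old BNXT_NUM_STATS */
	return (N_RING_STATS + tpa + N_SW_STATS) * cp_nr_rings;
}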
*/ case ETH_SS_STATS: for (i = 0; i < bp->cp_nr_rings; i++) { - sprintf(buf, "[%d]: rx_ucast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_mcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_bcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_discards", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_drops", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_ucast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_mcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_bcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_ucast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_mcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_bcast_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_discards", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_drops", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_ucast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_mcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tx_bcast_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_packets", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_bytes", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_events", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: tpa_aborts", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: rx_l4_csum_errors", i); - buf += ETH_GSTRING_LEN; - sprintf(buf, "[%d]: missed_irqs", i); - buf += ETH_GSTRING_LEN; + num_str = ARRAY_SIZE(bnxt_ring_stats_str); + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + if (!BNXT_SUPPORTS_TPA(bp)) + goto skip_tpa_stats; + + if (bp->max_tpa_v2) { + num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str); + str = bnxt_ring_tpa2_stats_str; + } else { + num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str); + str = bnxt_ring_tpa_stats_str; + } + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, str[j]); + buf += ETH_GSTRING_LEN; + } +skip_tpa_stats: + num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str); + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { strcpy(buf, bnxt_sw_func_stats[i].string); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 12bbb2a207d0..2cdef753a1bc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,8 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2019 Broadcom Limited + * Copyright (c) 2014-2018 Broadcom Limited + * Copyright (c) 2018-2019 Broadcom Inc. 
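The rewritten bnxt_get_strings() loop above emits each counter name prefixed with its ring index into fixed-width ETH_GSTRING_LEN slots. A stdio rendering of the same layout; snprintf() guards the model where the kernel relies on the names fitting:

#include <stdio.h>

#define ETH_GSTRING_LEN 32

/* buf must hold num_str * ETH_GSTRING_LEN bytes, as ethtool allocates */
static void fill_ring_strings(char *buf, int ring,
			      const char * const *str, int num_str)
{
	for (int j = 0; j < num_str; j++) {
		snprintf(buf, ETH_GSTRING_LEN, "[%d]: %s", ring, str[j]);
		buf += ETH_GSTRING_LEN;
	}
}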
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -39,15 +40,15 @@ struct hwrm_resp_hdr { #define TLV_TYPE_ROCE_SP_COMMAND 0x3UL #define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL #define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL -#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL -#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL +#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL #define TLV_TYPE_ENGINE_CKV_IV 0x8003UL #define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL #define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL #define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL -#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL +#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL #define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL -#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE +#define TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY 0x8009UL +#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY /* tlv (size:64b/8B) */ @@ -267,7 +268,6 @@ struct cmd_nums { #define HWRM_CFA_EEM_OP 0x123UL #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL #define HWRM_CFA_TFLIB 0x125UL - #define HWRM_ENGINE_CKV_HELLO 0x12dUL #define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL @@ -313,6 +313,7 @@ struct cmd_nums { #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL #define HWRM_FUNC_VF_BW_CFG 0x195UL #define HWRM_FUNC_VF_BW_QCFG 0x196UL + #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -410,8 +411,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 69 -#define HWRM_VERSION_STR "1.10.0.69" +#define HWRM_VERSION_RSVD 89 +#define HWRM_VERSION_STR "1.10.0.89" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -624,6 +625,8 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -1122,6 +1125,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL + #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1241,6 +1245,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL + #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -2916,7 +2921,7 @@ struct tx_port_stats_ext { __le64 pfc_pri7_tx_transitions; }; -/* rx_port_stats_ext (size:2624b/328B) */ +/* rx_port_stats_ext (size:3648b/456B) */ struct rx_port_stats_ext { __le64 link_down_events; __le64 continuous_pause_events; @@ -2959,6 +2964,22 @@ struct rx_port_stats_ext { __le64 
rx_buffer_passed_threshold; __le64 rx_pcs_symbol_err; __le64 rx_corrected_bits; + __le64 rx_discard_bytes_cos0; + __le64 rx_discard_bytes_cos1; + __le64 rx_discard_bytes_cos2; + __le64 rx_discard_bytes_cos3; + __le64 rx_discard_bytes_cos4; + __le64 rx_discard_bytes_cos5; + __le64 rx_discard_bytes_cos6; + __le64 rx_discard_bytes_cos7; + __le64 rx_discard_packets_cos0; + __le64 rx_discard_packets_cos1; + __le64 rx_discard_packets_cos2; + __le64 rx_discard_packets_cos3; + __le64 rx_discard_packets_cos4; + __le64 rx_discard_packets_cos5; + __le64 rx_discard_packets_cos6; + __le64 rx_discard_packets_cos7; }; /* hwrm_port_qstats_ext_input (size:320b/40B) */ @@ -6115,6 +6136,21 @@ struct hwrm_cfa_flow_alloc_output { u8 valid; }; +/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_flow_alloc_cmd_err { + u8 code; + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB + u8 unused_0[7]; +}; + /* hwrm_cfa_flow_free_input (size:256b/32B) */ struct hwrm_cfa_flow_free_input { __le16 req_type; @@ -6305,7 +6341,7 @@ struct hwrm_cfa_eem_qcaps_input { __le32 unused_0; }; -/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */ +/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */ struct hwrm_cfa_eem_qcaps_output { __le16 error_code; __le16 req_type; @@ -6322,15 +6358,17 @@ struct hwrm_cfa_eem_qcaps_output { #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL __le32 max_entries_supported; __le16 key_entry_size; __le16 record_entry_size; __le16 efc_entry_size; - u8 unused_1; + __le16 fid_entry_size; + u8 unused_1[7]; u8 valid; }; -/* hwrm_cfa_eem_cfg_input (size:320b/40B) */ +/* hwrm_cfa_eem_cfg_input (size:384b/48B) */ struct hwrm_cfa_eem_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -6350,6 +6388,9 @@ struct hwrm_cfa_eem_cfg_input { __le16 key1_ctx_id; __le16 record_ctx_id; __le16 efc_ctx_id; + __le16 fid_ctx_id; + __le16 unused_2; + __le32 unused_3; }; /* hwrm_cfa_eem_cfg_output (size:128b/16B) */ @@ -6375,7 +6416,7 @@ struct hwrm_cfa_eem_qcfg_input { __le32 unused_0; }; -/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */ +/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */ struct hwrm_cfa_eem_qcfg_output { __le16 error_code; __le16 req_type; @@ -6386,7 +6427,12 @@ struct hwrm_cfa_eem_qcfg_output { #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL __le32 num_entries; - u8 unused_0[7]; + __le16 key0_ctx_id; + __le16 key1_ctx_id; + __le16 record_ctx_id; + __le16 efc_ctx_id; + __le16 fid_ctx_id; + u8 unused_2[5]; u8 valid; }; @@ -6567,6 +6613,31 @@ struct ctx_hw_stats { __le64 tpa_aborts; }; +/* ctx_hw_stats_ext (size:1344b/168B) */ +struct ctx_hw_stats_ext { + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 
tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; +}; + /* hwrm_stat_ctx_alloc_input (size:256b/32B) */ struct hwrm_stat_ctx_alloc_input { __le16 req_type; @@ -6578,7 +6649,8 @@ struct hwrm_stat_ctx_alloc_input { __le32 update_period_ms; u8 stat_ctx_flags; #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL - u8 unused_0[3]; + u8 unused_0; + __le16 stats_dma_length; }; /* hwrm_stat_ctx_alloc_output (size:128b/16B) */ @@ -7204,7 +7276,9 @@ struct coredump_segment_record { u8 version_hi; u8 version_low; u8 seg_flags; - u8 unused_0[7]; + u8 compress_flags; + #define SFLAG_COMPRESSED_ZLIB 0x1UL + u8 unused_0[6]; }; /* hwrm_dbg_coredump_list_input (size:256b/32B) */ @@ -7729,6 +7803,9 @@ struct hwrm_nvm_set_variable_input { #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1) #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1) #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4 + #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL u8 unused_0; };
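Tying the firmware-interface additions back to the driver changes earlier in the patch: ctx_hw_stats_ext appends the five rx_tpa_* counters to the 16 base ring counters, and the new stats_dma_length field in HWRM_STAT_CTX_ALLOC is how bnxt_hwrm_stat_ctx_alloc() tells firmware which layout it allocated. A runnable size check over local mirrors of the two structs (the real definitions live in bnxt_hsi.h):

#include <stdint.h>
#include <stdio.h>

struct hw_stats     { uint64_t c[20]; };	/* ctx_hw_stats: 160 B */
struct hw_stats_ext { uint64_t c[21]; };	/* ctx_hw_stats_ext: 168 B */

int main(void)
{
	uint16_t max_tpa_v2 = 256;	/* nonzero per VNIC qcaps on P5 */
	uint16_t hw_ring_stats_size = max_tpa_v2 ?
		sizeof(struct hw_stats_ext) : sizeof(struct hw_stats);

	/* req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); */
	printf("stats_dma_length = %u\n", (unsigned int)hw_ring_stats_size);
	return 0;
}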