Merge branch 'bnxt_en-fixes'
Michael Chan says:

====================
bnxt_en: Bug fixes for net.

Only use MSIX on VF, and fix rx page buffers on architectures with
PAGE_SIZE >= 64K.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 97d601d5de
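Before the diff, some context on the RX page fix: the RXBD length field is only 16 bits, so a single buffer descriptor cannot describe a full 64K page. The series therefore caps each RX aggregation buffer at 32K and carves larger pages into multiple buffers. A minimal user-space sketch of that cap (illustration only, not part of the commit; the hard-coded PAGE_SHIFT and the main()/printf scaffolding are assumptions):

/* Evaluate the new BNXT_RX_PAGE_SIZE cap for a 64K-page kernel. */
#include <stdio.h>

#define PAGE_SHIFT 16                          /* assume a 64K-page architecture */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same cap the series adds in bnxt.h: RX buffers must stay below 64K. */
#if (PAGE_SHIFT > 15)
#define BNXT_RX_PAGE_SHIFT 15
#else
#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
#endif
#define BNXT_RX_PAGE_SIZE (1UL << BNXT_RX_PAGE_SHIFT)

int main(void)
{
        printf("PAGE_SIZE=%lu BNXT_RX_PAGE_SIZE=%lu RX buffers per page=%lu\n",
               PAGE_SIZE, BNXT_RX_PAGE_SIZE, PAGE_SIZE / BNXT_RX_PAGE_SIZE);
        /* prints: PAGE_SIZE=65536 BNXT_RX_PAGE_SIZE=32768 RX buffers per page=2 */
        return 0;
}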
drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -581,12 +581,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	struct page *page;
 	dma_addr_t mapping;
 	u16 sw_prod = rxr->rx_sw_agg_prod;
+	unsigned int offset = 0;
 
-	page = alloc_page(gfp);
-	if (!page)
-		return -ENOMEM;
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		page = rxr->rx_page;
+		if (!page) {
+			page = alloc_page(gfp);
+			if (!page)
+				return -ENOMEM;
+			rxr->rx_page = page;
+			rxr->rx_page_offset = 0;
+		}
+		offset = rxr->rx_page_offset;
+		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+		if (rxr->rx_page_offset == PAGE_SIZE)
+			rxr->rx_page = NULL;
+		else
+			get_page(page);
+	} else {
+		page = alloc_page(gfp);
+		if (!page)
+			return -ENOMEM;
+	}
 
-	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
 			       PCI_DMA_FROMDEVICE);
 	if (dma_mapping_error(&pdev->dev, mapping)) {
 		__free_page(page);
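The hunk above is the heart of the RX page fix: when PAGE_SIZE exceeds BNXT_RX_PAGE_SIZE, one page is carved into several 32K aggregation buffers, with one page reference per outstanding slice. A simplified user-space model of that hand-out scheme follows (illustration only, not driver code; struct fake_page, the plain refcount and the printf scaffolding are assumptions standing in for struct page and get_page()/put_page()):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE         65536u  /* assume a 64K-page system */
#define BNXT_RX_PAGE_SIZE 32768u  /* capped RX buffer size */

struct fake_page {
        unsigned int refcount;
};

static struct fake_page *cached_page;    /* models rxr->rx_page */
static unsigned int cached_page_offset;  /* models rxr->rx_page_offset */

/* Hand out one BNXT_RX_PAGE_SIZE slice; *offset tells the caller where
 * inside the page its slice starts.
 */
static struct fake_page *alloc_rx_slice(unsigned int *offset)
{
        struct fake_page *page = cached_page;

        if (!page) {
                page = calloc(1, sizeof(*page));
                if (!page)
                        return NULL;
                page->refcount = 1;      /* reference owned by the cache */
                cached_page = page;
                cached_page_offset = 0;
        }
        *offset = cached_page_offset;
        cached_page_offset += BNXT_RX_PAGE_SIZE;
        if (cached_page_offset == PAGE_SIZE)
                cached_page = NULL;      /* page fully carved up; the cache's
                                          * reference now belongs to this slice */
        else
                page->refcount++;        /* extra reference for this slice */
        return page;
}

int main(void)
{
        unsigned int off;
        struct fake_page *a = alloc_rx_slice(&off);

        printf("slice 1: page %p offset %u\n", (void *)a, off);
        struct fake_page *b = alloc_rx_slice(&off);
        printf("slice 2: page %p offset %u refcount %u\n",
               (void *)b, off, b->refcount);
        struct fake_page *c = alloc_rx_slice(&off);
        printf("slice 3: page %p offset %u (fresh page)\n", (void *)c, off);
        /* In the driver each consumer eventually drops its reference with
         * put_page(); the one-shot demo simply exits here.
         */
        return 0;
}

Running this shows slices 1 and 2 sharing one page at offsets 0 and 32768 with a refcount of 2 (one per slice), and slice 3 starting a fresh page, which mirrors the rx_page/rx_page_offset bookkeeping in the hunk.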
@@ -601,6 +619,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
 
 	rx_agg_buf->page = page;
+	rx_agg_buf->offset = offset;
 	rx_agg_buf->mapping = mapping;
 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
 	rxbd->rx_bd_opaque = sw_prod;
@@ -642,6 +661,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
 	page = cons_rx_buf->page;
 	cons_rx_buf->page = NULL;
 	prod_rx_buf->page = page;
+	prod_rx_buf->offset = cons_rx_buf->offset;
 
 	prod_rx_buf->mapping = cons_rx_buf->mapping;
 
@@ -709,7 +729,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 
 		cons_rx_buf = &rxr->rx_agg_ring[cons];
-		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+		skb_fill_page_desc(skb, i, cons_rx_buf->page,
+				   cons_rx_buf->offset, frag_len);
 		__clear_bit(cons, rxr->rx_agg_bmap);
 
 		/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -740,7 +761,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
 			return NULL;
 		}
 
-		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
 			       PCI_DMA_FROMDEVICE);
 
 		skb->data_len += frag_len;
@@ -1584,13 +1605,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
 			dma_unmap_page(&pdev->dev,
 				       dma_unmap_addr(rx_agg_buf, mapping),
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
 			rx_agg_buf->page = NULL;
 			__clear_bit(j, rxr->rx_agg_bmap);
 
 			__free_page(page);
 		}
+		if (rxr->rx_page) {
+			__free_page(rxr->rx_page);
+			rxr->rx_page = NULL;
+		}
 	}
 }
 
@@ -1973,7 +1998,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
 		return 0;
 
-	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
 		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
 
 	bnxt_init_rxbd_pages(ring, type);
@@ -2164,7 +2189,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
 	bp->rx_agg_nr_pages = 0;
 
 	if (bp->flags & BNXT_FLAG_TPA)
-		agg_factor = 4;
+		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
 
 	bp->flags &= ~BNXT_FLAG_JUMBO;
 	if (rx_space > PAGE_SIZE) {
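The agg_factor change above sizes the aggregation ring against the real buffer size: 65536 / BNXT_RX_PAGE_SIZE is how many RX buffers a maximal 64K aggregate can span, capped at the old factor of 4. A quick check of that arithmetic (illustration only, not from the commit; the helper and the sample page sizes are assumptions):

#include <stdio.h>

/* Mirrors min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE) from bnxt_set_ring_params(),
 * with the RX buffer size passed in as a parameter.
 */
static unsigned int agg_factor(unsigned int rx_page_size)
{
        unsigned int f = 65536 / rx_page_size;

        return f < 4 ? f : 4;
}

int main(void)
{
        printf("4K RX pages  -> agg_factor %u\n", agg_factor(4096));  /* 4 (unchanged) */
        printf("32K RX pages -> agg_factor %u\n", agg_factor(32768)); /* 2 */
        return 0;
}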
@@ -3020,12 +3045,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
 		/* Number of segs are log2 units, and first packet is not
 		 * included as part of this units.
 		 */
-		if (mss <= PAGE_SIZE) {
-			n = PAGE_SIZE / mss;
+		if (mss <= BNXT_RX_PAGE_SIZE) {
+			n = BNXT_RX_PAGE_SIZE / mss;
 			nsegs = (MAX_SKB_FRAGS - 1) * n;
 		} else {
-			n = mss / PAGE_SIZE;
-			if (mss & (PAGE_SIZE - 1))
+			n = mss / BNXT_RX_PAGE_SIZE;
+			if (mss & (BNXT_RX_PAGE_SIZE - 1))
 				n++;
 			nsegs = (MAX_SKB_FRAGS - n) / n;
 		}
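The TPA segment estimate above now divides by the actual RX buffer size rather than PAGE_SIZE. A tiny worked example of the updated math (illustration only; the mss value and the max_frags parameter standing in for MAX_SKB_FRAGS are assumptions):

#include <stdio.h>

#define BNXT_RX_PAGE_SIZE 32768u

/* Transcription of the updated nsegs computation; max_frags stands in for
 * MAX_SKB_FRAGS, whose value depends on the kernel's PAGE_SIZE.
 */
static unsigned int tpa_nsegs(unsigned int mss, unsigned int max_frags)
{
        unsigned int n;

        if (mss <= BNXT_RX_PAGE_SIZE) {
                n = BNXT_RX_PAGE_SIZE / mss;
                return (max_frags - 1) * n;
        }
        n = mss / BNXT_RX_PAGE_SIZE;
        if (mss & (BNXT_RX_PAGE_SIZE - 1))
                n++;
        return (max_frags - n) / n;
}

int main(void)
{
        /* e.g. a 1448-byte TCP MSS with an assumed 17-frag limit:
         * 32768 / 1448 = 22, nsegs = (17 - 1) * 22 = 352
         */
        printf("nsegs = %u\n", tpa_nsegs(1448, 17));
        return 0;
}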
@@ -4309,7 +4334,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
 		rc = bnxt_setup_msix(bp);
 
-	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
 		/* fallback to INTA */
 		rc = bnxt_setup_inta(bp);
 	}
drivers/net/ethernet/broadcom/bnxt/bnxt.h

@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
 
 #define BNXT_PAGE_SIZE	(1 << BNXT_PAGE_SHIFT)
 
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
 #define BNXT_MIN_PKT_SIZE	45
 
 #define BNXT_NUM_TESTS(bp)	0
@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd {
 
 struct bnxt_sw_rx_agg_bd {
 	struct page		*page;
+	unsigned int		offset;
 	dma_addr_t		mapping;
 };
 
@@ -586,6 +596,9 @@ struct bnxt_rx_ring_info {
 	unsigned long		*rx_agg_bmap;
 	u16			rx_agg_bmap_size;
 
+	struct page		*rx_page;
+	unsigned int		rx_page_offset;
+
 	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
 	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 