ixgbe: avoid premature Rx buffer reuse
The page recycle code incorrectly relied on a page fragment never being
freed inside xdp_do_redirect(). Under that assumption, page fragments
still in use by the stack or the XDP redirect path could be reused and
overwritten.

To avoid this, store the page count prior to invoking xdp_do_redirect().
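Put differently, the refcount that feeds the reuse heuristic has to be
snapshotted before the XDP program (and hence a possible
xdp_do_redirect()) can run. A condensed sketch of the pattern, using the
driver's real helper names but with the Rx-loop plumbing abbreviated; a
sketch of the idea, not the literal driver code (see the diff below):

  /* Snapshot the refcount before the XDP program can touch the page;
   * xdp_do_redirect() may free the fragment, so a page_count() read
   * taken after the call can make the page look reusable too early.
   */
  int rx_buffer_pgcnt = page_count(rx_buffer->page);

  skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);  /* may xdp_do_redirect() */

  /* The reuse decision consumes the snapshot instead of re-reading
   * page_count() (PAGE_SIZE < 8192 case shown).
   */
  if (!((rx_buffer_pgcnt - rx_buffer->pagecnt_bias) > 1))
          ixgbe_reuse_rx_page(rx_ring, rx_buffer);  /* recycle half-page */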
Fixes: 6453073987 ("ixgbe: add initial support for xdp redirect")
Reported-and-analyzed-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
commit a06316dc87
parent 75aab4e10a
@@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+				    int rx_buffer_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
@@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
 		return false;
 #else
 	/* The last offset is a bit aggressive in that we assume the
@@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
 						   union ixgbe_adv_rx_desc *rx_desc,
 						   struct sk_buff **skb,
-						   const unsigned int size)
+						   const unsigned int size,
+						   int *rx_buffer_pgcnt)
 {
 	struct ixgbe_rx_buffer *rx_buffer;
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
 	prefetchw(rx_buffer->page);
 	*skb = rx_buffer->skb;
 
@@ -2055,9 +2063,10 @@ skip_sync:
 
 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
 				struct ixgbe_rx_buffer *rx_buffer,
-				struct sk_buff *skb)
+				struct sk_buff *skb,
+				int rx_buffer_pgcnt)
 {
-	if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+	if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
 		/* hand second half of page back to the ring */
 		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
@@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct ixgbe_rx_buffer *rx_buffer;
 		struct sk_buff *skb;
+		int rx_buffer_pgcnt;
 		unsigned int size;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		dma_rmb();
 
-		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
 
 		/* retrieve a buffer from the ring */
 		if (!skb) {
@@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			break;
 		}
 
-		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+		ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
 		cleaned_count++;
 
 		/* place incomplete frames back on ring for completion */