ixgbe, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full

When the XSK pool uses the need_wakeup feature, correlate -ENOBUFS that was
returned from xdp_do_redirect() with an XSK Rx queue being full. In such a
case, terminate the Rx processing that is being done on the current HW
Rx ring and let the user space consume descriptors from the XSK Rx queue so
that there is room that the driver can use later on.

Introduce a new internal return code, IXGBE_XDP_EXIT, that will indicate the
case described above.

Note that it does not affect Tx processing that is bound to the same
NAPI context, nor the other Rx rings.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220413153015.453864-8-maciej.fijalkowski@intel.com
This commit is contained in:
Maciej Fijalkowski 2022-04-13 17:30:08 +02:00 committed by Daniel Borkmann
parent b8aef650e5
commit c7dd09fd46
2 changed files with 19 additions and 9 deletions

View File

@@ -8,6 +8,7 @@
 #define IXGBE_XDP_CONSUMED	BIT(0)
 #define IXGBE_XDP_TX		BIT(1)
 #define IXGBE_XDP_REDIR		BIT(2)
+#define IXGBE_XDP_EXIT		BIT(3)
 
 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
 		       IXGBE_TXD_CMD_RS)

View File

@@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 
 	if (likely(act == XDP_REDIRECT)) {
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		if (err)
-			goto out_failure;
-		return IXGBE_XDP_REDIR;
+		if (!err)
+			return IXGBE_XDP_REDIR;
+		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+			result = IXGBE_XDP_EXIT;
+		else
+			result = IXGBE_XDP_CONSUMED;
+		goto out_failure;
 	}
 
 	switch (act) {
@@ -130,16 +134,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 		if (result == IXGBE_XDP_CONSUMED)
 			goto out_failure;
 		break;
+	case XDP_DROP:
+		result = IXGBE_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_ABORTED:
+		result = IXGBE_XDP_CONSUMED;
 out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-		fallthrough; /* handle aborts by dropping packet */
-	case XDP_DROP:
-		result = IXGBE_XDP_CONSUMED;
-		break;
+		break;
 	}
 	return result;
 }
@@ -303,12 +308,16 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
 		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
 		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
 
-		if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)))
+		if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
 			xdp_xmit |= xdp_res;
-		else if (xdp_res == IXGBE_XDP_CONSUMED)
+		} else if (xdp_res == IXGBE_XDP_EXIT) {
+			failure = true;
+			break;
+		} else if (xdp_res == IXGBE_XDP_CONSUMED) {
 			xsk_buff_free(bi->xdp);
-		else
+		} else if (xdp_res == IXGBE_XDP_PASS) {
 			goto construct_skb;
+		}
 
 		bi->xdp = NULL;
 		total_rx_packets++;