net/mlx5e: Drop the len output parameter from mlx5e_xdp_handle

The len parameter of mlx5e_xdp_handle is used to output the new packet
length after XDP has processed the packet and returned XDP_PASS.
However, this value can be calculated at the call site, since the caller
knows whether the verdict was XDP_PASS.

This commit drops the len parameter and moves the calculation to the
caller, reducing the number of parameters passed to the function and
preparing for XDP support in non-linear legacy RQ.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Author:    Maxim Mikityanskiy, 2022-01-27 16:42:18 +02:00
Committer: Saeed Mahameed
Parent:    e26eceb90b
Commit:    064990d0b6
4 changed files with 11 additions and 13 deletions
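For orientation, below is a minimal, self-contained sketch of the caller-side pattern this change moves to. It is illustrative only, not the driver code: the names pkt_buff, xdp_consumed and skb_len_after_xdp are made up. The point it demonstrates is that, instead of receiving the post-XDP length through a u32 *len out-parameter, the caller derives it from the buffer as data_end - data after an XDP_PASS verdict.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for struct xdp_buff: only the fields needed for the length math. */
struct pkt_buff {
	uint8_t *data;
	uint8_t *data_end;
};

/* Stand-in for the XDP verdict: true means the packet was consumed
 * (XDP_TX/XDP_REDIRECT/XDP_DROP), false means XDP_PASS. A real program may
 * also move data/data_end, e.g. when it pushes or pops headers.
 */
static bool xdp_consumed(struct pkt_buff *xdp)
{
	(void)xdp;
	return false;
}

/* Caller-side pattern after this commit: compute the SKB length from the
 * buffer pointers instead of taking it back through a *len out-parameter.
 */
static uint32_t skb_len_after_xdp(struct pkt_buff *xdp)
{
	if (xdp_consumed(xdp))
		return 0; /* packet went to XDP, no SKB is built */

	return (uint32_t)(xdp->data_end - xdp->data); /* XDP_PASS */
}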

drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

@@ -120,8 +120,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 /* returns true if packet was consumed by xdp */
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-		      struct bpf_prog *prog,
-		      u32 *len, struct xdp_buff *xdp)
+		      struct bpf_prog *prog, struct xdp_buff *xdp)
 {
 	u32 act;
 	int err;
@@ -129,7 +128,6 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 	act = bpf_prog_run_xdp(prog, xdp);
 	switch (act) {
 	case XDP_PASS:
-		*len = xdp->data_end - xdp->data;
 		return false;
 	case XDP_TX:
 		if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))

drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h

@@ -48,8 +48,7 @@
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-		      struct bpf_prog *prog,
-		      u32 *len, struct xdp_buff *xdp);
+		      struct bpf_prog *prog, struct xdp_buff *xdp);
 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c

@@ -31,7 +31,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
						     u32 page_idx)
 {
 	struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
-	u32 cqe_bcnt32 = cqe_bcnt;
 	struct bpf_prog *prog;
 	/* Check packet size. Note LRO doesn't use linear SKB */
@@ -47,7 +46,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	 */
 	WARN_ON_ONCE(head_offset);
-	xdp->data_end = xdp->data + cqe_bcnt32;
+	xdp->data_end = xdp->data + cqe_bcnt;
 	xdp_set_data_meta_invalid(xdp);
 	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
 	net_prefetch(xdp->data);
@@ -68,7 +67,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	 */
 	prog = rcu_dereference(rq->xdp_prog);
-	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, &cqe_bcnt32, xdp))) {
+	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
 		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
 			__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 		return NULL; /* page/packet was consumed by XDP */
@@ -77,7 +76,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 	/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
 	 * frame. On SKB allocation failure, NULL is returned.
 	 */
-	return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
+	return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
 }
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -106,12 +105,12 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	}
 	prog = rcu_dereference(rq->xdp_prog);
-	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, &cqe_bcnt, xdp)))
+	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
 		return NULL; /* page/packet was consumed by XDP */
 	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
 	 * will be handled by mlx5e_put_rx_frag.
 	 * On SKB allocation failure, NULL is returned.
 	 */
-	return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
+	return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
 }

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -1544,11 +1544,12 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 		net_prefetchw(va); /* xdp_frame data area */
 		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-		if (mlx5e_xdp_handle(rq, di, prog, &cqe_bcnt, &xdp))
+		if (mlx5e_xdp_handle(rq, di, prog, &xdp))
 			return NULL; /* page/packet was consumed by XDP */
 		rx_headroom = xdp.data - xdp.data_hard_start;
 		metasize = xdp.data - xdp.data_meta;
+		cqe_bcnt = xdp.data_end - xdp.data;
 	}
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1874,7 +1875,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 		net_prefetchw(va); /* xdp_frame data area */
 		mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
-		if (mlx5e_xdp_handle(rq, di, prog, &cqe_bcnt32, &xdp)) {
+		if (mlx5e_xdp_handle(rq, di, prog, &xdp)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
 				__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
 			return NULL; /* page/packet was consumed by XDP */
@@ -1882,6 +1883,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 		rx_headroom = xdp.data - xdp.data_hard_start;
 		metasize = xdp.data - xdp.data_meta;
+		cqe_bcnt32 = xdp.data_end - xdp.data;
 	}
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize);