net/mlx5e: Refactor RQ XDP_TX indication
Make the xdp_xmit indication available for Striding RQ by taking it out of the type-specific union. This refactor is a preparation for a downstream patch that adds XDP support over Striding RQ. In addition, use a bitmap instead of a boolean for possible future flags.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
This commit is contained in:
parent
619a8f2a42
commit
121e892754
@@ -479,6 +479,10 @@ typedef struct sk_buff *
|
|||||||
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
|
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
|
||||||
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
|
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
|
||||||
|
|
||||||
|
enum mlx5e_rq_flag {
|
||||||
|
MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
|
||||||
|
};
|
||||||
|
|
||||||
struct mlx5e_rq {
|
struct mlx5e_rq {
|
||||||
/* data path */
|
/* data path */
|
||||||
struct mlx5_wq_ll wq;
|
struct mlx5_wq_ll wq;
|
||||||
@@ -489,7 +493,6 @@ struct mlx5e_rq {
|
|||||||
u32 frag_sz; /* max possible skb frag_sz */
|
u32 frag_sz; /* max possible skb frag_sz */
|
||||||
union {
|
union {
|
||||||
bool page_reuse;
|
bool page_reuse;
|
||||||
bool xdp_xmit;
|
|
||||||
};
|
};
|
||||||
} wqe;
|
} wqe;
|
||||||
struct {
|
struct {
|
||||||
@@ -528,6 +531,7 @@ struct mlx5e_rq {
|
|||||||
struct bpf_prog *xdp_prog;
|
struct bpf_prog *xdp_prog;
|
||||||
unsigned int hw_mtu;
|
unsigned int hw_mtu;
|
||||||
struct mlx5e_xdpsq xdpsq;
|
struct mlx5e_xdpsq xdpsq;
|
||||||
|
DECLARE_BITMAP(flags, 8);
|
||||||
|
|
||||||
/* control */
|
/* control */
|
||||||
struct mlx5_wq_ctrl wq_ctrl;
|
struct mlx5_wq_ctrl wq_ctrl;
|
||||||
|
@@ -788,7 +788,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
|
|||||||
/* move page to reference to sq responsibility,
|
/* move page to reference to sq responsibility,
|
||||||
* and mark so it's not put back in page-cache.
|
* and mark so it's not put back in page-cache.
|
||||||
*/
|
*/
|
||||||
rq->wqe.xdp_xmit = true;
|
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
|
||||||
sq->db.di[pi] = *di;
|
sq->db.di[pi] = *di;
|
||||||
sq->pc++;
|
sq->pc++;
|
||||||
|
|
||||||
@@ -913,9 +913,8 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
|
|||||||
skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
|
skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
/* probably for XDP */
|
/* probably for XDP */
|
||||||
if (rq->wqe.xdp_xmit) {
|
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
|
||||||
wi->di.page = NULL;
|
wi->di.page = NULL;
|
||||||
rq->wqe.xdp_xmit = false;
|
|
||||||
/* do not return page to cache, it will be returned on XDP_TX completion */
|
/* do not return page to cache, it will be returned on XDP_TX completion */
|
||||||
goto wq_ll_pop;
|
goto wq_ll_pop;
|
||||||
}
|
}
|
||||||
@@ -955,9 +954,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
|
|||||||
|
|
||||||
skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
|
skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
if (rq->wqe.xdp_xmit) {
|
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
|
||||||
wi->di.page = NULL;
|
wi->di.page = NULL;
|
||||||
rq->wqe.xdp_xmit = false;
|
|
||||||
/* do not return page to cache, it will be returned on XDP_TX completion */
|
/* do not return page to cache, it will be returned on XDP_TX completion */
|
||||||
goto wq_ll_pop;
|
goto wq_ll_pop;
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user