net/mlx5e: RX, Split WQ objects for different RQ types
Replace the common RQ WQ object with two separate ones for the different RQ types.
This is in preparation for switching to using a cyclic WQ type in Legacy RQ.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 6c3a823e1e
commit 422d4c401e
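The shape of the change: each RQ type now owns its WQ object inside the union of struct mlx5e_rq, and shared code paths pick the right one by switching on rq->wq_type, as the new mlx5e_rqwq_get_size()/mlx5e_rqwq_get_cur_sz() helpers below do. A minimal userspace sketch of that layout, with simplified stand-in types rather than the real driver structures:

/* Sketch only: stand-in types modelling the per-RQ-type WQ split. */
#include <stdio.h>

enum wq_type { WQ_TYPE_LINKED_LIST, WQ_TYPE_LINKED_LIST_STRIDING_RQ };

struct wq_ll { unsigned int size; unsigned int cur_sz; };

struct rq {
	enum wq_type wq_type;
	union {
		struct { struct wq_ll wq; } wqe;   /* Legacy RQ state */
		struct { struct wq_ll wq; } mpwqe; /* Striding RQ state */
	};
};

/* Mirrors the idea of mlx5e_rqwq_get_size(): select the WQ matching the RQ type. */
static unsigned int rqwq_get_size(const struct rq *rq)
{
	return rq->wq_type == WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
	       rq->mpwqe.wq.size : rq->wqe.wq.size;
}

int main(void)
{
	struct rq rq = { .wq_type = WQ_TYPE_LINKED_LIST };

	rq.wqe.wq.size = 1024;
	printf("wq size: %u\n", rqwq_get_size(&rq));
	return 0;
}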
@@ -498,10 +498,9 @@ enum mlx5e_rq_flag {
 
 struct mlx5e_rq {
 	/* data path */
-	struct mlx5_wq_ll      wq;
-
 	union {
 		struct {
+			struct mlx5_wq_ll      wq;
 			struct mlx5e_wqe_frag_info *frag_info;
 			u32 frag_sz;	/* max possible skb frag_sz */
 			union {
@@ -509,6 +508,7 @@ struct mlx5e_rq {
 			};
 		} wqe;
 		struct {
+			struct mlx5_wq_ll      wq;
 			struct mlx5e_umr_wqe   umr_wqe;
 			struct mlx5e_mpw_info *info;
 			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
@@ -319,10 +319,30 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
 }
 
+static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
+{
+	switch (rq->wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+	default:
+		return mlx5_wq_ll_get_size(&rq->wqe.wq);
+	}
+}
+
+static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
+{
+	switch (rq->wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		return rq->mpwqe.wq.cur_sz;
+	default:
+		return rq->wqe.wq.cur_sz;
+	}
+}
+
 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
 				     struct mlx5e_channel *c)
 {
-	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 
 	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
 				      GFP_KERNEL, cpu_to_node(c->cpu));
@@ -370,7 +390,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 
 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
 {
-	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
+	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
 
 	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
 }
@@ -397,15 +417,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
 	rqp->wq.db_numa_node = cpu_to_node(c->cpu);
 
-	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
-				&rq->wq_ctrl);
-	if (err)
-		return err;
-
-	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
-
-	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-
 	rq->wq_type = params->rq_wq_type;
 	rq->pdev    = c->pdev;
 	rq->netdev  = c->netdev;
@@ -434,8 +445,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
+					&rq->wq_ctrl);
+		if (err)
+			return err;
+
+		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
+
+		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+
 		pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
 
 		rq->post_wqes = mlx5e_post_rx_mpwqes;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
@@ -472,6 +492,15 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			goto err_destroy_umr_mkey;
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
+					&rq->wq_ctrl);
+		if (err)
+			return err;
+
+		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
+
+		wq_sz = mlx5_wq_ll_get_size(&rq->wqe.wq);
+
 		rq->wqe.frag_info =
 			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
 				     GFP_KERNEL, cpu_to_node(c->cpu));
@@ -538,16 +567,21 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		goto err_rq_wq_destroy;
 
 	for (i = 0; i < wq_sz; i++) {
-		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
-
 		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+			struct mlx5e_rx_wqe *wqe =
+				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
 			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
 
 			wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
+			wqe->data.byte_count = cpu_to_be32(byte_count);
+			wqe->data.lkey = rq->mkey_be;
+		} else {
+			struct mlx5e_rx_wqe *wqe =
+				mlx5_wq_ll_get_wqe(&rq->wqe.wq, i);
+
+			wqe->data.byte_count = cpu_to_be32(byte_count);
+			wqe->data.lkey = rq->mkey_be;
 		}
-
-		wqe->data.byte_count = cpu_to_be32(byte_count);
-		wqe->data.lkey = rq->mkey_be;
 	}
 
 	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
@@ -744,51 +778,65 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
 	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
 	struct mlx5e_channel *c = rq->channel;
-	struct mlx5_wq_ll *wq = &rq->wq;
-	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
+	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
 
 	do {
-		if (wq->cur_sz >= min_wqes)
+		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
 			return 0;
 
 		msleep(20);
 	} while (time_before(jiffies, exp_time));
 
 	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
-		    c->ix, rq->rqn, wq->cur_sz, min_wqes);
+		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
 
 	return -ETIMEDOUT;
 }
 
 static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 {
-	struct mlx5_wq_ll *wq = &rq->wq;
-	struct mlx5e_rx_wqe *wqe;
 	__be16 wqe_ix_be;
 	u16 wqe_ix;
 
-	/* UMR WQE (if in progress) is always at wq->head */
-	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
-	    rq->mpwqe.umr_in_progress)
-		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 
-	while (!mlx5_wq_ll_is_empty(wq)) {
-		wqe_ix_be = *wq->tail_next;
-		wqe_ix    = be16_to_cpu(wqe_ix_be);
-		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
-		rq->dealloc_wqe(rq, wqe_ix);
-		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
-			       &wqe->next.next_wqe_index);
-	}
+		/* UMR WQE (if in progress) is always at wq->head */
+		if (rq->mpwqe.umr_in_progress)
+			mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
 
-	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
-		/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
-		 * but yet to be re-posted.
-		 */
-		int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+		while (!mlx5_wq_ll_is_empty(wq)) {
+			struct mlx5e_rx_wqe *wqe;
 
-		for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
-			rq->dealloc_wqe(rq, wqe_ix);
+			wqe_ix_be = *wq->tail_next;
+			wqe_ix    = be16_to_cpu(wqe_ix_be);
+			wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
+			rq->dealloc_wqe(rq, wqe_ix);
+			mlx5_wq_ll_pop(wq, wqe_ix_be,
+				       &wqe->next.next_wqe_index);
+		}
+	} else {
+		struct mlx5_wq_ll *wq = &rq->wqe.wq;
+
+		while (!mlx5_wq_ll_is_empty(wq)) {
+			struct mlx5e_rx_wqe *wqe;
+
+			wqe_ix_be = *wq->tail_next;
+			wqe_ix    = be16_to_cpu(wqe_ix_be);
+			wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
+			rq->dealloc_wqe(rq, wqe_ix);
+			mlx5_wq_ll_pop(wq, wqe_ix_be,
+				       &wqe->next.next_wqe_index);
+		}
+
+		if (rq->wqe.page_reuse) {
+			/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
+			 * but yet to be re-posted.
+			 */
+			int wq_sz = mlx5_wq_ll_get_size(wq);
+
+			for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
+				rq->dealloc_wqe(rq, wqe_ix);
+		}
 	}
 }
@@ -2809,7 +2857,7 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
 
 	param->wq.db_numa_node = param->wq.buf_numa_node;
 
-	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
 				&rq->wq_ctrl);
 	if (err)
 		return err;
@@ -113,7 +113,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 			mpwrq_get_cqe_consumed_strides(&cq->title);
 	else
 		cq->decmprs_wqe_counter =
-			mlx5_wq_ll_ctr2ix(&rq->wq, cq->decmprs_wqe_counter + 1);
+			mlx5_wq_ll_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1);
 }
 
 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
@@ -369,7 +369,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
 
 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
 {
-	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
 
 	rq->mpwqe.umr_in_progress = false;
@@ -470,7 +470,7 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
-	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5_wq_ll *wq = &rq->wqe.wq;
 	int err;
 
 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
@@ -546,7 +546,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
 
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 {
-	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 
 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return false;
@@ -987,6 +987,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
+	struct mlx5_wq_ll *wq = &rq->wqe.wq;
 	struct mlx5e_wqe_frag_info *wi;
 	struct mlx5e_rx_wqe *wqe;
 	__be16 wqe_counter_be;
@@ -996,7 +997,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	wqe_counter_be = cqe->wqe_counter;
 	wqe_counter    = be16_to_cpu(wqe_counter_be);
-	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	wqe            = mlx5_wq_ll_get_wqe(wq, wqe_counter);
 	wi             = &rq->wqe.frag_info[wqe_counter];
 	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
 
@@ -1018,7 +1019,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	mlx5e_free_rx_wqe_reuse(rq, wi);
 wq_ll_pop:
-	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+	mlx5_wq_ll_pop(wq, wqe_counter_be,
 		       &wqe->next.next_wqe_index);
 }
 
@@ -1029,6 +1030,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_rep_priv *rpriv  = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
+	struct mlx5_wq_ll *wq = &rq->wqe.wq;
 	struct mlx5e_wqe_frag_info *wi;
 	struct mlx5e_rx_wqe *wqe;
 	struct sk_buff *skb;
@@ -1038,7 +1040,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	wqe_counter_be = cqe->wqe_counter;
 	wqe_counter    = be16_to_cpu(wqe_counter_be);
-	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	wqe            = mlx5_wq_ll_get_wqe(wq, wqe_counter);
 	wi             = &rq->wqe.frag_info[wqe_counter];
 	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
 
@@ -1063,7 +1065,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	mlx5e_free_rx_wqe_reuse(rq, wi);
 wq_ll_pop:
-	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+	mlx5_wq_ll_pop(wq, wqe_counter_be,
 		       &wqe->next.next_wqe_index);
 }
 #endif
@@ -1164,6 +1166,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
 	u32 page_idx       = wqe_offset >> PAGE_SHIFT;
 	struct mlx5e_rx_wqe *wqe;
+	struct mlx5_wq_ll *wq;
 	struct sk_buff *skb;
 	u16 cqe_bcnt;
 
@@ -1193,9 +1196,10 @@ mpwrq_cqe_out:
 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
 		return;
 
-	wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+	wq  = &rq->mpwqe.wq;
+	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
 	mlx5e_free_rx_mpwqe(rq, wi);
-	mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
 
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
@@ -1399,6 +1403,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
+	struct mlx5_wq_ll *wq = &rq->wqe.wq;
 	struct mlx5e_wqe_frag_info *wi;
 	struct mlx5e_rx_wqe *wqe;
 	__be16 wqe_counter_be;
@@ -1408,7 +1413,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	wqe_counter_be = cqe->wqe_counter;
 	wqe_counter    = be16_to_cpu(wqe_counter_be);
-	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	wqe            = mlx5_wq_ll_get_wqe(wq, wqe_counter);
 	wi             = &rq->wqe.frag_info[wqe_counter];
 	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
 
@@ -1425,7 +1430,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 wq_free_wqe:
 	mlx5e_free_rx_wqe_reuse(rq, wi);
-	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+	mlx5_wq_ll_pop(wq, wqe_counter_be,
 		       &wqe->next.next_wqe_index);
 }
 
@@ -1435,6 +1440,7 @@ wq_free_wqe:
 
 void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
+	struct mlx5_wq_ll *wq = &rq->wqe.wq;
 	struct mlx5e_wqe_frag_info *wi;
 	struct mlx5e_rx_wqe *wqe;
 	__be16 wqe_counter_be;
@@ -1444,7 +1450,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	wqe_counter_be = cqe->wqe_counter;
 	wqe_counter    = be16_to_cpu(wqe_counter_be);
-	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	wqe            = mlx5_wq_ll_get_wqe(wq, wqe_counter);
 	wi             = &rq->wqe.frag_info[wqe_counter];
 	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
 
@@ -1465,8 +1471,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	mlx5e_free_rx_wqe_reuse(rq, wi);
 wq_ll_pop:
-	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
-		       &wqe->next.next_wqe_index);
+	mlx5_wq_ll_pop(wq, wqe_counter_be, &wqe->next.next_wqe_index);
 }
 
 #endif /* CONFIG_MLX5_EN_IPSEC */