net/mlx5: Use order-0 allocations for all WQ types
Complete the transition of all WQ types to use fragmented,
order-0 coherent memory instead of high-order allocations.

CQ-WQ already uses order-0. Here we do the same for cyclic and
linked-list WQs.

This allows the driver to load cleanly on systems with highly
fragmented coherent memory.

Performance tests:
ConnectX-5 100Gbps, CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz
Packet rate of 64B packets, single transmit ring, size 8K.
No degradation is sensed.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
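Background for the diff below: a fragmented WQ keeps its entries in an
array of order-0 pages and resolves an entry index to a fragment at
lookup time. A minimal sketch of that index math, mirroring (but not
verbatim) the mlx5_frag_buf_get_wqe() helper this patch builds on:

	/* Map entry index ix to an address inside an order-0 fragment:
	 * the high bits of ix select the page, the low bits the offset
	 * within it. Field names follow struct mlx5_frag_buf_ctrl.
	 */
	static void *frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
	{
		unsigned int frag = ix >> fbc->log_frag_strides; /* page index */

		return fbc->frag_buf.frags[frag].buf +
		       ((ix & fbc->frag_sz_m1) << fbc->log_stride);
	}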
commit 3a2f703312
parent 549322f2f9
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -314,7 +314,7 @@ struct mlx5e_cq {
 
 	/* control */
 	struct mlx5_core_dev      *mdev;
-	struct mlx5_frag_wq_ctrl   wq_ctrl;
+	struct mlx5_wq_ctrl        wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_tx_wqe_info {
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -646,8 +646,8 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
 						MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
 
-	mlx5_fill_page_array(&rq->wq_ctrl.buf,
-			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
+				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
 	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
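Why mlx5_fill_page_array() had to go: with a fragmented buffer, the PAS
(physical address structure) list handed to firmware must carry the DMA
address of every order-0 fragment rather than walking one contiguous
buffer. A hedged sketch of what the frag variant does (simplified, not
the exact driver code):

	/* One PAS entry per order-0 fragment, big-endian as the
	 * device expects.
	 */
	static void fill_pas_from_frags(struct mlx5_frag_buf *buf, __be64 *pas)
	{
		int i;

		for (i = 0; i < buf->npages; i++)
			pas[i] = cpu_to_be64(buf->frags[i].map);
	}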
@@ -1096,7 +1096,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
 					  MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
 
-	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
+				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
 	err = mlx5_core_create_sq(mdev, in, inlen, sqn);
@@ -1538,7 +1539,7 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
 
 static void mlx5e_free_cq(struct mlx5e_cq *cq)
 {
-	mlx5_cqwq_destroy(&cq->wq_ctrl);
+	mlx5_wq_destroy(&cq->wq_ctrl);
 }
 
 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
@@ -1554,7 +1555,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
+		sizeof(u64) * cq->wq_ctrl.buf.npages;
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1563,7 +1564,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
 	memcpy(cqc, param->cqc, sizeof(param->cqc));
 
-	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
+	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
 				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
 	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
@@ -1571,7 +1572,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
 	MLX5_SET(cqc, cqc, c_eqn, eqn);
 	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
-	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
+	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
 					    MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -383,16 +383,16 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
 	return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 }
 
-static inline void mlx5e_fill_icosq_edge(struct mlx5e_icosq *sq,
-					 struct mlx5_wq_cyc *wq,
-					 u16 pi)
+static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
+					      struct mlx5_wq_cyc *wq,
+					      u16 pi, u16 frag_pi)
 {
 	struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
-	u8 nnops = mlx5_wq_cyc_get_size(wq) - pi;
+	u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
 
 	edge_wi = wi + nnops;
 
-	/* fill sq edge with nops to avoid wqe wrapping two pages */
+	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
 	for (; wi < edge_wi; wi++) {
 		wi->opcode = MLX5_OPCODE_NOP;
 		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
@@ -407,14 +407,15 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	struct mlx5e_umr_wqe *umr_wqe;
 	u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
-	u16 pi;
+	u16 pi, frag_pi;
 	int err;
 	int i;
 
 	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
 
-	if (unlikely(pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_size(wq))) {
-		mlx5e_fill_icosq_edge(sq, wq, pi);
+	if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
+		mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
 		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 	}
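The pattern above (repeated for the TX paths below): a WQE may span
several WQEBBs but must not cross an order-0 page boundary, so the post
routines now check the position within the current fragment (frag_pi)
rather than within the whole ring. A sketch of the flow, using the
helpers this patch introduces; MLX5E_UMR_WQEBBS stands in for any
multi-WQEBB WQE size:

	pi      = mlx5_wq_cyc_ctr2ix(wq, sq->pc);     /* slot in the ring */
	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); /* slot in the page */

	if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
		/* WQE would straddle two pages: pad the rest of this
		 * fragment with NOPs (each NOP advances sq->pc), then
		 * re-read the ring position.
		 */
		mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}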
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -296,16 +296,16 @@ dma_unmap_wqe_err:
 	return -ENOMEM;
 }
 
-static inline void mlx5e_fill_sq_edge(struct mlx5e_txqsq *sq,
-				      struct mlx5_wq_cyc *wq,
-				      u16 pi)
+static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
+					   struct mlx5_wq_cyc *wq,
+					   u16 pi, u16 frag_pi)
 {
 	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-	u8 nnops = mlx5_wq_cyc_get_size(wq) - pi;
+	u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
 
 	edge_wi = wi + nnops;
 
-	/* fill sq edge with nops to avoid wqe wrap around */
+	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
 	for (; wi < edge_wi; wi++) {
 		wi->skb = NULL;
 		wi->num_wqebbs = 1;
@@ -358,8 +358,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	unsigned char *skb_data = skb->data;
 	unsigned int skb_len = skb->len;
 	u16 ds_cnt, ds_cnt_inl = 0;
+	u16 headlen, ihs, frag_pi;
 	u8 num_wqebbs, opcode;
-	u16 headlen, ihs;
 	u32 num_bytes;
 	int num_dma;
 	__be16 mss;
@@ -395,8 +395,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	}
 
 	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	if (unlikely(pi + num_wqebbs > mlx5_wq_cyc_get_size(wq))) {
-		mlx5e_fill_sq_edge(sq, wq, pi);
+	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
+	if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+		mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
 		mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
 	}
@@ -642,9 +643,9 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
 	unsigned char *skb_data = skb->data;
 	unsigned int skb_len = skb->len;
+	u16 headlen, ihs, pi, frag_pi;
 	u16 ds_cnt, ds_cnt_inl = 0;
 	u8 num_wqebbs, opcode;
-	u16 headlen, ihs, pi;
 	u32 num_bytes;
 	int num_dma;
 	__be16 mss;
@@ -680,8 +681,9 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	}
 
 	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	if (unlikely(pi + num_wqebbs > mlx5_wq_cyc_get_size(wq))) {
-		mlx5e_fill_sq_edge(sq, wq, pi);
+	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
+	if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+		mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
 		mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
 	}
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -454,7 +454,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	}
 
 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-		sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages;
+		sizeof(u64) * conn->cq.wq_ctrl.buf.npages;
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
@@ -469,12 +469,12 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
 	MLX5_SET(cqc, cqc, c_eqn, eqn);
 	MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
-	MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
+	MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
 			   MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);
 
 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
-	mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
+	mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);
 
 	err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
 	kvfree(in);
@@ -500,7 +500,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	goto out;
 
 err_cqwq:
-	mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+	mlx5_wq_destroy(&conn->cq.wq_ctrl);
 out:
 	return err;
 }
@@ -510,7 +510,7 @@ static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
 	tasklet_disable(&conn->cq.tasklet);
 	tasklet_kill(&conn->cq.tasklet);
 	mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
-	mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+	mlx5_wq_destroy(&conn->cq.wq_ctrl);
 }
 
 static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
@@ -591,8 +591,8 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
 		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
 
-	mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
-			     (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
+	mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
+				  (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
 
 	err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
 	if (err)
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
@@ -54,7 +54,7 @@ struct mlx5_fpga_conn {
 	/* CQ */
 	struct {
 		struct mlx5_cqwq wq;
-		struct mlx5_frag_wq_ctrl wq_ctrl;
+		struct mlx5_wq_ctrl wq_ctrl;
 		struct mlx5_core_cq mcq;
 		struct tasklet_struct tasklet;
 	} cq;
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -36,7 +36,12 @@
 
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
 {
-	return (u32)wq->sz_m1 + 1;
+	return (u32)wq->fbc.sz_m1 + 1;
+}
+
+u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+{
+	return (u32)wq->fbc.frag_sz_m1 + 1;
 }
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -46,12 +51,12 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
 
 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
 {
-	return (u32)wq->sz_m1 + 1;
+	return (u32)wq->fbc.sz_m1 + 1;
 }
 
 static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
 {
-	return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+	return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
 }
 
 static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
@@ -67,17 +72,19 @@ static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
 
 static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
 {
-	return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+	return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
 }
 
 int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl)
 {
+	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
 	int err;
 
-	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
-	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
+		      MLX5_GET(wq, wqc, log_wq_sz),
+		      fbc);
 
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -85,14 +92,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		return err;
 	}
 
-	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
+	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}
 
-	wq->buf = wq_ctrl->buf.frags->buf;
+	fbc->frag_buf = wq_ctrl->buf;
 	wq->db  = wq_ctrl->db.db;
 
 	wq_ctrl->mdev = mdev;
@@ -105,17 +112,35 @@ err_db_free:
 	return err;
 }
 
+static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+				  struct mlx5_wq_qp *qp)
+{
+	struct mlx5_frag_buf *rqb, *sqb;
+
+	rqb = &qp->rq.fbc.frag_buf;
+	*rqb = *buf;
+	rqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
+	rqb->npages = 1 << get_order(rqb->size);
+
+	sqb = &qp->sq.fbc.frag_buf;
+	*sqb = *buf;
+	sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->sq);
+	sqb->npages = 1 << get_order(sqb->size);
+	sqb->frags += rqb->npages; /* first part is for the rq */
+}
+
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
 	int err;
 
-	wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
-	wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1;
-
-	wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB);
-	wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1;
+	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
+		      MLX5_GET(qpc, qpc, log_rq_size),
+		      &wq->rq.fbc);
+	mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
+		      MLX5_GET(qpc, qpc, log_sq_size),
+		      &wq->sq.fbc);
 
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
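mlx5e_qp_set_frag_buf() above carves one fragmented allocation into two
cyclic queues: the RQ view owns the first rqb->npages fragments and the
SQ view starts right after them. A standalone sketch of the split under
assumed sizes (the function and sizes here are illustrative, not driver
API):

	/* E.g. a 16KB RQ and a 32KB SQ on 4KB pages: the RQ gets
	 * frags[0..3], and the SQ view's frags[0] aliases frags[4].
	 */
	static void split_qp_frags(const struct mlx5_frag_buf *buf,
				   struct mlx5_frag_buf *rqb,
				   struct mlx5_frag_buf *sqb,
				   u32 rq_bytes, u32 sq_bytes)
	{
		*rqb = *buf;
		rqb->size   = rq_bytes;
		rqb->npages = 1 << get_order(rq_bytes);

		*sqb = *buf;
		sqb->size   = sq_bytes;
		sqb->npages = 1 << get_order(sq_bytes);
		sqb->frags += rqb->npages; /* skip the rq's pages */
	}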
@@ -123,15 +148,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		return err;
 	}
 
-	err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
+	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}
 
-	wq->rq.buf = wq_ctrl->buf.frags->buf;
-	wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
+	mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
 
 	wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
 	wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -147,7 +172,7 @@ err_db_free:
 
 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		     void *cqc, struct mlx5_cqwq *wq,
-		     struct mlx5_frag_wq_ctrl *wq_ctrl)
+		     struct mlx5_wq_ctrl *wq_ctrl)
 {
 	int err;
@@ -160,7 +185,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	}
 
 	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
-				       &wq_ctrl->frag_buf,
+				       &wq_ctrl->buf,
 				       param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
@@ -168,7 +193,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	wq->fbc.frag_buf = wq_ctrl->frag_buf;
+	wq->fbc.frag_buf = wq_ctrl->buf;
 	wq->db  = wq_ctrl->db.db;
 
 	wq_ctrl->mdev = mdev;
@@ -185,12 +210,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *wqc, struct mlx5_wq_ll *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
+	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
 	struct mlx5_wqe_srq_next_seg *next_seg;
 	int err;
 	int i;
 
-	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
-	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
+		      MLX5_GET(wq, wqc, log_wq_sz),
+		      fbc);
 
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -198,17 +225,17 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		return err;
 	}
 
-	err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
+	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}
 
-	wq->buf = wq_ctrl->buf.frags->buf;
+	wq->fbc.frag_buf = wq_ctrl->buf;
 	wq->db  = wq_ctrl->db.db;
 
-	for (i = 0; i < wq->sz_m1; i++) {
+	for (i = 0; i < fbc->sz_m1; i++) {
 		next_seg = mlx5_wq_ll_get_wqe(wq, i);
 		next_seg->next_wqe_index = cpu_to_be16(i + 1);
 	}
@@ -227,12 +254,7 @@ err_db_free:
 
 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
 {
-	mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
 	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
 }
-
-void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
-{
-	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
-	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
-}
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -48,17 +48,9 @@ struct mlx5_wq_ctrl {
 	struct mlx5_db db;
 };
 
-struct mlx5_frag_wq_ctrl {
-	struct mlx5_core_dev *mdev;
-	struct mlx5_frag_buf frag_buf;
-	struct mlx5_db db;
-};
-
 struct mlx5_wq_cyc {
-	void *buf;
+	struct mlx5_frag_buf_ctrl fbc;
 	__be32 *db;
-	u16 sz_m1;
-	u8 log_stride;
 };
 
 struct mlx5_wq_qp {
@@ -73,20 +65,19 @@ struct mlx5_cqwq {
 };
 
 struct mlx5_wq_ll {
-	void *buf;
+	struct mlx5_frag_buf_ctrl fbc;
 	__be32 *db;
 	__be16 *tail_next;
-	u16 sz_m1;
 	u16 head;
 	u16 wqe_ctr;
 	u16 cur_sz;
-	u8 log_stride;
 };
 
 int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
@@ -94,7 +85,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 
 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		     void *cqc, struct mlx5_cqwq *wq,
-		     struct mlx5_frag_wq_ctrl *wq_ctrl);
+		     struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
 
 int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
@@ -103,16 +94,20 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
 
 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
-void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);
 
 static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
 {
-	return ctr & wq->sz_m1;
+	return ctr & wq->fbc.sz_m1;
 }
 
+static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+	return ctr & wq->fbc.frag_sz_m1;
+}
+
 static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
 {
-	return wq->buf + (ix << wq->log_stride);
+	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }
 
 static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
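The two masks above decompose the same producer counter: sz_m1 yields
the slot in the whole ring, frag_sz_m1 the slot within the current
page. A worked example, assuming a 512-entry ring with 64 entries per
4KB fragment:

	u16 ctr     = 1000;
	u16 pi      = ctr & 511; /* = 488: ring slot        */
	u16 frag_pi = ctr & 63;  /* =  40: slot in the page */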
@@ -176,7 +171,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
 
 static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
 {
-	return wq->cur_sz == wq->sz_m1;
+	return wq->cur_sz == wq->fbc.sz_m1;
 }
 
 static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
@@ -186,12 +181,12 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
 
 static inline u16 mlx5_wq_ll_ctr2ix(struct mlx5_wq_ll *wq, u16 ctr)
 {
-	return ctr & wq->sz_m1;
+	return ctr & wq->fbc.sz_m1;
 }
 
 static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
 {
-	return wq->buf + (ix << wq->log_stride);
+	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }
 
 static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -983,16 +983,24 @@ static inline u32 mlx5_base_mkey(const u32 key)
 	return key & 0xffffff00u;
 }
 
-static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
-					      void *cqc)
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+				 struct mlx5_frag_buf_ctrl *fbc)
 {
-	fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
-	fbc->log_sz     = MLX5_GET(cqc, cqc, log_cq_size);
+	fbc->log_stride = log_stride;
+	fbc->log_sz     = log_sz;
 	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
 	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
 	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
 }
 
+static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
+					      void *cqc)
+{
+	mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
+		      MLX5_GET(cqc, cqc, log_cq_size),
+		      fbc);
+}
+
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
 					  u32 ix)
 {
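To make mlx5_fill_fbc() concrete, here is what it computes for a 64B
stride (log_stride = 6), 1024 entries (log_sz = 10) and 4KB pages
(PAGE_SHIFT = 12); the numbers are an illustrative example, not taken
from the patch:

	fbc->log_stride       = 6;    /* 64B entries             */
	fbc->log_sz           = 10;   /* 1024 entries            */
	fbc->sz_m1            = 1023; /* (1 << 10) - 1           */
	fbc->log_frag_strides = 6;    /* 12 - 6: 64 entries/page */
	fbc->frag_sz_m1       = 63;   /* (1 << 6) - 1            */

	/* The 64KB queue is built from 16 order-0 pages instead of a
	 * single order-4 allocation, which is the point of the patch.
	 */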