Merge branch 'mlx5-updates'
Saeed Mahameed says:
====================
Mellanox 100G mlx5 updates 2016-11-29
The following series from Tariq and Roi provides some critical fixes
and updates for the mlx5e driver.
From Tariq:
- Fix the driver's huge coherent-memory allocations by fragmenting
completion queues, transparently to the netdev driver, by providing a
new buffer type "mlx5_frag_buf" with the same access API.
- Create a UMR MKey per RQ for better scalability.
From Roi:
- Fixes for the encap/decap support and the tc flower offloads recently
added to the mlx5e driver.
v1->v2:
- Fix start index in error flow of mlx5_frag_buf_alloc_node, pointed out by Eric.
This series was generated against commit
31ac1c1945 ("geneve: fix ip_hdr_len reserved for geneve6 tunnel.")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ee3d7c6e5b
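To make the CQ fragmentation concrete before the diff: rather than one large
physically contiguous coherent allocation, the buffer becomes an array of
page-sized fragments, and a flat entry index is split into a fragment index
plus an offset inside that fragment. Below is a minimal userspace sketch of
the scheme, with illustrative names and plain division in place of the
kernel's shift/mask arithmetic; it is not the kernel code itself.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ      4096
#define LOG_STRIDE   6                        /* 64-byte completion entries */
#define ENT_PER_FRAG (PAGE_SZ >> LOG_STRIDE)  /* entries per page fragment */

struct frag_buf {
        void **frags;
        int nfrags;
};

static int frag_buf_alloc(struct frag_buf *buf, int nentries)
{
        int i;

        buf->nfrags = (nentries + ENT_PER_FRAG - 1) / ENT_PER_FRAG;
        buf->frags = calloc(buf->nfrags, sizeof(void *));
        if (!buf->frags)
                return -1;
        for (i = 0; i < buf->nfrags; i++) {
                /* one page-sized allocation per fragment, never larger */
                buf->frags[i] = calloc(1, PAGE_SZ);
                if (!buf->frags[i])
                        return -1; /* cleanup omitted for brevity */
        }
        return 0;
}

/* Same idea as the new mlx5_cqwq_get_wqe(): high bits of the index pick
 * the fragment, low bits pick the entry inside it. */
static void *frag_buf_get(struct frag_buf *buf, unsigned int ix)
{
        return (char *)buf->frags[ix / ENT_PER_FRAG] +
               (ix % ENT_PER_FRAG) * (1u << LOG_STRIDE);
}

int main(void)
{
        struct frag_buf buf;

        if (frag_buf_alloc(&buf, 1024))
                return 1;
        printf("entry 100 -> frag %d, offset %d\n",
               100 / ENT_PER_FRAG, (100 % ENT_PER_FRAG) << LOG_STRIDE);
        printf("addr: %p\n", frag_buf_get(&buf, 100));
        return 0;
}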
@@ -106,6 +106,63 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+                             struct mlx5_frag_buf *buf, int node)
+{
+        int i;
+
+        buf->size = size;
+        buf->npages = 1 << get_order(size);
+        buf->page_shift = PAGE_SHIFT;
+        buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
+                             GFP_KERNEL);
+        if (!buf->frags)
+                goto err_out;
+
+        for (i = 0; i < buf->npages; i++) {
+                struct mlx5_buf_list *frag = &buf->frags[i];
+                int frag_sz = min_t(int, size, PAGE_SIZE);
+
+                frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
+                                                          &frag->map, node);
+                if (!frag->buf)
+                        goto err_free_buf;
+                if (frag->map & ((1 << buf->page_shift) - 1)) {
+                        dma_free_coherent(&dev->pdev->dev, frag_sz,
+                                          buf->frags[i].buf, buf->frags[i].map);
+                        mlx5_core_warn(dev, "unexpected map alignment: 0x%p, page_shift=%d\n",
+                                       (void *)frag->map, buf->page_shift);
+                        goto err_free_buf;
+                }
+                size -= frag_sz;
+        }
+
+        return 0;
+
+err_free_buf:
+        while (i--)
+                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
+                                  buf->frags[i].map);
+        kfree(buf->frags);
+err_out:
+        return -ENOMEM;
+}
+
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
+{
+        int size = buf->size;
+        int i;
+
+        for (i = 0; i < buf->npages; i++) {
+                int frag_sz = min_t(int, size, PAGE_SIZE);
+
+                dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
+                                  buf->frags[i].map);
+                size -= frag_sz;
+        }
+        kfree(buf->frags);
+}
+
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
                                                  int node)
 {
@@ -230,3 +287,12 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
         }
 }
 EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
+
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+{
+        int i;
+
+        for (i = 0; i < buf->npages; i++)
+                pas[i] = cpu_to_be64(buf->frags[i].map);
+}
+EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
@@ -77,9 +77,9 @@
                                           MLX5_MPWRQ_WQE_PAGE_ORDER)
 
 #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
-        (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
-#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+#define MLX5E_REQUIRED_MTTS(wqes) \
+        (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
 
 #define MLX5_UMR_ALIGN (2048)
 #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
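A worked example of the macro change above (my numbers, not from the patch):
with the UMR MKey moving to per-RQ scope, the rqs factor drops out of
MLX5E_REQUIRED_MTTS. Assuming a multi-packet WQE spans 64 pages, a ring of
1024 WQEs needs 1024 * 64 = 65536 MTTs, i.e. MLX5_MTT_OCTW(65536) =
ALIGN(65536, 8) / 2 = 32768 octwords. The new "- 1" in MLX5E_VALID_NUM_MTTS
makes the bound inclusive: an MTT count whose octword size is exactly 65536
(131072 MTTs) now passes, where the old check rejected it.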
@@ -286,7 +286,7 @@ struct mlx5e_cq {
         u16                        decmprs_wqe_counter;
 
         /* control */
-        struct mlx5_wq_ctrl        wq_ctrl;
+        struct mlx5_frag_wq_ctrl   wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_rq;
@@ -347,7 +347,6 @@ struct mlx5e_rq {
                 struct {
                         struct mlx5e_mpw_info *info;
                         void                  *mtt_no_align;
-                        u32                    mtt_offset;
                 } mpwqe;
         };
         struct {
@@ -382,6 +381,7 @@ struct mlx5e_rq {
         u32                    rqn;
         struct mlx5e_channel  *channel;
         struct mlx5e_priv     *priv;
+        struct mlx5_core_mkey  umr_mkey;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_umr_dma_info {
@@ -689,7 +689,6 @@ struct mlx5e_priv {
 
         unsigned long              state;
         struct mutex               state_lock; /* Protects Interface state */
-        struct mlx5_core_mkey      umr_mkey;
         struct mlx5e_rq            drop_rq;
 
         struct mlx5e_channel     **channel;
@@ -838,8 +837,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 
 static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 {
-        return rq->mpwqe.mtt_offset +
-                wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+        return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
 }
 
 static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
@@ -499,8 +499,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                 return -EINVAL;
         }
 
-        num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
-                                       rx_pending_wqes);
+        num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
         if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
             !MLX5E_VALID_NUM_MTTS(num_mtts)) {
                 netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
@@ -565,7 +564,6 @@ static int mlx5e_set_channels(struct net_device *dev,
         unsigned int count = ch->combined_count;
         bool arfs_enabled;
         bool was_opened;
-        u32 num_mtts;
         int err = 0;
 
         if (!count) {
@@ -584,14 +582,6 @@ static int mlx5e_set_channels(struct net_device *dev,
                 return -EINVAL;
         }
 
-        num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
-        if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
-            !MLX5E_VALID_NUM_MTTS(num_mtts)) {
-                netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
-                            __func__, count);
-                return -EINVAL;
-        }
-
         if (priv->params.num_channels == count)
                 return 0;
 
@@ -471,6 +471,52 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
         kfree(rq->mpwqe.info);
 }
 
+static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
+                                 u64 npages, u8 page_shift,
+                                 struct mlx5_core_mkey *umr_mkey)
+{
+        struct mlx5_core_dev *mdev = priv->mdev;
+        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+        void *mkc;
+        u32 *in;
+        int err;
+
+        if (!MLX5E_VALID_NUM_MTTS(npages))
+                return -EINVAL;
+
+        in = mlx5_vzalloc(inlen);
+        if (!in)
+                return -ENOMEM;
+
+        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+        MLX5_SET(mkc, mkc, free, 1);
+        MLX5_SET(mkc, mkc, umr_en, 1);
+        MLX5_SET(mkc, mkc, lw, 1);
+        MLX5_SET(mkc, mkc, lr, 1);
+        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+
+        MLX5_SET(mkc, mkc, qpn, 0xffffff);
+        MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+        MLX5_SET64(mkc, mkc, len, npages << page_shift);
+        MLX5_SET(mkc, mkc, translations_octword_size,
+                 MLX5_MTT_OCTW(npages));
+        MLX5_SET(mkc, mkc, log_page_size, page_shift);
+
+        err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
+
+        kvfree(in);
+        return err;
+}
+
+static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
+{
+        struct mlx5e_priv *priv = rq->priv;
+        u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));
+
+        return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
+}
+
 static int mlx5e_create_rq(struct mlx5e_channel *c,
                            struct mlx5e_rq_param *param,
                            struct mlx5e_rq *rq)
@@ -527,18 +573,20 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
-                rq->mpwqe.mtt_offset = c->ix *
-                        MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
-
                 rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                 rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
 
                 rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
                 byte_count = rq->buff.wqe_sz;
-                rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
-                err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+
+                err = mlx5e_create_rq_umr_mkey(rq);
                 if (err)
                         goto err_rq_wq_destroy;
+                rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
+
+                err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+                if (err)
+                        goto err_destroy_umr_mkey;
                 break;
         default: /* MLX5_WQ_TYPE_LINKED_LIST */
                 rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
@@ -589,6 +637,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 
         return 0;
 
+err_destroy_umr_mkey:
+        mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+
 err_rq_wq_destroy:
         if (rq->xdp_prog)
                 bpf_prog_put(rq->xdp_prog);
@@ -607,6 +658,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
         switch (rq->wq_type) {
         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                 mlx5e_rq_free_mpwqe_info(rq);
+                mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
                 break;
         default: /* MLX5_WQ_TYPE_LINKED_LIST */
                 kfree(rq->dma_info);
@@ -1201,7 +1253,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
 
 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
 {
-        mlx5_wq_destroy(&cq->wq_ctrl);
+        mlx5_cqwq_destroy(&cq->wq_ctrl);
 }
 
 static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
@@ -1218,7 +1270,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
         int err;
 
         inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-                sizeof(u64) * cq->wq_ctrl.buf.npages;
+                sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
         in = mlx5_vzalloc(inlen);
         if (!in)
                 return -ENOMEM;
@@ -1227,7 +1279,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
         memcpy(cqc, param->cqc, sizeof(param->cqc));
 
-        mlx5_fill_page_array(&cq->wq_ctrl.buf,
+        mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
                              (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
         mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
@@ -1235,7 +1287,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
         MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
         MLX5_SET(cqc, cqc, c_eqn, eqn);
         MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
-        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
                                           MLX5_ADAPTER_PAGE_SHIFT);
         MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
 
@@ -3625,43 +3677,6 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
         mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
 }
 
-static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
-{
-        struct mlx5_core_dev *mdev = priv->mdev;
-        u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
-                                         BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
-        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
-        void *mkc;
-        u32 *in;
-        int err;
-
-        in = mlx5_vzalloc(inlen);
-        if (!in)
-                return -ENOMEM;
-
-        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-
-        npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
-
-        MLX5_SET(mkc, mkc, free, 1);
-        MLX5_SET(mkc, mkc, umr_en, 1);
-        MLX5_SET(mkc, mkc, lw, 1);
-        MLX5_SET(mkc, mkc, lr, 1);
-        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
-
-        MLX5_SET(mkc, mkc, qpn, 0xffffff);
-        MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
-        MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
-        MLX5_SET(mkc, mkc, translations_octword_size,
-                 MLX5_MTT_OCTW(npages));
-        MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
-
-        err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
-
-        kvfree(in);
-        return err;
-}
-
 static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
                            struct net_device *netdev,
                            const struct mlx5e_profile *profile,
@@ -3868,15 +3883,9 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
         profile = priv->profile;
         clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-        err = mlx5e_create_umr_mkey(priv);
-        if (err) {
-                mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
-                goto out;
-        }
-
         err = profile->init_tx(priv);
         if (err)
-                goto err_destroy_umr_mkey;
+                goto out;
 
         err = mlx5e_open_drop_rq(priv);
         if (err) {
@@ -3916,9 +3925,6 @@ err_close_drop_rq:
 err_cleanup_tx:
         profile->cleanup_tx(priv);
 
-err_destroy_umr_mkey:
-        mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
-
 out:
         return err;
 }
@@ -3967,7 +3973,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
         profile->cleanup_rx(priv);
         mlx5e_close_drop_rq(priv);
         profile->cleanup_tx(priv);
-        mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
         cancel_delayed_work_sync(&priv->update_stats_work);
 }
 
@@ -142,19 +142,39 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
         return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 }
 
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+                               struct mlx5e_tc_flow *flow) {
+        struct list_head *next = flow->encap.next;
+
+        list_del(&flow->encap);
+        if (list_empty(next)) {
+                struct mlx5_encap_entry *e;
+
+                e = list_entry(next, struct mlx5_encap_entry, flows);
+                if (e->n) {
+                        mlx5_encap_dealloc(priv->mdev, e->encap_id);
+                        neigh_release(e->n);
+                }
+                hlist_del_rcu(&e->encap_hlist);
+                kfree(e);
+        }
+}
+
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
-                              struct mlx5_flow_handle *rule,
-                              struct mlx5_esw_flow_attr *attr)
+                              struct mlx5e_tc_flow *flow)
 {
         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
         struct mlx5_fc *counter = NULL;
 
-        counter = mlx5_flow_rule_counter(rule);
+        counter = mlx5_flow_rule_counter(flow->rule);
 
-        if (esw && esw->mode == SRIOV_OFFLOADS)
-                mlx5_eswitch_del_vlan_action(esw, attr);
+        mlx5_del_flow_rules(flow->rule);
 
-        mlx5_del_flow_rules(rule);
+        if (esw && esw->mode == SRIOV_OFFLOADS) {
+                mlx5_eswitch_del_vlan_action(esw, flow->attr);
+                if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+                        mlx5e_detach_encap(priv, flow);
+        }
 
         mlx5_fc_destroy(priv->mdev, counter);
 
@@ -915,25 +935,17 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
         u32 flow_tag, action;
         struct mlx5e_tc_flow *flow;
         struct mlx5_flow_spec *spec;
-        struct mlx5_flow_handle *old = NULL;
-        struct mlx5_esw_flow_attr *old_attr = NULL;
         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 
         if (esw && esw->mode == SRIOV_OFFLOADS)
                 fdb_flow = true;
 
-        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
-                                      tc->ht_params);
-        if (flow) {
-                old = flow->rule;
-                old_attr = flow->attr;
-        } else {
-                if (fdb_flow)
-                        flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
-                                       GFP_KERNEL);
-                else
-                        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
-        }
+        if (fdb_flow)
+                flow = kzalloc(sizeof(*flow) +
+                               sizeof(struct mlx5_esw_flow_attr),
+                               GFP_KERNEL);
+        else
+                flow = kzalloc(sizeof(*flow), GFP_KERNEL);
 
         spec = mlx5_vzalloc(sizeof(*spec));
         if (!spec || !flow) {
@@ -970,40 +982,18 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
         if (err)
                 goto err_del_rule;
 
-        if (old)
-                mlx5e_tc_del_flow(priv, old, old_attr);
-
         goto out;
 
 err_del_rule:
         mlx5_del_flow_rules(flow->rule);
 
 err_free:
-        if (!old)
-                kfree(flow);
+        kfree(flow);
 out:
         kvfree(spec);
         return err;
 }
 
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
-                               struct mlx5e_tc_flow *flow) {
-        struct list_head *next = flow->encap.next;
-
-        list_del(&flow->encap);
-        if (list_empty(next)) {
-                struct mlx5_encap_entry *e;
-
-                e = list_entry(next, struct mlx5_encap_entry, flows);
-                if (e->n) {
-                        mlx5_encap_dealloc(priv->mdev, e->encap_id);
-                        neigh_release(e->n);
-                }
-                hlist_del_rcu(&e->encap_hlist);
-                kfree(e);
-        }
-}
-
 int mlx5e_delete_flower(struct mlx5e_priv *priv,
                         struct tc_cls_flower_offload *f)
 {
@@ -1017,10 +1007,8 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
 
         rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
 
-        mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
-
-        if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
-                mlx5e_detach_encap(priv, flow);
+        mlx5e_tc_del_flow(priv, flow);
 
         kfree(flow);
 
@@ -1077,7 +1065,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
         struct mlx5e_tc_flow *flow = ptr;
         struct mlx5e_priv *priv = arg;
 
-        mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
+        mlx5e_tc_del_flow(priv, flow);
         kfree(flow);
 }
 
@@ -101,13 +101,15 @@ err_db_free:
 
 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *cqc, struct mlx5_cqwq *wq,
-                     struct mlx5_wq_ctrl *wq_ctrl)
+                     struct mlx5_frag_wq_ctrl *wq_ctrl)
 {
         int err;
 
         wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
         wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
         wq->sz_m1 = (1 << wq->log_sz) - 1;
+        wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
+        wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1;
 
         err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
         if (err) {
@@ -115,14 +117,16 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                 return err;
         }
 
-        err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
-                                  &wq_ctrl->buf, param->buf_numa_node);
+        err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+                                       &wq_ctrl->frag_buf,
+                                       param->buf_numa_node);
         if (err) {
-                mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+                mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
+                               err);
                 goto err_db_free;
         }
 
-        wq->buf = wq_ctrl->buf.direct.buf;
+        wq->frag_buf = wq_ctrl->frag_buf;
         wq->db  = wq_ctrl->db.db;
 
         wq_ctrl->mdev = mdev;
|
|||||||
mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
|
mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
|
||||||
mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
|
mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
|
||||||
|
{
|
||||||
|
mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
|
||||||
|
mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
|
||||||
|
}
|
||||||
|
@@ -47,6 +47,12 @@ struct mlx5_wq_ctrl {
         struct mlx5_db        db;
 };
 
+struct mlx5_frag_wq_ctrl {
+        struct mlx5_core_dev  *mdev;
+        struct mlx5_frag_buf  frag_buf;
+        struct mlx5_db        db;
+};
+
 struct mlx5_wq_cyc {
         void                  *buf;
         __be32                *db;
@@ -55,12 +61,14 @@ struct mlx5_wq_cyc {
 };
 
 struct mlx5_cqwq {
-        void                  *buf;
+        struct mlx5_frag_buf  frag_buf;
         __be32                *db;
         u32                   sz_m1;
+        u32                   frag_sz_m1;
         u32                   cc; /* consumer counter */
         u8                    log_sz;
         u8                    log_stride;
+        u8                    log_frag_strides;
 };
 
 struct mlx5_wq_ll {
@@ -81,7 +89,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *cqc, struct mlx5_cqwq *wq,
-                     struct mlx5_wq_ctrl *wq_ctrl);
+                     struct mlx5_frag_wq_ctrl *wq_ctrl);
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
 
 int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
@@ -90,6 +98,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
 
 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);
 
 static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
 {
@@ -116,7 +125,10 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
 
 static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
 {
-        return wq->buf + (ix << wq->log_stride);
+        unsigned int frag = (ix >> wq->log_frag_strides);
+
+        return wq->frag_buf.frags[frag].buf +
+                ((wq->frag_sz_m1 & ix) << wq->log_stride);
 }
 
 static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
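Sanity-checking the new mlx5_cqwq_get_wqe() arithmetic above (assuming 4K
pages and cqe_sz == 0, i.e. 64-byte CQEs, so log_stride = 6): then
log_frag_strides = PAGE_SHIFT - log_stride = 6 and frag_sz_m1 = 63. For
ix = 100, frag = 100 >> 6 = 1 and the byte offset is (100 & 63) << 6 =
36 * 64 = 2304, i.e. entry 100 is the 37th CQE inside the second
page-sized fragment.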
@@ -318,6 +318,13 @@ struct mlx5_buf {
         u8                    page_shift;
 };
 
+struct mlx5_frag_buf {
+        struct mlx5_buf_list  *frags;
+        int                   npages;
+        int                   size;
+        u8                    page_shift;
+};
+
 struct mlx5_eq_tasklet {
         struct list_head      list;
         struct list_head      process_list;
@@ -822,6 +829,9 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                         struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+                             struct mlx5_frag_buf *buf, int node);
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                                       gfp_t flags, int npages);
 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -866,6 +876,7 @@ void mlx5_unregister_debugfs(void);
 int mlx5_eq_init(struct mlx5_core_dev *dev);
 void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING