mirror of https://github.com/torvalds/linux.git
Merge branch 'mlx' into merge-test

This commit is contained in commit 86ef0beaa0.
@@ -72,9 +72,6 @@ void ib_device_unregister_sysfs(struct ib_device *device);
 void ib_cache_setup(void);
 void ib_cache_cleanup(void);
 
-int ib_resolve_eth_dmac(struct ib_qp *qp,
-			struct ib_qp_attr *qp_attr, int *qp_attr_mask);
-
 typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
				      struct net_device *idev, void *cookie);
 
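Note: the declaration removed here reappears elsewhere in the series with a narrower signature. The resolver now takes a device and a single ib_ah_attr instead of a QP plus an attribute-mask pointer, so address-handle creation paths that have no QP can reuse it. A before/after sketch of a caller (caller context illustrative, not part of this diff):

	/* old: resolution tied to a QP; IB_QP_AV was tested inside the helper */
	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);

	/* new: the caller tests IB_QP_AV itself and passes only the AH attrs */
	if (qp_attr_mask & IB_QP_AV)
		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);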
@@ -289,5 +289,6 @@ IB_UVERBS_DECLARE_EX_CMD(modify_wq);
 IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
 IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
 IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
+IB_UVERBS_DECLARE_EX_CMD(modify_qp);
 
 #endif /* UVERBS_H */
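IB_UVERBS_DECLARE_EX_CMD generates the prototype of an extended-verbs handler that receives both the core and the driver-specific user-data buffers. In this era of the tree the macro expands roughly as below (sketch for orientation; the exact definition lives earlier in uverbs.h and is not part of this hunk):

#define IB_UVERBS_DECLARE_EX_CMD(name)				\
	int ib_uverbs_ex_##name(struct ib_uverbs_file *file,	\
				struct ib_device *ib_dev,	\
				struct ib_udata *ucore,		\
				struct ib_udata *uhw)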
@@ -2328,94 +2328,88 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
 	}
 }
 
-ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
-			    struct ib_device *ib_dev,
-			    const char __user *buf, int in_len,
-			    int out_len)
+static int modify_qp(struct ib_uverbs_file *file,
+		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
 {
-	struct ib_uverbs_modify_qp cmd;
-	struct ib_udata            udata;
-	struct ib_qp              *qp;
-	struct ib_qp_attr         *attr;
-	int                        ret;
-
-	if (copy_from_user(&cmd, buf, sizeof cmd))
-		return -EFAULT;
-
-	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
-		   out_len);
+	struct ib_qp_attr *attr;
+	struct ib_qp *qp;
+	int ret;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr)
 		return -ENOMEM;
 
-	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+	qp = idr_read_qp(cmd->base.qp_handle, file->ucontext);
 	if (!qp) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	attr->qp_state            = cmd.qp_state;
-	attr->cur_qp_state        = cmd.cur_qp_state;
-	attr->path_mtu            = cmd.path_mtu;
-	attr->path_mig_state      = cmd.path_mig_state;
-	attr->qkey                = cmd.qkey;
-	attr->rq_psn              = cmd.rq_psn;
-	attr->sq_psn              = cmd.sq_psn;
-	attr->dest_qp_num         = cmd.dest_qp_num;
-	attr->qp_access_flags     = cmd.qp_access_flags;
-	attr->pkey_index          = cmd.pkey_index;
-	attr->alt_pkey_index      = cmd.alt_pkey_index;
-	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
-	attr->max_rd_atomic       = cmd.max_rd_atomic;
-	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
-	attr->min_rnr_timer       = cmd.min_rnr_timer;
-	attr->port_num            = cmd.port_num;
-	attr->timeout             = cmd.timeout;
-	attr->retry_cnt           = cmd.retry_cnt;
-	attr->rnr_retry           = cmd.rnr_retry;
-	attr->alt_port_num        = cmd.alt_port_num;
-	attr->alt_timeout         = cmd.alt_timeout;
+	attr->qp_state            = cmd->base.qp_state;
+	attr->cur_qp_state        = cmd->base.cur_qp_state;
+	attr->path_mtu            = cmd->base.path_mtu;
+	attr->path_mig_state      = cmd->base.path_mig_state;
+	attr->qkey                = cmd->base.qkey;
+	attr->rq_psn              = cmd->base.rq_psn;
+	attr->sq_psn              = cmd->base.sq_psn;
+	attr->dest_qp_num         = cmd->base.dest_qp_num;
+	attr->qp_access_flags     = cmd->base.qp_access_flags;
+	attr->pkey_index          = cmd->base.pkey_index;
+	attr->alt_pkey_index      = cmd->base.alt_pkey_index;
+	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+	attr->max_rd_atomic       = cmd->base.max_rd_atomic;
+	attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
+	attr->min_rnr_timer       = cmd->base.min_rnr_timer;
+	attr->port_num            = cmd->base.port_num;
+	attr->timeout             = cmd->base.timeout;
+	attr->retry_cnt           = cmd->base.retry_cnt;
+	attr->rnr_retry           = cmd->base.rnr_retry;
+	attr->alt_port_num        = cmd->base.alt_port_num;
+	attr->alt_timeout         = cmd->base.alt_timeout;
+	attr->rate_limit          = cmd->rate_limit;
 
-	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
-	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
-	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
-	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
-	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
-	attr->ah_attr.dlid                  = cmd.dest.dlid;
-	attr->ah_attr.sl                    = cmd.dest.sl;
-	attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
-	attr->ah_attr.static_rate           = cmd.dest.static_rate;
-	attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
-	attr->ah_attr.port_num              = cmd.dest.port_num;
+	memcpy(attr->ah_attr.grh.dgid.raw, cmd->base.dest.dgid, 16);
+	attr->ah_attr.grh.flow_label        = cmd->base.dest.flow_label;
+	attr->ah_attr.grh.sgid_index        = cmd->base.dest.sgid_index;
+	attr->ah_attr.grh.hop_limit         = cmd->base.dest.hop_limit;
+	attr->ah_attr.grh.traffic_class     = cmd->base.dest.traffic_class;
+	attr->ah_attr.dlid                  = cmd->base.dest.dlid;
+	attr->ah_attr.sl                    = cmd->base.dest.sl;
+	attr->ah_attr.src_path_bits         = cmd->base.dest.src_path_bits;
+	attr->ah_attr.static_rate           = cmd->base.dest.static_rate;
+	attr->ah_attr.ah_flags              = cmd->base.dest.is_global ?
+					      IB_AH_GRH : 0;
+	attr->ah_attr.port_num              = cmd->base.dest.port_num;
 
-	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
-	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
-	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
-	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
-	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
-	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
-	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
-	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
-	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
-	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
-	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;
+	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd->base.alt_dest.dgid, 16);
+	attr->alt_ah_attr.grh.flow_label    = cmd->base.alt_dest.flow_label;
+	attr->alt_ah_attr.grh.sgid_index    = cmd->base.alt_dest.sgid_index;
+	attr->alt_ah_attr.grh.hop_limit     = cmd->base.alt_dest.hop_limit;
+	attr->alt_ah_attr.grh.traffic_class = cmd->base.alt_dest.traffic_class;
+	attr->alt_ah_attr.dlid              = cmd->base.alt_dest.dlid;
+	attr->alt_ah_attr.sl                = cmd->base.alt_dest.sl;
+	attr->alt_ah_attr.src_path_bits     = cmd->base.alt_dest.src_path_bits;
+	attr->alt_ah_attr.static_rate       = cmd->base.alt_dest.static_rate;
+	attr->alt_ah_attr.ah_flags          = cmd->base.alt_dest.is_global ?
+					      IB_AH_GRH : 0;
+	attr->alt_ah_attr.port_num          = cmd->base.alt_dest.port_num;
 
 	if (qp->real_qp == qp) {
-		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
-		if (ret)
-			goto release_qp;
+		if (cmd->base.attr_mask & IB_QP_AV) {
+			ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
+			if (ret)
+				goto release_qp;
+		}
 		ret = qp->device->modify_qp(qp, attr,
-			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+					    modify_qp_mask(qp->qp_type,
+							   cmd->base.attr_mask),
+					    udata);
 	} else {
-		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+		ret = ib_modify_qp(qp, attr,
+				   modify_qp_mask(qp->qp_type,
+						  cmd->base.attr_mask));
 	}
 
-	if (ret)
-		goto release_qp;
-
-	ret = in_len;
-
 release_qp:
 	put_qp_read(qp);
 
@@ -2425,6 +2419,68 @@ out:
 	return ret;
 }
 
+ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+			    struct ib_device *ib_dev,
+			    const char __user *buf, int in_len,
+			    int out_len)
+{
+	struct ib_uverbs_ex_modify_qp cmd = {};
+	struct ib_udata udata;
+	int ret;
+
+	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
+		return -EFAULT;
+
+	if (cmd.base.attr_mask &
+	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
+		return -EOPNOTSUPP;
+
+	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+		   in_len - sizeof(cmd.base), out_len);
+
+	ret = modify_qp(file, &cmd, &udata);
+	if (ret)
+		return ret;
+
+	return in_len;
+}
+
+int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
+			   struct ib_device *ib_dev,
+			   struct ib_udata *ucore,
+			   struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_modify_qp cmd = {};
+	int ret;
+
+	/*
+	 * Last bit is reserved for extending the attr_mask by
+	 * using another field.
+	 */
+	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
+
+	if (ucore->inlen < sizeof(cmd.base))
+		return -EINVAL;
+
+	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+	if (ret)
+		return ret;
+
+	if (cmd.base.attr_mask &
+	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
+		return -EOPNOTSUPP;
+
+	if (ucore->inlen > sizeof(cmd)) {
+		if (ib_is_udata_cleared(ucore, sizeof(cmd),
+					ucore->inlen - sizeof(cmd)))
+			return -EOPNOTSUPP;
+	}
+
+	ret = modify_qp(file, &cmd, uhw);
+
+	return ret;
+}
+
 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 			     struct ib_device *ib_dev,
 			     const char __user *buf, int in_len,
@@ -2875,6 +2931,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	struct ib_ah			*ah;
 	struct ib_ah_attr		attr;
 	int ret;
+	struct ib_udata			udata;
 
 	if (out_len < sizeof resp)
 		return -ENOSPC;
@@ -2882,6 +2939,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	INIT_UDATA(&udata, buf + sizeof(cmd),
+		   (unsigned long)cmd.response + sizeof(resp),
+		   in_len - sizeof(cmd), out_len - sizeof(resp));
+
 	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
 	if (!uobj)
 		return -ENOMEM;
@@ -2908,12 +2969,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	memset(&attr.dmac, 0, sizeof(attr.dmac));
 	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
 
-	ah = ib_create_ah(pd, &attr);
+	ah = pd->device->create_ah(pd, &attr, &udata);
+
 	if (IS_ERR(ah)) {
 		ret = PTR_ERR(ah);
 		goto err_put;
 	}
 
+	ah->device  = pd->device;
+	ah->pd      = pd;
+	atomic_inc(&pd->usecnt);
 	ah->uobject  = uobj;
 	uobj->object = ah;
 
@@ -3124,8 +3189,10 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 	kern_spec_val = (void *)kern_spec +
 		sizeof(struct ib_uverbs_flow_spec_hdr);
 	kern_spec_mask = kern_spec_val + kern_filter_sz;
+	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
+		return -EINVAL;
 
-	switch (ib_spec->type) {
+	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
 	case IB_FLOW_SPEC_ETH:
 		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
 		actual_filter_sz = spec_filter_size(kern_spec_mask,
@@ -3175,6 +3242,21 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
 		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
 		break;
+	case IB_FLOW_SPEC_VXLAN_TUNNEL:
+		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
+			return -EINVAL;
+		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
+		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
+
+		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
+		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -3745,7 +3827,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		err = PTR_ERR(flow_id);
 		goto err_free;
 	}
-	flow_id->qp = qp;
 	flow_id->uobject = uobj;
 	uobj->object = flow_id;
 
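Both new entry points guard the attribute mask with the same idiom: `~((LAST << 1) - 1)` builds a mask of every bit up to and including the last one this ABI level understands, and any bit outside it earns -EOPNOTSUPP, with the BUILD_BUG_ON reserving bit 31 for a future mask extension. The idiom in isolation (names hypothetical, not from this diff):

#include <stdint.h>

#define LAST_SUPPORTED_BIT	(1u << 20)	/* hypothetical last known bit */

static int check_attr_mask(uint32_t attr_mask)
{
	/* (LAST << 1) - 1 sets bits 0..20; anything above them is unknown */
	if (attr_mask & ~((LAST_SUPPORTED_BIT << 1) - 1))
		return -1;	/* unknown bits: reject, as -EOPNOTSUPP above */
	return 0;
}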
@@ -137,6 +137,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
 	[IB_USER_VERBS_EX_CMD_DESTROY_WQ]       = ib_uverbs_ex_destroy_wq,
 	[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
 	[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
+	[IB_USER_VERBS_EX_CMD_MODIFY_QP]        = ib_uverbs_ex_modify_qp,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -315,7 +315,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
 	struct ib_ah *ah;
 
-	ah = pd->device->create_ah(pd, ah_attr);
+	ah = pd->device->create_ah(pd, ah_attr, NULL);
 
 	if (!IS_ERR(ah)) {
 		ah->device  = pd->device;
@@ -328,7 +328,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 }
 EXPORT_SYMBOL(ib_create_ah);
 
-static int ib_get_header_version(const union rdma_network_hdr *hdr)
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
 {
 	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
 	struct iphdr ip4h_checked;
@@ -359,6 +359,7 @@ static int ib_get_header_version(const union rdma_network_hdr *hdr)
 		return 4;
 	return 6;
 }
+EXPORT_SYMBOL(ib_get_rdma_header_version);
 
 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
 						     u8 port_num,
@@ -369,7 +370,7 @@ static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
 	if (rdma_protocol_ib(device, port_num))
 		return RDMA_NETWORK_IB;
 
-	grh_version = ib_get_header_version((union rdma_network_hdr *)grh);
+	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
 
 	if (grh_version == 4)
 		return RDMA_NETWORK_IPV4;
@@ -415,9 +416,9 @@ static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
 				     &context, gid_index);
 }
 
-static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
-				  enum rdma_network_type net_type,
-				  union ib_gid *sgid, union ib_gid *dgid)
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+			      enum rdma_network_type net_type,
+			      union ib_gid *sgid, union ib_gid *dgid)
 {
 	struct sockaddr_in src_in;
 	struct sockaddr_in dst_in;
@@ -447,6 +448,7 @@ static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
 		return -EINVAL;
 	}
 }
+EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
 
 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		       const struct ib_wc *wc, const struct ib_grh *grh,
@@ -469,8 +471,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		net_type = ib_get_net_type_by_grh(device, port_num, grh);
 		gid_type = ib_network_to_gid_type(net_type);
 	}
-	ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
-				     &sgid, &dgid);
+	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+					&sgid, &dgid);
 	if (ret)
 		return ret;
 
@@ -1014,6 +1016,7 @@ static const struct {
 						 IB_QP_QKEY),
 				 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
 						 IB_QP_QKEY),
+				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
 			 }
 		}
 	},
@@ -1047,6 +1050,7 @@ static const struct {
 						 IB_QP_QKEY),
 				 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
 						 IB_QP_QKEY),
+				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
 			 }
 		},
 	[IB_QPS_SQD] = {
@@ -1196,66 +1200,66 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 }
 EXPORT_SYMBOL(ib_modify_qp_is_ok);
 
-int ib_resolve_eth_dmac(struct ib_qp *qp,
-			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+int ib_resolve_eth_dmac(struct ib_device *device,
+			struct ib_ah_attr *ah_attr)
 {
 	int ret = 0;
 
-	if (*qp_attr_mask & IB_QP_AV) {
-		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
-		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
-			return -EINVAL;
+	if (ah_attr->port_num < rdma_start_port(device) ||
+	    ah_attr->port_num > rdma_end_port(device))
+		return -EINVAL;
 
-		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
-			return 0;
+	if (!rdma_cap_eth_ah(device, ah_attr->port_num))
+		return 0;
 
-		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
-			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
-					qp_attr->ah_attr.dmac);
-		} else {
-			union ib_gid		sgid;
-			struct ib_gid_attr	sgid_attr;
-			int			ifindex;
-			int			hop_limit;
+	if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+		rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw,
+				ah_attr->dmac);
+	} else {
+		union ib_gid		sgid;
+		struct ib_gid_attr	sgid_attr;
+		int			ifindex;
+		int			hop_limit;
 
-			ret = ib_query_gid(qp->device,
-					   qp_attr->ah_attr.port_num,
-					   qp_attr->ah_attr.grh.sgid_index,
-					   &sgid, &sgid_attr);
+		ret = ib_query_gid(device,
+				   ah_attr->port_num,
+				   ah_attr->grh.sgid_index,
+				   &sgid, &sgid_attr);
 
-			if (ret || !sgid_attr.ndev) {
-				if (!ret)
-					ret = -ENXIO;
-				goto out;
-			}
+		if (ret || !sgid_attr.ndev) {
+			if (!ret)
+				ret = -ENXIO;
+			goto out;
+		}
 
-			ifindex = sgid_attr.ndev->ifindex;
+		ifindex = sgid_attr.ndev->ifindex;
 
-			ret = rdma_addr_find_l2_eth_by_grh(&sgid,
-							   &qp_attr->ah_attr.grh.dgid,
-							   qp_attr->ah_attr.dmac,
-							   NULL, &ifindex, &hop_limit);
+		ret = rdma_addr_find_l2_eth_by_grh(&sgid,
+						   &ah_attr->grh.dgid,
+						   ah_attr->dmac,
+						   NULL, &ifindex, &hop_limit);
 
-			dev_put(sgid_attr.ndev);
+		dev_put(sgid_attr.ndev);
 
-			qp_attr->ah_attr.grh.hop_limit = hop_limit;
-		}
+		ah_attr->grh.hop_limit = hop_limit;
 	}
 out:
 	return ret;
 }
 EXPORT_SYMBOL(ib_resolve_eth_dmac);
 
-
 int ib_modify_qp(struct ib_qp *qp,
 		 struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask)
 {
-	int ret;
+	if (qp_attr_mask & IB_QP_AV) {
+		int ret;
 
-	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
-	if (ret)
-		return ret;
+		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+		if (ret)
+			return ret;
+	}
 
 	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
@@ -1734,8 +1738,10 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
 		return ERR_PTR(-ENOSYS);
 
 	flow_id = qp->device->create_flow(qp, flow_attr, domain);
-	if (!IS_ERR(flow_id))
+	if (!IS_ERR(flow_id)) {
 		atomic_inc(&qp->usecnt);
+		flow_id->qp = qp;
+	}
 	return flow_id;
 }
 EXPORT_SYMBOL(ib_create_flow);
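The two state-table additions above let a raw packet QP carry IB_QP_RATE_LIMIT through the RTR-to-RTS and RTS-to-RTS transitions. With the rate_limit field this series adds to the QP attributes, a kernel consumer could pace a send queue roughly like this (sketch only; the units and supported range come from the packet-pacing caps the device reports):

	struct ib_qp_attr attr = { };

	attr.rate_limit = 1000;	/* kbps, within the device's reported range */
	if (ib_modify_qp(qp, &attr, IB_QP_RATE_LIMIT))
		pr_warn("rate limiting rejected by this device\n");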
@@ -62,7 +62,8 @@
 #include "common.h"
 
 static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
-				    struct ib_ah_attr *ah_attr)
+				    struct ib_ah_attr *ah_attr,
+				    struct ib_udata *udata)
 {
 	return ERR_PTR(-ENOSYS);
 }
@@ -59,7 +59,9 @@ module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
 
 static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
-				    struct ib_ah_attr *ah_attr)
+				    struct ib_ah_attr *ah_attr,
+				    struct ib_udata *udata)
+
 {
 	return ERR_PTR(-ENOSYS);
 }
@@ -39,7 +39,8 @@
 #define HNS_ROCE_VLAN_SL_BIT_MASK	7
 #define HNS_ROCE_VLAN_SL_SHIFT	13
 
-struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
+struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
 	struct device *dev = &hr_dev->pdev->dev;
@@ -687,7 +687,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
 				unsigned long obj, int cnt,
 				int rr);
 
-struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata);
 int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int hns_roce_destroy_ah(struct ib_ah *ah);
 
@@ -2704,7 +2704,9 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
  * @ah_attr: address handle attributes
  */
 static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
-				     struct ib_ah_attr *attr)
+				     struct ib_ah_attr *attr,
+				     struct ib_udata *udata)
+
 {
 	return ERR_PTR(-ENOSYS);
 }
@@ -111,7 +111,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 	       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
 			--ah->av.eth.stat_rate;
 	}
+	ah->av.eth.sl_tclass_flowlabel |=
+			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
+				    ah_attr->grh.flow_label);
 	/*
 	 * HW requires multicast LID so we just choose one.
 	 */
@@ -119,12 +121,14 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 		ah->av.ib.dlid = cpu_to_be16(0xc000);
 
 	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
-	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
+	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
 
 	return &ah->ibah;
 }
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata)
+
 {
 	struct mlx4_ib_ah *ah;
 	struct ib_ah *ret;
@@ -39,6 +39,8 @@
 #include <linux/mlx4/cmd.h>
 #include <linux/gfp.h>
 #include <rdma/ib_pma.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
 
 #include <linux/mlx4/driver.h>
 #include "mlx4_ib.h"
@@ -480,6 +482,23 @@ static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
 	return -EINVAL;
 }
 
+static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
+				union ib_gid *dgid)
+{
+	int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
+	enum rdma_network_type net_type;
+
+	if (version == 4)
+		net_type = RDMA_NETWORK_IPV4;
+	else if (version == 6)
+		net_type = RDMA_NETWORK_IPV6;
+	else
+		return -EINVAL;
+
+	return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+					 sgid, dgid);
+}
+
 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
 			  struct ib_grh *grh, struct ib_mad *mad)
@@ -538,7 +557,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	memset(&attr, 0, sizeof attr);
 	attr.port_num = port;
 	if (is_eth) {
-		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+		union ib_gid sgid;
+
+		if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid))
+			return -EINVAL;
 		attr.ah_flags = IB_AH_GRH;
 	}
 	ah = ib_create_ah(tun_ctx->pd, &attr);
@@ -651,6 +673,11 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 		is_eth = 1;
 
 	if (is_eth) {
+		union ib_gid dgid;
+		union ib_gid sgid;
+
+		if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
+			return -EINVAL;
 		if (!(wc->wc_flags & IB_WC_GRH)) {
 			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
 			return -EINVAL;
@@ -659,10 +686,10 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
 			return -EINVAL;
 		}
-		err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
+		err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
 		if (err && mlx4_is_mf_bonded(dev->dev)) {
 			other_port = (port == 1) ? 2 : 1;
-			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
+			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
 			if (!err) {
 				port = other_port;
 				pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
@@ -702,10 +729,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 
 	/* If a grh is present, we demux according to it */
 	if (wc->wc_flags & IB_WC_GRH) {
-		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
-		if (slave < 0) {
-			mlx4_ib_warn(ibdev, "failed matching grh\n");
-			return -ENOENT;
+		if (grh->dgid.global.interface_id ==
+			cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
+		    grh->dgid.global.subnet_prefix == cpu_to_be64(
+			atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
+			slave = 0;
+		} else {
+			slave = mlx4_ib_find_real_gid(ibdev, port,
+						      grh->dgid.global.interface_id);
+			if (slave < 0) {
+				mlx4_ib_warn(ibdev, "failed matching grh\n");
+				return -ENOENT;
+			}
 		}
 	}
 	/* Class-specific handling */
@@ -547,6 +547,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
 	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
 	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+	props->max_ah = INT_MAX;
 
 	if (!mlx4_is_slave(dev->dev))
 		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -697,9 +698,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	if (err)
 		goto out;
 
-	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
-					   IB_WIDTH_4X : IB_WIDTH_1X;
-	props->active_speed	= IB_SPEED_QDR;
+	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
+				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+					   IB_WIDTH_4X : IB_WIDTH_1X;
+	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+					   IB_SPEED_FDR : IB_SPEED_QDR;
 	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
@@ -2817,14 +2820,19 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		if (!ibdev->ib_uc_qpns_bitmap)
 			goto err_steer_qp_release;
 
-		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
-
-		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
-				dev, ibdev->steer_qpn_base,
-				ibdev->steer_qpn_base +
-				ibdev->steer_qpn_count - 1);
-		if (err)
-			goto err_steer_free_bitmap;
+		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
+			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
+				    ibdev->steer_qpn_count);
+			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+					dev, ibdev->steer_qpn_base,
+					ibdev->steer_qpn_base +
+					ibdev->steer_qpn_count - 1);
+			if (err)
+				goto err_steer_free_bitmap;
+		} else {
+			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
+				    ibdev->steer_qpn_count);
+		}
 	}
 
 	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
@@ -742,7 +742,8 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata);
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int mlx4_ib_destroy_ah(struct ib_ah *ah);
 
@@ -644,7 +644,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	int qpn;
 	int err;
 	struct ib_qp_cap backup_cap;
-	struct mlx4_ib_sqp *sqp;
+	struct mlx4_ib_sqp *sqp = NULL;
 	struct mlx4_ib_qp *qp;
 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
 	struct mlx4_ib_cq *mcq;
@@ -933,7 +933,9 @@ err_db:
 	mlx4_db_free(dev->dev, &qp->db);
 
 err:
-	if (!*caller_qp)
+	if (sqp)
+		kfree(sqp);
+	else if (!*caller_qp)
 		kfree(qp);
 	return err;
 }
@@ -1280,7 +1282,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
 	if (is_qp0(dev, mqp))
 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
-	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
+	    dev->qp1_proxy[mqp->port - 1] == mqp) {
 		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
 		dev->qp1_proxy[mqp->port - 1] = NULL;
 		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
@@ -1764,14 +1767,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
 			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		union ib_gid gid;
-		struct ib_gid_attr gid_attr;
+		struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
 		u16 vlan = 0xffff;
 		u8 smac[ETH_ALEN];
 		int status = 0;
 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
 			attr->ah_attr.ah_flags & IB_AH_GRH;
 
-		if (is_eth) {
+		if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
 			int index = attr->ah_attr.grh.sgid_index;
 
 			status = ib_get_cached_gid(ibqp->device, port_num,
@@ -64,7 +64,9 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
 	return &ah->ibah;
 }
 
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata)
+
 {
 	struct mlx5_ib_ah *ah;
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -75,6 +77,27 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 	if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
 		return ERR_PTR(-EINVAL);
 
+	if (ll == IB_LINK_LAYER_ETHERNET && udata) {
+		int err;
+		struct mlx5_ib_create_ah_resp resp = {};
+		u32 min_resp_len = offsetof(typeof(resp), dmac) +
+				   sizeof(resp.dmac);
+
+		if (udata->outlen < min_resp_len)
+			return ERR_PTR(-EINVAL);
+
+		resp.response_length = min_resp_len;
+
+		err = ib_resolve_eth_dmac(pd->device, ah_attr);
+		if (err)
+			return ERR_PTR(err);
+
+		memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);
+		err = ib_copy_to_udata(udata, &resp, resp.response_length);
+		if (err)
+			return ERR_PTR(err);
+	}
+
 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
 	if (!ah)
 		return ERR_PTR(-ENOMEM);
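The udata branch above is the usual contract for extensible user responses: refuse output buffers smaller than the fields the kernel intends to write, record in response_length how much was actually filled, and hand the resolved DMAC back so userspace needs no ARP/ND logic of its own. Reduced to an annotated skeleton (names as in the diff; a sketch, not a drop-in implementation):

	if (udata->outlen < min_resp_len)	/* old userspace: buffer too small */
		return ERR_PTR(-EINVAL);
	resp.response_length = min_resp_len;	/* advertise exactly what we fill */
	err = ib_resolve_eth_dmac(pd->device, ah_attr);	/* kernel resolves the MAC */
	if (err)
		return ERR_PTR(err);
	memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);	/* hand it back to userspace */
	err = ib_copy_to_udata(udata, &resp, resp.response_length);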
@@ -731,7 +731,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 			  int entries, u32 **cqb,
 			  int *cqe_size, int *index, int *inlen)
 {
-	struct mlx5_ib_create_cq ucmd;
+	struct mlx5_ib_create_cq ucmd = {};
 	size_t ucmdlen;
 	int page_shift;
 	__be64 *pas;
@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	if (err)
 		goto err_umem;
 
-	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
+	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
 			   &ncont, NULL);
 	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
 		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
@@ -792,8 +792,36 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 
 	*index = to_mucontext(context)->uuari.uars[0].index;
 
+	if (ucmd.cqe_comp_en == 1) {
+		if (unlikely((*cqe_size != 64) ||
+			     !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+			err = -EOPNOTSUPP;
+			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
+				     *cqe_size);
+			goto err_cqb;
+		}
+
+		if (unlikely(!ucmd.cqe_comp_res_format ||
+			     !(ucmd.cqe_comp_res_format <
+			       MLX5_IB_CQE_RES_RESERVED) ||
+			     (ucmd.cqe_comp_res_format &
+			      (ucmd.cqe_comp_res_format - 1)))) {
+			err = -EOPNOTSUPP;
+			mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
+				     ucmd.cqe_comp_res_format);
+			goto err_cqb;
+		}
+
+		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+		MLX5_SET(cqc, cqc, mini_cqe_res_format,
+			 ilog2(ucmd.cqe_comp_res_format));
+	}
+
 	return 0;
 
+err_cqb:
+	kfree(cqb);
+
 err_db:
 	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
 
@@ -1125,7 +1153,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 		return err;
 	}
 
-	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
 			   npas, NULL);
 
 	cq->resize_umem = umem;
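The res-format check above combines three conditions: non-zero, below MLX5_IB_CQE_RES_RESERVED, and a power of two, i.e. exactly one supported format bit selected; `x & (x - 1)` is zero precisely for powers of two. As a standalone predicate (illustrative, not from this diff):

#include <stdbool.h>

static bool exactly_one_bit_below(unsigned int x, unsigned int limit)
{
	/* non-zero, inside the allowed range, and a single set bit */
	return x && x < limit && !(x & (x - 1));
}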
@@ -127,7 +127,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
 
 	if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
 	    && ibdev->ib_active) {
-		struct ib_event ibev = {0};
+		struct ib_event ibev = { };
 
 		ibev.device = &ibdev->ib_dev;
 		ibev.event = (event == NETDEV_UP) ?
@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_core_dev *mdev = dev->mdev;
 	int err = -ENOMEM;
+	int max_sq_desc;
 	int max_rq_sg;
 	int max_sq_sg;
 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
 	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
 		     sizeof(struct mlx5_wqe_data_seg);
-	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
-		     sizeof(struct mlx5_wqe_ctrl_seg)) /
-		     sizeof(struct mlx5_wqe_data_seg);
+	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
+		     sizeof(struct mlx5_wqe_raddr_seg)) /
+		     sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
 	props->max_sge_rd = MLX5_MAX_SGE_RD;
 	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
@@ -643,6 +645,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+	props->max_ah = INT_MAX;
 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
 
@@ -669,6 +672,40 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
 	}
 
+	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+			uhw->outlen)) {
+		resp.mlx5_ib_support_multi_pkt_send_wqes =
+			MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+		resp.response_length +=
+			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+	}
+
+	if (field_avail(typeof(resp), reserved, uhw->outlen))
+		resp.response_length += sizeof(resp.reserved);
+
+	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+		resp.cqe_comp_caps.max_num =
+			MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
+			MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
+		resp.cqe_comp_caps.supported_format =
+			MLX5_IB_CQE_RES_FORMAT_HASH |
+			MLX5_IB_CQE_RES_FORMAT_CSUM;
+		resp.response_length += sizeof(resp.cqe_comp_caps);
+	}
+
+	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+		    MLX5_CAP_GEN(mdev, qos)) {
+			resp.packet_pacing_caps.qp_rate_limit_max =
+				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+			resp.packet_pacing_caps.qp_rate_limit_min =
+				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+			resp.packet_pacing_caps.supported_qpts |=
+				1 << IB_QPT_RAW_PACKET;
+		}
+		resp.response_length += sizeof(resp.packet_pacing_caps);
+	}
+
 	if (uhw->outlen) {
 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
@@ -1093,7 +1130,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 		resp.response_length += sizeof(resp.cqe_version);
 
 	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
-		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
+				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
 		resp.response_length += sizeof(resp.cmds_supp_uhw);
 	}
 
@@ -1502,6 +1540,22 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
 }
 
+static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+			   bool inner)
+{
+	if (inner) {
+		MLX5_SET(fte_match_set_misc,
+			 misc_c, inner_ipv6_flow_label, mask);
+		MLX5_SET(fte_match_set_misc,
+			 misc_v, inner_ipv6_flow_label, val);
+	} else {
+		MLX5_SET(fte_match_set_misc,
+			 misc_c, outer_ipv6_flow_label, mask);
+		MLX5_SET(fte_match_set_misc,
+			 misc_v, outer_ipv6_flow_label, val);
+	}
+}
+
 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 {
 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
@@ -1515,6 +1569,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 #define LAST_IPV4_FIELD tos
 #define LAST_IPV6_FIELD traffic_class
 #define LAST_TCP_UDP_FIELD src_port
+#define LAST_TUNNEL_FIELD tunnel_id
 
 /* Field is the last supported field */
 #define FIELDS_NOT_SUPPORTED(filter, field)\
@@ -1527,155 +1582,164 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 static int parse_flow_attr(u32 *match_c, u32 *match_v,
 			   const union ib_flow_spec *ib_spec)
 {
-	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
-					     outer_headers);
-	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
-					     outer_headers);
 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
 					   misc_parameters);
 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
 					   misc_parameters);
+	void *headers_c;
+	void *headers_v;
 
-	switch (ib_spec->type) {
+	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+					 inner_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+					 inner_headers);
+	} else {
+		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+					 outer_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+					 outer_headers);
+	}
+
+	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
 	case IB_FLOW_SPEC_ETH:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
 			return -ENOTSUPP;
 
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 					     dmac_47_16),
 				ib_spec->eth.mask.dst_mac);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 					     dmac_47_16),
 				ib_spec->eth.val.dst_mac);
 
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 					     smac_47_16),
 				ib_spec->eth.mask.src_mac);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 					     smac_47_16),
 				ib_spec->eth.val.src_mac);
 
 		if (ib_spec->eth.mask.vlan_tag) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 				 vlan_tag, 1);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 				 vlan_tag, 1);
 
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
 
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 				 first_cfi,
 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 				 first_cfi,
 				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
 
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 				 first_prio,
 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 				 first_prio,
 				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
 		}
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 ethertype, ntohs(ib_spec->eth.val.ether_type));
 		break;
 	case IB_FLOW_SPEC_IPV4:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
 			return -ENOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 			 ethertype, 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 ethertype, ETH_P_IP);
 
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.mask.src_ip,
 		       sizeof(ib_spec->ipv4.mask.src_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.val.src_ip,
 		       sizeof(ib_spec->ipv4.val.src_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.mask.dst_ip,
 		       sizeof(ib_spec->ipv4.mask.dst_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.val.dst_ip,
 		       sizeof(ib_spec->ipv4.val.dst_ip));
 
-		set_tos(outer_headers_c, outer_headers_v,
+		set_tos(headers_c, headers_v,
 			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
 
-		set_proto(outer_headers_c, outer_headers_v,
+		set_proto(headers_c, headers_v,
 			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
 		break;
 	case IB_FLOW_SPEC_IPV6:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
 			return -ENOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 			 ethertype, 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 ethertype, ETH_P_IPV6);
 
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.mask.src_ip,
 		       sizeof(ib_spec->ipv6.mask.src_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.val.src_ip,
 		       sizeof(ib_spec->ipv6.val.src_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.mask.dst_ip,
 		       sizeof(ib_spec->ipv6.mask.dst_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.val.dst_ip,
 		       sizeof(ib_spec->ipv6.val.dst_ip));
 
-		set_tos(outer_headers_c, outer_headers_v,
+		set_tos(headers_c, headers_v,
 			ib_spec->ipv6.mask.traffic_class,
 			ib_spec->ipv6.val.traffic_class);
 
-		set_proto(outer_headers_c, outer_headers_v,
+		set_proto(headers_c, headers_v,
 			  ib_spec->ipv6.mask.next_hdr,
 			  ib_spec->ipv6.val.next_hdr);
 
-		MLX5_SET(fte_match_set_misc, misc_params_c,
-			 outer_ipv6_flow_label,
-			 ntohl(ib_spec->ipv6.mask.flow_label));
-		MLX5_SET(fte_match_set_misc, misc_params_v,
-			 outer_ipv6_flow_label,
-			 ntohl(ib_spec->ipv6.val.flow_label));
+		set_flow_label(misc_params_c, misc_params_v,
+			       ntohl(ib_spec->ipv6.mask.flow_label),
+			       ntohl(ib_spec->ipv6.val.flow_label),
+			       ib_spec->type & IB_FLOW_SPEC_INNER);
 
 		break;
 	case IB_FLOW_SPEC_TCP:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
 					 LAST_TCP_UDP_FIELD))
 			return -ENOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
 			 0xff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
 			 IPPROTO_TCP);
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
 			 ntohs(ib_spec->tcp_udp.mask.src_port));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
 			 ntohs(ib_spec->tcp_udp.val.src_port));
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
 			 ntohs(ib_spec->tcp_udp.val.dst_port));
 		break;
 	case IB_FLOW_SPEC_UDP:
@@ -1683,21 +1747,31 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 					 LAST_TCP_UDP_FIELD))
 			return -ENOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
 			 0xff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_UDP);
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
 			 ntohs(ib_spec->tcp_udp.mask.src_port));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
 			 ntohs(ib_spec->tcp_udp.val.src_port));
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
 			 ntohs(ib_spec->tcp_udp.val.dst_port));
 		break;
+	case IB_FLOW_SPEC_VXLAN_TUNNEL:
+		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
+					 LAST_TUNNEL_FIELD))
+			return -ENOTSUPP;
+
+		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
+			 ntohl(ib_spec->tunnel.mask.tunnel_id));
+		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
+			 ntohl(ib_spec->tunnel.val.tunnel_id));
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2718,6 +2792,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
 			       struct ib_port_immutable *immutable)
 {
 	struct ib_port_attr attr;
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
 	int err;
 
 	err = mlx5_ib_query_port(ibdev, port_num, &attr);
@@ -2727,7 +2803,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
 	immutable->gid_tbl_len = attr.gid_tbl_len;
 	immutable->core_cap_flags = get_core_cap_flags(ibdev);
-	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
+		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 
 	return 0;
 }
@@ -2741,7 +2818,7 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
 		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
-static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
@@ -2770,7 +2847,7 @@ err_destroy_vport_lag:
 	return err;
 }
 
-static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
 
@@ -2782,15 +2859,7 @@ static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
 	}
 }
 
-static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
-{
-	if (dev->roce.nb.notifier_call) {
-		unregister_netdevice_notifier(&dev->roce.nb);
-		dev->roce.nb.notifier_call = NULL;
-	}
-}
-
-static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
 {
 	int err;
 
@@ -2801,28 +2870,51 @@ static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 		return err;
 	}
 
-	err = mlx5_nic_vport_enable_roce(dev->mdev);
-	if (err)
-		goto err_unregister_netdevice_notifier;
+	return 0;
+}
 
-	err = mlx5_roce_lag_init(dev);
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
+{
+	if (dev->roce.nb.notifier_call) {
+		unregister_netdevice_notifier(&dev->roce.nb);
+		dev->roce.nb.notifier_call = NULL;
+	}
+}
+
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
+{
+	int err;
+
+	err = mlx5_add_netdev_notifier(dev);
+	if (err)
+		return err;
+
+	if (MLX5_CAP_GEN(dev->mdev, roce)) {
+		err = mlx5_nic_vport_enable_roce(dev->mdev);
+		if (err)
+			goto err_unregister_netdevice_notifier;
+	}
+
+	err = mlx5_eth_lag_init(dev);
 	if (err)
 		goto err_disable_roce;
 
 	return 0;
 
 err_disable_roce:
-	mlx5_nic_vport_disable_roce(dev->mdev);
+	if (MLX5_CAP_GEN(dev->mdev, roce))
+		mlx5_nic_vport_disable_roce(dev->mdev);
 
 err_unregister_netdevice_notifier:
-	mlx5_remove_roce_notifier(dev);
+	mlx5_remove_netdev_notifier(dev);
 	return err;
 }
 
-static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
+static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 {
-	mlx5_roce_lag_cleanup(dev);
-	mlx5_nic_vport_disable_roce(dev->mdev);
+	mlx5_eth_lag_cleanup(dev);
+	if (MLX5_CAP_GEN(dev->mdev, roce))
+		mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
 static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -2944,9 +3036,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
-	if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
-		return NULL;
-
 	printk_once(KERN_INFO "%s", mlx5_version);
 
 	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -2992,6 +3081,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
+		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
+		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
@@ -3014,7 +3105,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.uverbs_ex_cmd_mask =
 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)	|
+		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
 
 	dev->ib_dev.query_device	= mlx5_ib_query_device;
 	dev->ib_dev.query_port		= mlx5_ib_query_port;
@@ -3125,14 +3217,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	spin_lock_init(&dev->reset_flow_resource_lock);
 
 	if (ll == IB_LINK_LAYER_ETHERNET) {
-		err = mlx5_enable_roce(dev);
+		err = mlx5_enable_eth(dev);
 		if (err)
 			goto err_dealloc;
 	}
 
 	err = create_dev_resources(&dev->devr);
 	if (err)
-		goto err_disable_roce;
+		goto err_disable_eth;
 
 	err = mlx5_ib_odp_init_one(dev);
 	if (err)
@@ -3176,10 +3268,10 @@ err_odp:
 err_rsrc:
 	destroy_dev_resources(&dev->devr);
 
-err_disable_roce:
+err_disable_eth:
 	if (ll == IB_LINK_LAYER_ETHERNET) {
-		mlx5_disable_roce(dev);
-		mlx5_remove_roce_notifier(dev);
+		mlx5_disable_eth(dev);
+		mlx5_remove_netdev_notifier(dev);
 	}
 
 err_free_port:
@@ -3196,14 +3288,14 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	struct mlx5_ib_dev *dev = context;
 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
-	mlx5_remove_roce_notifier(dev);
+	mlx5_remove_netdev_notifier(dev);
 	ib_unregister_device(&dev->ib_dev);
 	mlx5_ib_dealloc_q_counters(dev);
 	destroy_umrc_res(dev);
 	mlx5_ib_odp_remove_one(dev);
 	destroy_dev_resources(&dev->devr);
 	if (ll == IB_LINK_LAYER_ETHERNET)
-		mlx5_disable_roce(dev);
+		mlx5_disable_eth(dev);
 	kfree(dev->port);
 	ib_dealloc_device(&dev->ib_dev);
 }
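Every capability block added to mlx5_ib_query_device above is gated on field_avail(), so an older userspace with a short output buffer never receives bytes it did not allocate, and response_length tells it how much the kernel actually filled. The macro is essentially an offset check; a sketch consistent with its use here (not guaranteed to match the in-tree definition verbatim):

#define field_avail(typ, fld, sz)					\
	(offsetof(typ, fld) + sizeof(((typ *)0)->fld) <= (sz))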
@ -37,12 +37,15 @@

/* @umem: umem object to scan
 * @addr: ib virtual address requested by the user
 * @max_page_shift: high limit for page_shift - 0 means no limit
 * @count: number of PAGE_SIZE pages covered by umem
 * @shift: page shift for the compound pages found in the region
 * @ncont: number of compound pages
 * @order: log2 of the number of compound pages
 */
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
                        unsigned long max_page_shift,
                        int *count, int *shift,
                        int *ncont, int *order)
{
    unsigned long tmp;
@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
    addr = addr >> page_shift;
    tmp = (unsigned long)addr;
    m = find_first_bit(&tmp, BITS_PER_LONG);
    if (max_page_shift)
        m = min_t(unsigned long, max_page_shift - page_shift, m);
    skip = 1 << m;
    mask = skip - 1;
    i = 0;

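The new max_page_shift argument caps how large a compound page mlx5_ib_cont_pages() may report; mr_umem_get() further down passes MLX5_MKEY_PAGE_SHIFT_MASK so the result always fits an mkey's log_page_size field, while callers that pass 0 keep the old unbounded behaviour. The clamp is a small bit trick: the lowest set bit of the page-frame number bounds the largest naturally aligned compound page. A standalone sketch of that arithmetic (hypothetical best_page_shift() helper in plain C, not the kernel code itself):

#include <stdio.h>
#include <strings.h>    /* ffsll() */

/* Sketch: given a 4KB-aligned address, find the largest page shift it
 * supports, optionally capped by max_page_shift (0 = no cap). Mirrors
 * the find_first_bit()/min_t() logic above, assuming page_shift
 * starts at 12 (4KB pages). */
static int best_page_shift(unsigned long long addr, int max_page_shift)
{
    int page_shift = 12;
    unsigned long long pfn = addr >> page_shift;
    /* index of lowest set bit = natural alignment, in pages */
    int m = pfn ? ffsll(pfn) - 1 : 63;

    if (max_page_shift && m > max_page_shift - page_shift)
        m = max_page_shift - page_shift;
    return page_shift + m;
}

int main(void)
{
    /* 2MB-aligned address: natural shift is 21, but a cap of 16 wins */
    printf("%d\n", best_page_shift(0x200000ULL, 0));  /* prints 21 */
    printf("%d\n", best_page_shift(0x200000ULL, 16)); /* prints 16 */
    return 0;
}
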
@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
    MLX5_IB_MMAP_CMD_SHIFT = 8,
    MLX5_IB_MMAP_CMD_MASK = 0xff,
@ -387,6 +389,7 @@ struct mlx5_ib_qp {
    struct list_head qps_list;
    struct list_head cq_recv_list;
    struct list_head cq_send_list;
    u32 rate_limit;
};

struct mlx5_ib_cq_buf {
@ -418,7 +421,7 @@ struct mlx5_umr_wr {
    struct ib_pd *pd;
    unsigned int page_shift;
    unsigned int npages;
    u32 length;
    u64 length;
    int access_flags;
    u32 mkey;
};
@ -737,7 +740,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
                 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
                                struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@ -823,7 +827,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
                        unsigned long max_page_shift,
                        int *count, int *shift,
                        int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
                            int page_shift, size_t offset, size_t num_pages,

@ -627,7 +627,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
        ent->order = i + 2;
        ent->dev = dev;

        if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
        if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
            (mlx5_core_is_pf(dev->mdev)))
            limit = dev->mdev->profile->mr_cache[i].limit;
        else
            limit = 0;
@ -645,6 +646,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
    return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
    struct mlx5_mr_cache *cache = &dev->cache;
    struct mlx5_cache_ent *ent;
    int total = 0;
    int i;
    int j;

    for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
        ent = &cache->ent[i];
        for (j = 0 ; j < 1000; j++) {
            if (!ent->pending)
                break;
            msleep(50);
        }
    }
    for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
        ent = &cache->ent[i];
        total += ent->pending;
    }

    if (total)
        mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
    else
        mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
    int i;
@ -658,6 +686,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
        clean_keys(dev, i);

    destroy_workqueue(dev->cache.wq);
    wait_for_async_commands(dev);
    del_timer_sync(&dev->delay_timer);

    return 0;
@ -815,29 +844,34 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
    umrwr->mkey = key;
}

static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
                                   int access_flags, int *npages,
                                   int *page_shift, int *ncont, int *order)
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
                       int access_flags, struct ib_umem **umem,
                       int *npages, int *page_shift, int *ncont,
                       int *order)
{
    struct mlx5_ib_dev *dev = to_mdev(pd->device);
    struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
                                       access_flags, 0);
    if (IS_ERR(umem)) {
    int err;

    *umem = ib_umem_get(pd->uobject->context, start, length,
                        access_flags, 0);
    err = PTR_ERR_OR_ZERO(*umem);
    if (err < 0) {
        mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
        return (void *)umem;
        return err;
    }

    mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
    mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
                       page_shift, ncont, order);
    if (!*npages) {
        mlx5_ib_warn(dev, "avoid zero region\n");
        ib_umem_release(umem);
        return ERR_PTR(-EINVAL);
        ib_umem_release(*umem);
        return -EINVAL;
    }

    mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
                *npages, *ncont, *order, *page_shift);

    return umem;
    return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
@ -1163,11 +1197,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,

    mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                start, virt_addr, length, access_flags);
    umem = mr_umem_get(pd, start, length, access_flags, &npages,
    err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
                      &page_shift, &ncont, &order);

    if (IS_ERR(umem))
        return (void *)umem;
    if (err < 0)
        return ERR_PTR(err);

    if (use_umr(order)) {
        mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@ -1341,10 +1375,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
     */
    flags |= IB_MR_REREG_TRANS;
    ib_umem_release(mr->umem);
    mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
                           &page_shift, &ncont, &order);
    if (IS_ERR(mr->umem)) {
        err = PTR_ERR(mr->umem);
    err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
                      &npages, &page_shift, &ncont, &order);
    if (err < 0) {
        mr->umem = NULL;
        return err;
    }

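The mr_umem_get() rework swaps an ERR_PTR-style return for the more conventional 0/-errno plus an output parameter, which is what lets mlx5_ib_rereg_user_mr() above keep mr->umem either valid or NULL rather than error-encoded. A minimal sketch of the pattern outside the driver (hypothetical struct thing/get_thing() names; PTR_ERR_OR_ZERO() is the stock <linux/err.h> helper):

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int x; };

/* Old style: return the object, or an ERR_PTR-encoded errno. */
static struct thing *get_thing_old(void)
{
    struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

    return t ? t : ERR_PTR(-ENOMEM);
}

/* New style: return 0 or -errno and hand the object back via @out.
 * PTR_ERR_OR_ZERO() folds the IS_ERR() test into one expression; on
 * failure the caller resets its pointer (cf. mr->umem = NULL above)
 * because *out still holds the encoded error. */
static int get_thing(struct thing **out)
{
    *out = get_thing_old();
    return PTR_ERR_OR_ZERO(*out);
}
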
@ -78,12 +78,14 @@ struct mlx5_wqe_eth_pad {

enum raw_qp_set_mask_map {
    MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
    MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
};

struct mlx5_modify_raw_qp_param {
    u16 operation;

    u32 set_mask; /* raw_qp_set_mask_map */
    u32 rate_limit;
    u8 rq_q_ctr_id;
};

@ -352,6 +354,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
    return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
    int max_sge;

    if (attr->qp_type == IB_QPT_RC)
        max_sge = (min_t(int, wqe_size, 512) -
                   sizeof(struct mlx5_wqe_ctrl_seg) -
                   sizeof(struct mlx5_wqe_raddr_seg)) /
                  sizeof(struct mlx5_wqe_data_seg);
    else if (attr->qp_type == IB_QPT_XRC_INI)
        max_sge = (min_t(int, wqe_size, 512) -
                   sizeof(struct mlx5_wqe_ctrl_seg) -
                   sizeof(struct mlx5_wqe_xrc_seg) -
                   sizeof(struct mlx5_wqe_raddr_seg)) /
                  sizeof(struct mlx5_wqe_data_seg);
    else
        max_sge = (wqe_size - sq_overhead(attr)) /
                  sizeof(struct mlx5_wqe_data_seg);

    return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
                 sizeof(struct mlx5_wqe_data_seg));
}

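get_send_sge() makes the advertised SGE count honest: rather than echoing cap.max_send_sge back, it derives how many data segments actually fit in the computed WQE, and calc_sq_size() below now rejects a QP whose request can't be met. A worked example, assuming the usual 16-byte mlx5 segment sizes (ctrl, raddr and data segments are each 16 bytes):

/* RC QP with wqe_size = 192:
 *
 *   usable  = min(192, 512) - sizeof(ctrl_seg) - sizeof(raddr_seg)
 *           = 192 - 16 - 16          = 160 bytes
 *   max_sge = 160 / sizeof(data_seg) = 10 SGEs
 *
 * A request for cap.max_send_sge = 16 now fails with -ENOMEM instead
 * of being silently over-promised; a request for 8 succeeds, and
 * cap.max_send_sge is written back as the real limit of 10.
 */
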
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
{
@ -382,13 +407,18 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
    wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
    qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
    if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
        mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
        mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
                    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
                    qp->sq.wqe_cnt,
                    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
        return -ENOMEM;
    }
    qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
    qp->sq.max_gs = attr->cap.max_send_sge;
    qp->sq.max_gs = get_send_sge(attr, wqe_size);
    if (qp->sq.max_gs < attr->cap.max_send_sge)
        return -ENOMEM;

    attr->cap.max_send_sge = qp->sq.max_gs;
    qp->sq.max_post = wq_size / wqe_size;
    attr->cap.max_send_wr = qp->sq.max_post;

@ -648,7 +678,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
        return PTR_ERR(*umem);
    }

    mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
    mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);

    err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
    if (err) {
@ -701,7 +731,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        return err;
    }

    mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
    mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
                       &ncont, NULL);
    err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
                                 &rwq->rq_page_offset);
@ -2443,8 +2473,14 @@ out:
}

static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
                                   struct mlx5_ib_sq *sq, int new_state)
                                   struct mlx5_ib_sq *sq,
                                   int new_state,
                                   const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
    struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
    u32 old_rate = ibqp->rate_limit;
    u32 new_rate = old_rate;
    u16 rl_index = 0;
    void *in;
    void *sqc;
    int inlen;
@ -2460,10 +2496,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
    sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
    MLX5_SET(sqc, sqc, state, new_state);

    err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
    if (err)
        goto out;
    if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
        if (new_state != MLX5_SQC_STATE_RDY)
            pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
                    __func__);
        else
            new_rate = raw_qp_param->rate_limit;
    }

    if (old_rate != new_rate) {
        if (new_rate) {
            err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
            if (err) {
                pr_err("Failed configuring rate %u: %d\n",
                       new_rate, err);
                goto out;
            }
        }

        MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
        MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
    }

    err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
    if (err) {
        /* Remove new rate from table if failed */
        if (new_rate &&
            old_rate != new_rate)
            mlx5_rl_remove_rate(dev, new_rate);
        goto out;
    }

    /* Only remove the old rate after new rate was set */
    if ((old_rate &&
        (old_rate != new_rate)) ||
        (new_state != MLX5_SQC_STATE_RDY))
        mlx5_rl_remove_rate(dev, old_rate);

    ibqp->rate_limit = new_rate;
    sq->state = new_state;

out:
@ -2478,6 +2548,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
    struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
    struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
    struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
    int modify_rq = !!qp->rq.wqe_cnt;
    int modify_sq = !!qp->sq.wqe_cnt;
    int rq_state;
    int sq_state;
    int err;
@ -2495,10 +2567,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        rq_state = MLX5_RQC_STATE_RST;
        sq_state = MLX5_SQC_STATE_RST;
        break;
    case MLX5_CMD_OP_INIT2INIT_QP:
    case MLX5_CMD_OP_INIT2RTR_QP:
    case MLX5_CMD_OP_RTR2RTS_QP:
    case MLX5_CMD_OP_RTS2RTS_QP:
        if (raw_qp_param->set_mask ==
            MLX5_RAW_QP_RATE_LIMIT) {
            modify_rq = 0;
            sq_state = sq->state;
        } else {
            return raw_qp_param->set_mask ? -EINVAL : 0;
        }
        break;
    case MLX5_CMD_OP_INIT2INIT_QP:
    case MLX5_CMD_OP_INIT2RTR_QP:
        if (raw_qp_param->set_mask)
            return -EINVAL;
        else
@ -2508,13 +2588,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        return -EINVAL;
    }

    if (qp->rq.wqe_cnt) {
        err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
    if (modify_rq) {
        err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
        if (err)
            return err;
    }

    if (qp->sq.wqe_cnt) {
    if (modify_sq) {
        if (tx_affinity) {
            err = modify_raw_packet_tx_affinity(dev->mdev, sq,
                                                tx_affinity);
@ -2522,7 +2602,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                return err;
        }

        return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
        return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
    }

    return 0;
@ -2578,7 +2658,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
    struct mlx5_ib_port *mibport = NULL;
    enum mlx5_qp_state mlx5_cur, mlx5_new;
    enum mlx5_qp_optpar optpar;
    int sqd_event;
    int mlx5_st;
    int err;
    u16 op;
@ -2725,12 +2804,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
    if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
        context->db_rec_addr = cpu_to_be64(qp->db.dma);

    if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
        attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
        sqd_event = 1;
    else
        sqd_event = 0;

    if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
        u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
                       qp->port) - 1;
@ -2777,6 +2850,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
            raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
            raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
        }

        if (attr_mask & IB_QP_RATE_LIMIT) {
            raw_qp_param.rate_limit = attr->rate_limit;
            raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
        }

        err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
    } else {
        err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
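Wired together, these pieces give IB_QP_RATE_LIMIT end to end: the attribute mask (added to ib_verbs.h later in this diff) flows into raw_qp_param and from there into the SQ's packet-pacing index. A hedged sketch of a kernel-side caller (assumes an existing RAW_PACKET QP and kbps units, per the mlx5_packet_pacing_caps comment further down; this caller is illustrative, not part of the patch):

#include <rdma/ib_verbs.h>

/* Sketch: cap a raw packet QP's egress at roughly 1 Gb/s. */
static int set_tx_rate(struct ib_qp *qp)
{
    struct ib_qp_attr attr = {
        .rate_limit = 1000000,  /* kbps */
    };

    /* mlx5 only accepts a rate-limit-only modify while the SQ is
     * (moving to) RDY, i.e. on the *2RTS transitions. */
    return ib_modify_qp(qp, &attr, IB_QP_RATE_LIMIT);
}
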
@ -3068,10 +3147,10 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
    memset(umr, 0, sizeof(*umr));
    umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
    umr->flags = 1 << 7;
    umr->flags = MLX5_UMR_INLINE;
}

static __be64 get_umr_reg_mr_mask(void)
static __be64 get_umr_reg_mr_mask(int atomic)
{
    u64 result;

@ -3084,9 +3163,11 @@ static __be64 get_umr_reg_mr_mask(void)
             MLX5_MKEY_MASK_KEY |
             MLX5_MKEY_MASK_RR |
             MLX5_MKEY_MASK_RW |
             MLX5_MKEY_MASK_A |
             MLX5_MKEY_MASK_FREE;

    if (atomic)
        result |= MLX5_MKEY_MASK_A;

    return cpu_to_be64(result);
}

@ -3147,7 +3228,7 @@ static __be64 get_umr_update_pd_mask(void)
}

static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
                                struct ib_send_wr *wr)
                                struct ib_send_wr *wr, int atomic)
{
    struct mlx5_umr_wr *umrwr = umr_wr(wr);

@ -3172,7 +3253,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
        if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
            umr->mkey_mask |= get_umr_update_pd_mask();
        if (!umr->mkey_mask)
            umr->mkey_mask = get_umr_reg_mr_mask();
            umr->mkey_mask = get_umr_reg_mr_mask(atomic);
    } else {
        umr->mkey_mask = get_umr_unreg_mr_mask();
    }
@ -4025,7 +4106,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
    }
    qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
    ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
    set_reg_umr_segment(seg, wr);
    set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
    seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
    size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
    if (unlikely((seg == qend)))

@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
        return err;
    }

    mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
    mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
                       &page_shift, &ncont, NULL);
    err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
                                 &offset);
@ -280,6 +280,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
    mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
                desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
                srq->msrq.max_avail_gather);
    in.type = init_attr->srq_type;

    if (pd->uobject)
        err = create_srq_user(pd, srq, &in, udata, buf_size);
@ -292,7 +293,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
        goto err_srq;
    }

    in.type = init_attr->srq_type;
    in.log_size = ilog2(srq->msrq.max);
    in.wqe_shift = srq->msrq.wqe_shift - 4;
    if (srq->wq_sig)

@ -186,8 +186,8 @@ int mthca_create_ah(struct mthca_dev *dev,

on_hca_fail:
    if (ah->type == MTHCA_AH_PCI_POOL) {
        ah->av = pci_pool_alloc(dev->av_table.pool,
                                GFP_ATOMIC, &ah->avdma);
        ah->av = pci_pool_zalloc(dev->av_table.pool,
                                 GFP_ATOMIC, &ah->avdma);
        if (!ah->av)
            return -ENOMEM;

@ -196,8 +196,6 @@ on_hca_fail:

    ah->key = pd->ntmr.ibmr.lkey;

    memset(av, 0, MTHCA_AV_SIZE);

    av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
    av->g_slid = ah_attr->src_path_bits;
    av->dlid = cpu_to_be16(ah_attr->dlid);

@ -410,7 +410,9 @@ static int mthca_dealloc_pd(struct ib_pd *pd)
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
                                     struct ib_ah_attr *ah_attr,
                                     struct ib_udata *udata)
{
    int err;
    struct mthca_ah *ah;

@ -771,7 +771,8 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
/**
 * nes_create_ah
 */
static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
                                   struct ib_udata *udata)
{
    return ERR_PTR(-ENOSYS);
}

@ -154,7 +154,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
    return status;
}

struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
                               struct ib_udata *udata)
{
    u32 *ahid_addr;
    int status;

@ -50,7 +50,9 @@ enum {
    OCRDMA_AH_L3_TYPE_MASK = 0x03,
    OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
};
struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);

struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *,
                               struct ib_udata *);
int ocrdma_destroy_ah(struct ib_ah *);
int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);

@ -2094,7 +2094,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
    return rc;
}

struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
                             struct ib_udata *udata)
{
    struct qedr_ah *ah;

@ -70,7 +70,8 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
                  int qp_attr_mask, struct ib_qp_init_attr *);
int qedr_destroy_qp(struct ib_qp *ibqp);

struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
                             struct ib_udata *udata);
int qedr_destroy_ah(struct ib_ah *ibah);

int qedr_dereg_mr(struct ib_mr *);

@ -738,7 +738,9 @@ int usnic_ib_mmap(struct ib_ucontext *context,

/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
                                 struct ib_ah_attr *ah_attr)
                                 struct ib_ah_attr *ah_attr,
                                 struct ib_udata *udata)
{
    usnic_dbg("\n");
    return ERR_PTR(-EPERM);

@ -75,7 +75,9 @@ int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
int usnic_ib_mmap(struct ib_ucontext *context,
                  struct vm_area_struct *vma);
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
                                 struct ib_ah_attr *ah_attr);
                                 struct ib_ah_attr *ah_attr,
                                 struct ib_udata *udata);

int usnic_ib_destroy_ah(struct ib_ah *ah);
int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                       struct ib_send_wr **bad_wr);

@ -82,7 +82,7 @@ enum rxe_device_param {
    RXE_MAX_SGE = 32,
    RXE_MAX_SGE_RD = 32,
    RXE_MAX_CQ = 16384,
    RXE_MAX_LOG_CQE = 13,
    RXE_MAX_LOG_CQE = 15,
    RXE_MAX_MR = 2 * 1024,
    RXE_MAX_PD = 0x7ffc,
    RXE_MAX_QP_RD_ATOM = 128,

@ -316,7 +316,9 @@ static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
    return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
                                   struct ib_udata *udata)
{
    int err;
    struct rxe_dev *rxe = to_rdev(ibpd->device);

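Every provider change in this run is fallout from one core-API change (the create_ah method in ib_verbs.h below now takes a struct ib_udata), so drivers can return vendor data at AH-creation time; mlx5 uses it to hand back the resolved DMAC via struct mlx5_ib_create_ah_resp at the end of this diff. A hedged sketch of the shape such a provider implementation takes (hypothetical myvendor names; ib_copy_to_udata() is the standard helper):

static struct ib_ah *myvendor_create_ah(struct ib_pd *pd,
                                        struct ib_ah_attr *ah_attr,
                                        struct ib_udata *udata)
{
    struct myvendor_create_ah_resp resp = {};
    struct myvendor_ah *ah;

    /* create_ah may be called from atomic context */
    ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
    if (!ah)
        return ERR_PTR(-ENOMEM);

    /* ... program the address vector from ah_attr ... */

    /* Respond only to userspace callers that supplied a buffer. */
    if (udata && udata->outlen >= sizeof(resp)) {
        resp.response_length = sizeof(resp);
        if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
            kfree(ah);
            return ERR_PTR(-EFAULT);
        }
    }

    return &ah->ibah;
}
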
@ -1050,8 +1050,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_

    tx_qp = ib_create_qp(priv->pd, &attr);
    if (PTR_ERR(tx_qp) == -EINVAL) {
        ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
                   priv->ca->name);
        attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
        tx_qp = ib_create_qp(priv->pd, &attr);
    }

@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
            r->com.from_state = r->com.state;
            r->com.to_state = state;
            r->com.state = RES_EQ_BUSY;
            if (eq)
                *eq = r;
        }
    }

    spin_unlock_irq(mlx4_tlock(dev));

    if (!err && eq)
        *eq = r;

    return err;
}

@ -576,7 +576,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
    u8 self_lb_en_modifiable[0x1];
    u8 reserved_at_9[0x2];
    u8 max_lso_cap[0x5];
    u8 reserved_at_10[0x2];
    u8 multi_pkt_send_wqe[0x2];
    u8 wqe_inline_mode[0x2];
    u8 rss_ind_tbl_cap[0x4];
    u8 reg_umr_sq[0x1];

@ -1102,6 +1102,7 @@ enum ib_qp_attr_mask {
    IB_QP_RESERVED2 = (1<<22),
    IB_QP_RESERVED3 = (1<<23),
    IB_QP_RESERVED4 = (1<<24),
    IB_QP_RATE_LIMIT = (1<<25),
};

enum ib_qp_state {
@ -1151,6 +1152,7 @@ struct ib_qp_attr {
    u8 rnr_retry;
    u8 alt_port_num;
    u8 alt_timeout;
    u32 rate_limit;
};

enum ib_wr_opcode {
@ -1592,17 +1594,19 @@ enum ib_flow_attr_type {
/* Supported steering header types */
enum ib_flow_spec_type {
    /* L2 headers*/
    IB_FLOW_SPEC_ETH          = 0x20,
    IB_FLOW_SPEC_IB           = 0x22,
    /* L3 header*/
    IB_FLOW_SPEC_IPV4         = 0x30,
    IB_FLOW_SPEC_IPV6         = 0x31,
    /* L4 headers*/
    IB_FLOW_SPEC_TCP          = 0x40,
    IB_FLOW_SPEC_UDP          = 0x41,
    IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
    IB_FLOW_SPEC_INNER        = 0x100,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
@ -1630,7 +1634,7 @@ struct ib_flow_eth_filter {
};

struct ib_flow_spec_eth {
    enum ib_flow_spec_type type;
    u32 type;
    u16 size;
    struct ib_flow_eth_filter val;
    struct ib_flow_eth_filter mask;
@ -1644,7 +1648,7 @@ struct ib_flow_ib_filter {
};

struct ib_flow_spec_ib {
    enum ib_flow_spec_type type;
    u32 type;
    u16 size;
    struct ib_flow_ib_filter val;
    struct ib_flow_ib_filter mask;
@ -1669,7 +1673,7 @@ struct ib_flow_ipv4_filter {
};

struct ib_flow_spec_ipv4 {
    enum ib_flow_spec_type type;
    u32 type;
    u16 size;
    struct ib_flow_ipv4_filter val;
    struct ib_flow_ipv4_filter mask;
@ -1687,7 +1691,7 @@ struct ib_flow_ipv6_filter {
};

struct ib_flow_spec_ipv6 {
    enum ib_flow_spec_type type;
    u32 type;
    u16 size;
    struct ib_flow_ipv6_filter val;
    struct ib_flow_ipv6_filter mask;
@ -1701,15 +1705,30 @@ struct ib_flow_tcp_udp_filter {
};

struct ib_flow_spec_tcp_udp {
    enum ib_flow_spec_type type;
    u32 type;
    u16 size;
    struct ib_flow_tcp_udp_filter val;
    struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
    __be32 tunnel_id;
    u8 real_sz[0];
};

/* ib_flow_spec_tunnel describes the VXLAN tunnel;
 * the tunnel_id in val carries the VNI value.
 */
struct ib_flow_spec_tunnel {
    u32 type;
    u16 size;
    struct ib_flow_tunnel_filter val;
    struct ib_flow_tunnel_filter mask;
};

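Per the comment, only the 24-bit VNI is matched. A hedged sketch of filling the new spec to steer one VXLAN tunnel (the enclosing ib_flow_attr plumbing is elided; all-ones mask bits select which ID bits must match):

struct ib_flow_spec_tunnel tunnel = {
    .type = IB_FLOW_SPEC_VXLAN_TUNNEL,
    .size = sizeof(struct ib_flow_spec_tunnel),
    .val.tunnel_id  = cpu_to_be32(42),         /* match VNI 42 */
    .mask.tunnel_id = cpu_to_be32(0x00ffffff), /* VNI is 24 bits */
};
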
union ib_flow_spec {
    struct {
        enum ib_flow_spec_type type;
        u32 type;
        u16 size;
    };
    struct ib_flow_spec_eth eth;
@ -1717,6 +1736,7 @@ union ib_flow_spec {
    struct ib_flow_spec_ipv4 ipv4;
    struct ib_flow_spec_tcp_udp tcp_udp;
    struct ib_flow_spec_ipv6 ipv6;
    struct ib_flow_spec_tunnel tunnel;
};

struct ib_flow_attr {
@ -1933,7 +1953,8 @@ struct ib_device {
                                          struct ib_udata *udata);
    int (*dealloc_pd)(struct ib_pd *pd);
    struct ib_ah * (*create_ah)(struct ib_pd *pd,
                                struct ib_ah_attr *ah_attr);
                                struct ib_ah_attr *ah_attr,
                                struct ib_udata *udata);
    int (*modify_ah)(struct ib_ah *ah,
                     struct ib_ah_attr *ah_attr);
    int (*query_ah)(struct ib_ah *ah,
@ -2580,6 +2601,24 @@ void ib_dealloc_pd(struct ib_pd *pd);
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from a GRH or IPv4 header.
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store source gid
 * @dgid: place to store destination gid
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
                              enum rdma_network_type net_type,
                              union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 * work completion.
@ -3357,4 +3396,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_resolve_eth_dmac(struct ib_device *device,
                        struct ib_ah_attr *ah_attr);
#endif /* IB_VERBS_H */

@ -37,6 +37,7 @@
#define IB_USER_VERBS_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>

/*
 * Increment this value if any changes that break userspace ABI
@ -93,6 +94,7 @@ enum {
    IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
    IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
    IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
    IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP,
    IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
    IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
    IB_USER_VERBS_EX_CMD_CREATE_WQ,
@ -545,6 +547,14 @@ enum {
    IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
};

enum {
    IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
};

enum {
    IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
};

struct ib_uverbs_ex_create_qp {
    __u64 user_handle;
    __u32 pd_handle;
@ -684,9 +694,20 @@ struct ib_uverbs_modify_qp {
    __u64 driver_data[0];
};

struct ib_uverbs_ex_modify_qp {
    struct ib_uverbs_modify_qp base;
    __u32 rate_limit;
    __u32 reserved;
};

struct ib_uverbs_modify_qp_resp {
};

struct ib_uverbs_ex_modify_qp_resp {
    __u32 comp_mask;
    __u32 response_length;
};

struct ib_uverbs_destroy_qp {
    __u64 response;
    __u32 qp_handle;
@ -908,6 +929,23 @@ struct ib_uverbs_flow_spec_ipv6 {
    struct ib_uverbs_flow_ipv6_filter mask;
};

struct ib_uverbs_flow_tunnel_filter {
    __be32 tunnel_id;
};

struct ib_uverbs_flow_spec_tunnel {
    union {
        struct ib_uverbs_flow_spec_hdr hdr;
        struct {
            __u32 type;
            __u16 size;
            __u16 reserved;
        };
    };
    struct ib_uverbs_flow_tunnel_filter val;
    struct ib_uverbs_flow_tunnel_filter mask;
};

struct ib_uverbs_flow_attr {
    __u32 type;
    __u16 size;

@ -82,6 +82,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask {

enum mlx5_user_cmds_supp_uhw {
    MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
    MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
};

struct mlx5_ib_alloc_ucontext_resp {
@ -124,18 +125,47 @@ struct mlx5_ib_rss_caps {
    __u8 reserved[7];
};

enum mlx5_ib_cqe_comp_res_format {
    MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
    MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
    MLX5_IB_CQE_RES_RESERVED = 1 << 2,
};

struct mlx5_ib_cqe_comp_caps {
    __u32 max_num;
    __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};

struct mlx5_packet_pacing_caps {
    __u32 qp_rate_limit_min;
    __u32 qp_rate_limit_max; /* In kbps */

    /* Corresponding bit will be set if qp type from
     * 'enum ib_qp_type' is supported, e.g.
     * supported_qpts |= 1 << IB_QPT_RAW_PACKET
     */
    __u32 supported_qpts;
    __u32 reserved;
};

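A consumer is expected to probe these caps before attempting IB_QP_RATE_LIMIT; something along these lines (a sketch against the fields above, assuming the response arrived through query_device's vendor channel):

static bool rate_limit_supported(const struct mlx5_packet_pacing_caps *caps,
                                 __u32 rate_kbps)
{
    /* An empty range means no packet-pacing support at all. */
    if (!caps->qp_rate_limit_max)
        return false;
    /* Pacing is only wired up for raw packet QPs in this series. */
    if (!(caps->supported_qpts & (1 << IB_QPT_RAW_PACKET)))
        return false;
    return rate_kbps >= caps->qp_rate_limit_min &&
           rate_kbps <= caps->qp_rate_limit_max;
}
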
struct mlx5_ib_query_device_resp {
    __u32 comp_mask;
    __u32 response_length;
    struct mlx5_ib_tso_caps tso_caps;
    struct mlx5_ib_rss_caps rss_caps;
    struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
    struct mlx5_packet_pacing_caps packet_pacing_caps;
    __u32 mlx5_ib_support_multi_pkt_send_wqes;
    __u32 reserved;
};

struct mlx5_ib_create_cq {
    __u64 buf_addr;
    __u64 db_addr;
    __u32 cqe_size;
    __u32 reserved; /* explicit padding (optional on i386) */
    __u8 cqe_comp_en;
    __u8 cqe_comp_res_format;
    __u16 reserved; /* explicit padding (optional on i386) */
};

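With the old padding word split into explicit fields, userspace opts into CQE compression per CQ. A sketch of what a user library might put on the wire (hypothetical cq_buf/cq_db pointers; a real consumer would first check that cqe_comp_caps.max_num is nonzero and that the chosen format bit is set in supported_format):

struct mlx5_ib_create_cq cmd = {
    .buf_addr = (__u64)(uintptr_t)cq_buf,
    .db_addr  = (__u64)(uintptr_t)cq_db,
    .cqe_size = 64,
    .cqe_comp_en = 1,       /* request compressed CQEs */
    .cqe_comp_res_format = MLX5_IB_CQE_RES_FORMAT_CSUM,
};
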
struct mlx5_ib_create_cq_resp {
@ -232,6 +262,12 @@ struct mlx5_ib_create_wq {
    __u32 reserved;
};

struct mlx5_ib_create_ah_resp {
    __u32 response_length;
    __u8 dmac[ETH_ALEN];
    __u8 reserved[6];
};

struct mlx5_ib_create_wq_resp {
    __u32 response_length;
    __u32 reserved;