Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Not a big list of changes this cycle, mostly small things. The new
  MANA rdma driver should come next cycle along with a bunch of work on
  rxe.

  Summary:

   - Small bug fixes in mlx5, efa, rxe, hns, irdma, erdma, siw

   - rtrs tracing improvements

   - Code improvements: strscpy conversion, unused parameters, spelling
     mistakes, unused variables, flex arrays

   - restrack device details report for hns

   - Simplify struct device initialization in SRP

   - Eliminate the never-used service_mask support in IB CM

   - Make rxe not print to the console for some kinds of network packets

   - Asymmetric paths and router support in the CM through netlink
     messages

   - DMABUF importer support for mlx5 devx umems"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (84 commits)
  RDMA/rxe: Remove error/warning messages from packet receiver path
  RDMA/usnic: fix set-but-not-used variable 'flags' warning
  IB/hfi1: Use skb_put_data() instead of skb_put/memcpy pair
  RDMA/hns: Unified Log Printing Style
  RDMA/hns: Replacing magic number with macros in apply_func_caps()
  RDMA/hns: Replacing 'dseg_len' by macros in fill_ext_sge_inl_data()
  RDMA/hns: Remove redundant 'max_srq_desc_sz' in caps
  RDMA/hns: Remove redundant 'num_mtt_segs' and 'max_extend_sg'
  RDMA/hns: Remove redundant 'phy_addr' in hns_roce_hem_list_find_mtt()
  RDMA/hns: Remove redundant 'use_lowmem' argument from hns_roce_init_hem_table()
  RDMA/hns: Remove redundant 'bt_level' for hem_list_alloc_item()
  RDMA/hns: Remove redundant 'attr_mask' in modify_qp_init_to_init()
  RDMA/hns: Remove unnecessary brackets when getting point
  RDMA/hns: Remove unnecessary braces for single statement blocks
  RDMA/hns: Cleanup for a spelling error of Asynchronous
  IB/rdmavt: Add __init/__exit annotations to module init/exit funcs
  RDMA/rxe: Remove redundant num_sge fields
  RDMA/mlx5: Enable ATS support for MRs and umems
  RDMA/mlx5: Add support for dmabuf to devx umem
  RDMA/core: Add UVERBS_ATTR_RAW_FD
  ...
Linus Torvalds, 2022-10-07 12:05:29 -07:00, commit e08466a7c0
111 changed files with 1704 additions and 873 deletions

View File

@@ -360,10 +360,9 @@ static int process_msg_sess_info(struct rnbd_srv_session *srv_sess,
 				const void *msg, size_t len,
 				void *data, size_t datalen);
-static int rnbd_srv_rdma_ev(void *priv,
-			    struct rtrs_srv_op *id, int dir,
-			    void *data, size_t datalen, const void *usr,
-			    size_t usrlen)
+static int rnbd_srv_rdma_ev(void *priv, struct rtrs_srv_op *id,
+			    void *data, size_t datalen,
+			    const void *usr, size_t usrlen)
 {
 	struct rnbd_srv_session *srv_sess = priv;
 	const struct rnbd_msg_hdr *hdr = usr;
@@ -389,8 +388,8 @@ static int rnbd_srv_rdma_ev(void *priv,
 					     datalen);
 		break;
 	default:
-		pr_warn("Received unexpected message type %d with dir %d from session %s\n",
-			type, dir, srv_sess->sessname);
+		pr_warn("Received unexpected message type %d from session %s\n",
+			type, srv_sess->sessname);
 		return -EINVAL;
 	}

View File

@@ -175,6 +175,7 @@ struct cm_device {
 struct cm_av {
 	struct cm_port *port;
 	struct rdma_ah_attr ah_attr;
+	u16 dlid_datapath;
 	u16 pkey_index;
 	u8 timeout;
 };
@@ -617,7 +618,6 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
 	struct rb_node *parent = NULL;
 	struct cm_id_private *cur_cm_id_priv;
 	__be64 service_id = cm_id_priv->id.service_id;
-	__be64 service_mask = cm_id_priv->id.service_mask;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cm.lock, flags);
@@ -625,9 +625,16 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
 		parent = *link;
 		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 					  service_node);
-		if ((cur_cm_id_priv->id.service_mask & service_id) ==
-		    (service_mask & cur_cm_id_priv->id.service_id) &&
-		    (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
+
+		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
+			link = &(*link)->rb_left;
+		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
+			link = &(*link)->rb_right;
+		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
+			link = &(*link)->rb_left;
+		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
+			link = &(*link)->rb_right;
+		else {
 			/*
 			 * Sharing an ib_cm_id with different handlers is not
 			 * supported
@@ -643,17 +650,6 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
 			spin_unlock_irqrestore(&cm.lock, flags);
 			return cur_cm_id_priv;
 		}
-		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
-			link = &(*link)->rb_left;
-		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
-			link = &(*link)->rb_right;
-		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
-			link = &(*link)->rb_left;
-		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
-			link = &(*link)->rb_right;
-		else
-			link = &(*link)->rb_right;
 	}
 	cm_id_priv->listen_sharecount++;
 	rb_link_node(&cm_id_priv->service_node, parent, link);
@@ -670,12 +666,7 @@ static struct cm_id_private *cm_find_listen(struct ib_device *device,
 	while (node) {
 		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
-		if ((cm_id_priv->id.service_mask & service_id) ==
-		     cm_id_priv->id.service_id &&
-		    (cm_id_priv->id.device == device)) {
-			refcount_inc(&cm_id_priv->refcount);
-			return cm_id_priv;
-		}
+
 		if (device < cm_id_priv->id.device)
 			node = node->rb_left;
 		else if (device > cm_id_priv->id.device)
@@ -684,8 +675,10 @@ static struct cm_id_private *cm_find_listen(struct ib_device *device,
 			node = node->rb_left;
 		else if (be64_gt(service_id, cm_id_priv->id.service_id))
 			node = node->rb_right;
-		else
-			node = node->rb_right;
+		else {
+			refcount_inc(&cm_id_priv->refcount);
+			return cm_id_priv;
+		}
 	}
 	return NULL;
 }
@@ -1158,22 +1151,17 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id)
 }
 EXPORT_SYMBOL(ib_destroy_cm_id);
 
-static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
-			  __be64 service_mask)
+static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
 {
-	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
-	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
 		return -EINVAL;
 
-	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
+	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
 		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
-		cm_id_priv->id.service_mask = ~cpu_to_be64(0);
-	} else {
+	else
 		cm_id_priv->id.service_id = service_id;
-		cm_id_priv->id.service_mask = service_mask;
-	}
+
 	return 0;
 }
@@ -1185,12 +1173,8 @@ static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
  *   and service ID resolution requests.  The service ID should be specified
  *   network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
  *   assign a service ID to the caller.
- * @service_mask: Mask applied to service ID used to listen across a
- *   range of service IDs.  If set to 0, the service ID is matched
- *   exactly.  This parameter is ignored if %service_id is set to
- *   IB_CM_ASSIGN_SERVICE_ID.
  */
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
 {
 	struct cm_id_private *cm_id_priv =
 		container_of(cm_id, struct cm_id_private, id);
@@ -1203,7 +1187,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
 		goto out;
 	}
 
-	ret = cm_init_listen(cm_id_priv, service_id, service_mask);
+	ret = cm_init_listen(cm_id_priv, service_id);
 	if (ret)
 		goto out;
@@ -1251,7 +1235,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
 	if (IS_ERR(cm_id_priv))
 		return ERR_CAST(cm_id_priv);
 
-	err = cm_init_listen(cm_id_priv, service_id, 0);
+	err = cm_init_listen(cm_id_priv, service_id);
 	if (err) {
 		ib_destroy_cm_id(&cm_id_priv->id);
 		return ERR_PTR(err);
@@ -1321,6 +1305,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	struct sa_path_rec *pri_path = param->primary_path;
 	struct sa_path_rec *alt_path = param->alternate_path;
 	bool pri_ext = false;
+	__be16 lid;
 
 	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
 		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
@@ -1380,9 +1365,16 @@
 				htons(ntohl(sa_path_get_dlid(
 					pri_path)))));
 	} else {
+		if (param->primary_path_inbound) {
+			lid = param->primary_path_inbound->ib.dlid;
+			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+				be16_to_cpu(lid));
+		} else
+			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+				be16_to_cpu(IB_LID_PERMISSIVE));
+
 		/* Work-around until there's a way to obtain remote LID info */
-		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
-			be16_to_cpu(IB_LID_PERMISSIVE));
 		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
 			be16_to_cpu(IB_LID_PERMISSIVE));
 	}
@@ -1522,7 +1514,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		}
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 		param->primary_path->packet_life_time) * 2 +
 		cm_convert_to_ms(
@@ -1538,6 +1529,10 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	cm_move_av_from_path(&cm_id_priv->av, &av);
+	if (param->primary_path_outbound)
+		cm_id_priv->av.dlid_datapath =
+			be16_to_cpu(param->primary_path_outbound->ib.dlid);
+
 	if (param->alternate_path)
 		cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
@@ -1632,14 +1627,13 @@ static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
 static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
 					struct sa_path_rec *primary_path,
-					struct sa_path_rec *alt_path)
+					struct sa_path_rec *alt_path,
+					struct ib_wc *wc)
 {
 	u32 lid;
 
 	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
-		sa_path_set_dlid(primary_path,
-				 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
-					 req_msg));
+		sa_path_set_dlid(primary_path, wc->slid);
 		sa_path_set_slid(primary_path,
 				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
 					 req_msg));
@@ -1676,7 +1670,8 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 				     struct sa_path_rec *primary_path,
-				     struct sa_path_rec *alt_path)
+				     struct sa_path_rec *alt_path,
+				     struct ib_wc *wc)
 {
 	primary_path->dgid =
 		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
@@ -1734,7 +1729,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 		if (sa_path_is_roce(alt_path))
 			alt_path->roce.route_resolved = false;
 	}
-	cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
+	cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
 }
 
 static u16 cm_get_bth_pkey(struct cm_work *work)
@@ -2079,7 +2074,6 @@ static int cm_req_handler(struct cm_work *work)
 		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
 	cm_id_priv->id.service_id =
 		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
-	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 	cm_id_priv->tid = req_msg->hdr.tid;
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
@@ -2148,7 +2142,7 @@ static int cm_req_handler(struct cm_work *work)
 	if (cm_req_has_alt_path(req_msg))
 		work->path[1].rec_type = work->path[0].rec_type;
 	cm_format_paths_from_req(req_msg, &work->path[0],
-				 &work->path[1]);
+				 &work->path[1], work->mad_recv_wc->wc);
 	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
 		sa_path_set_dmac(&work->path[0],
 				 cm_id_priv->av.ah_attr.roce.dmac);
@@ -2173,6 +2167,10 @@ static int cm_req_handler(struct cm_work *work)
 			       NULL, 0);
 		goto rejected;
 	}
+	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
+		cm_id_priv->av.dlid_datapath =
+			IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);
+
 	if (cm_req_has_alt_path(req_msg)) {
 		ret = cm_init_av_by_path(&work->path[1], NULL,
 					 &cm_id_priv->alt_av);
@@ -3486,7 +3484,6 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	cm_move_av_from_path(&cm_id_priv->av, &av);
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	if (cm_id->state != IB_CM_IDLE) {
@@ -3561,7 +3558,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
 		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
 	cm_id_priv->id.service_id =
 		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
-	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 	cm_id_priv->tid = sidr_req_msg->hdr.tid;
 
 	wc = work->mad_recv_wc->wc;
@@ -4134,6 +4130,10 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
 		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+		if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
+		    cm_id_priv->av.dlid_datapath &&
+		    (cm_id_priv->av.dlid_datapath != 0xffff))
+			qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
 		qp_attr->path_mtu = cm_id_priv->path_mtu;
 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
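
With service_mask gone, cm_insert_listen() and cm_find_listen() above become a plain red-black-tree walk over an exact (device, service_id) key. A minimal userspace model of that ordering, assuming only libc; the kernel's be64_lt()/be64_gt() compare raw big-endian bit patterns, which is a consistent total order for tree purposes even though it is not numeric order on little-endian hosts:

#include <stdint.h>
#include <stdio.h>

/* Model of the (device, service_id) comparison driving the rbtree
 * descent in cm_insert_listen()/cm_find_listen(). */
static int listen_cmp(uintptr_t dev_a, uint64_t sid_a,
		      uintptr_t dev_b, uint64_t sid_b)
{
	if (dev_a != dev_b)
		return dev_a < dev_b ? -1 : 1;	/* device decides first */
	if (sid_a != sid_b)
		return sid_a < sid_b ? -1 : 1;	/* then the service ID */
	return 0;	/* exact duplicate: the listen is shared, not re-inserted */
}

int main(void)
{
	printf("%d\n", listen_cmp(1, 5, 1, 9));	/* -1: descend left */
	printf("%d\n", listen_cmp(2, 5, 1, 9));	/*  1: device decides */
	printf("%d\n", listen_cmp(1, 5, 1, 5));	/*  0: shared listen */
	return 0;
}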

View File

@@ -2026,6 +2026,8 @@ static void _destroy_id(struct rdma_id_private *id_priv,
 		cma_id_put(id_priv->id.context);
 
 	kfree(id_priv->id.route.path_rec);
+	kfree(id_priv->id.route.path_rec_inbound);
+	kfree(id_priv->id.route.path_rec_outbound);
 
 	put_net(id_priv->id.route.addr.dev_addr.net);
 	kfree(id_priv);
@@ -2241,14 +2243,14 @@ cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
 		goto err;
 
 	rt = &id->route;
-	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
-	rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),
-				     GFP_KERNEL);
+	rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
+	rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
+				     sizeof(*rt->path_rec), GFP_KERNEL);
 	if (!rt->path_rec)
 		goto err;
 
 	rt->path_rec[0] = *path;
-	if (rt->num_paths == 2)
+	if (rt->num_pri_alt_paths == 2)
 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
 
 	if (net_dev) {
@@ -2817,26 +2819,72 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
 }
 EXPORT_SYMBOL(rdma_set_min_rnr_timer);
 
+static void route_set_path_rec_inbound(struct cma_work *work,
+				       struct sa_path_rec *path_rec)
+{
+	struct rdma_route *route = &work->id->id.route;
+
+	if (!route->path_rec_inbound) {
+		route->path_rec_inbound =
+			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
+		if (!route->path_rec_inbound)
+			return;
+	}
+
+	*route->path_rec_inbound = *path_rec;
+}
+
+static void route_set_path_rec_outbound(struct cma_work *work,
+					struct sa_path_rec *path_rec)
+{
+	struct rdma_route *route = &work->id->id.route;
+
+	if (!route->path_rec_outbound) {
+		route->path_rec_outbound =
+			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
+		if (!route->path_rec_outbound)
+			return;
+	}
+
+	*route->path_rec_outbound = *path_rec;
+}
+
 static void cma_query_handler(int status, struct sa_path_rec *path_rec,
-			      void *context)
+			      int num_prs, void *context)
 {
 	struct cma_work *work = context;
 	struct rdma_route *route;
+	int i;
 
 	route = &work->id->id.route;
 
-	if (!status) {
-		route->num_paths = 1;
-		*route->path_rec = *path_rec;
-	} else {
-		work->old_state = RDMA_CM_ROUTE_QUERY;
-		work->new_state = RDMA_CM_ADDR_RESOLVED;
-		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
-		work->event.status = status;
-		pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
-				     status);
+	if (status)
+		goto fail;
+
+	for (i = 0; i < num_prs; i++) {
+		if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
+			*route->path_rec = path_rec[i];
+		else if (path_rec[i].flags & IB_PATH_INBOUND)
+			route_set_path_rec_inbound(work, &path_rec[i]);
+		else if (path_rec[i].flags & IB_PATH_OUTBOUND)
+			route_set_path_rec_outbound(work, &path_rec[i]);
+	}
+	if (!route->path_rec) {
+		status = -EINVAL;
+		goto fail;
 	}
 
+	route->num_pri_alt_paths = 1;
+	queue_work(cma_wq, &work->work);
+	return;
+
+fail:
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ADDR_RESOLVED;
+	work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
+	work->event.status = status;
+	pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
			     status);
 	queue_work(cma_wq, &work->work);
 }
@@ -3081,7 +3129,7 @@ int rdma_set_ib_path(struct rdma_cm_id *id,
 		dev_put(ndev);
 	}
 
-	id->route.num_paths = 1;
+	id->route.num_pri_alt_paths = 1;
 	return 0;
 
 err_free:
@@ -3214,7 +3262,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err1;
 	}
 
-	route->num_paths = 1;
+	route->num_pri_alt_paths = 1;
 
 	ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
 	if (!ndev) {
@@ -3274,7 +3322,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 err2:
 	kfree(route->path_rec);
 	route->path_rec = NULL;
-	route->num_paths = 0;
+	route->num_pri_alt_paths = 0;
 err1:
 	kfree(work);
 	return ret;
@@ -4265,7 +4313,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	}
 
 	req.primary_path = &route->path_rec[0];
-	if (route->num_paths == 2)
+	req.primary_path_inbound = route->path_rec_inbound;
+	req.primary_path_outbound = route->path_rec_outbound;
+	if (route->num_pri_alt_paths == 2)
 		req.alternate_path = &route->path_rec[1];
 
 	req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
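
cma_query_handler() above triages every returned record by its flags: no flags (or the GMP flag) means the classic bidirectional primary record, while INBOUND/OUTBOUND records are stashed separately for asymmetric routes. A self-contained sketch of that triage; the flag values mirror include/uapi/rdma/ib_user_sa.h but are reproduced here only for illustration:

#include <stdio.h>

enum {
	IB_PATH_GMP      = 1,		/* illustrative copies of the uapi values */
	IB_PATH_PRIMARY  = 1 << 1,
	IB_PATH_OUTBOUND = 1 << 3,
	IB_PATH_INBOUND  = 1 << 4,
};

/* Same decision tree as cma_query_handler() */
static const char *classify(unsigned int flags)
{
	if (!flags || (flags & IB_PATH_GMP))
		return "primary (route->path_rec)";
	if (flags & IB_PATH_INBOUND)
		return "inbound (route->path_rec_inbound)";
	if (flags & IB_PATH_OUTBOUND)
		return "outbound (route->path_rec_outbound)";
	return "ignored";
}

int main(void)
{
	unsigned int samples[] = { 0, IB_PATH_PRIMARY | IB_PATH_INBOUND,
				   IB_PATH_PRIMARY | IB_PATH_OUTBOUND };

	for (int i = 0; i < 3; i++)
		printf("flags=%#x -> %s\n", samples[i], classify(samples[i]));
	return 0;
}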

View File

@@ -292,7 +292,7 @@ static struct config_group *make_cma_dev(struct config_group *group,
 		goto fail;
 	}
 
-	strlcpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
+	strscpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
 
 	config_group_init_type_name(&cma_dev_group->ports_group, "ports",
 				    &cma_ports_group_type);

View File

@@ -422,7 +422,7 @@ int ib_device_rename(struct ib_device *ibdev, const char *name)
 		return ret;
 	}
 
-	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+	strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
 	ret = rename_compat_devs(ibdev);
 
 	downgrade_write(&devices_rwsem);
@@ -1217,7 +1217,7 @@ static int assign_name(struct ib_device *device, const char *name)
 		ret = -ENFILE;
 		goto out;
 	}
-	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
+	strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
 
 	ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
 			      &last_id, GFP_KERNEL);
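
This hunk and the cma_configfs.c/bnxt_re ones are the strscpy conversion named in the pull summary. The practical difference is the return contract: strlcpy() returns strlen(src), so it always walks the whole source and truncation detection needs extra arithmetic, while strscpy() returns the number of bytes copied or -E2BIG on truncation. A userspace model of the two contracts, assuming nothing beyond libc; the real implementations live in the kernel's lib/string.c:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static size_t model_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);		/* always reads all of src */

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;				/* truncated iff len >= size */
}

static ssize_t model_strscpy(char *dst, const char *src, size_t size)
{
	for (size_t i = 0; i < size; i++) {
		dst[i] = src[i];
		if (!src[i])
			return i;		/* bytes copied, NUL excluded */
	}
	if (size)
		dst[size - 1] = '\0';
	return -E2BIG;				/* truncation is unmistakable */
}

int main(void)
{
	char buf[8];

	printf("strlcpy model: %zu\n",
	       model_strlcpy(buf, "mlx5_0_renamed", sizeof(buf)));
	printf("strscpy model: %zd\n",
	       model_strscpy(buf, "mlx5_0_renamed", sizeof(buf)));
	return 0;
}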

View File

@@ -7,8 +7,7 @@
 #include <rdma/ib_cache.h>
 #include <rdma/lag.h>
 
-static struct sk_buff *rdma_build_skb(struct ib_device *device,
-				      struct net_device *netdev,
+static struct sk_buff *rdma_build_skb(struct net_device *netdev,
 				      struct rdma_ah_attr *ah_attr,
 				      gfp_t flags)
 {
@@ -86,7 +85,7 @@ static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
 	struct net_device *slave;
 	struct sk_buff *skb;
 
-	skb = rdma_build_skb(device, master, ah_attr, flags);
+	skb = rdma_build_skb(master, ah_attr, flags);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);

View File

@@ -50,6 +50,7 @@
 #include <rdma/ib_marshall.h>
 #include <rdma/ib_addr.h>
 #include <rdma/opa_addr.h>
+#include <rdma/rdma_cm.h>
 #include "sa.h"
 #include "core_priv.h"
@@ -104,7 +105,8 @@ struct ib_sa_device {
 };
 
 struct ib_sa_query {
-	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
+	void (*callback)(struct ib_sa_query *sa_query, int status,
+			 int num_prs, struct ib_sa_mad *mad);
 	void (*release)(struct ib_sa_query *);
 	struct ib_sa_client    *client;
 	struct ib_sa_port      *port;
@@ -116,6 +118,12 @@ struct ib_sa_query {
 	u32			seq; /* Local svc request sequence number */
 	unsigned long		timeout; /* Local svc timeout */
 	u8			path_use; /* How will the pathrecord be used */
+
+	/* A separate buffer to save pathrecords of a response, as in cases
+	 * like IB/netlink, multiple pathrecords are supported, so that
+	 * mad->data is not large enough to hold them
+	 */
+	void			*resp_pr_data;
 };
 
 #define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
@@ -123,7 +131,8 @@ struct ib_sa_query {
 #define IB_SA_QUERY_OPA			0x00000004
 
 struct ib_sa_path_query {
-	void (*callback)(int, struct sa_path_rec *, void *);
+	void (*callback)(int status, struct sa_path_rec *rec,
+			 int num_paths, void *context);
 	void *context;
 	struct ib_sa_query sa_query;
 	struct sa_path_rec *conv_pr;
@@ -712,7 +721,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
 	    sa_rec->reversible != 0)
-		query->path_use = LS_RESOLVE_PATH_USE_GMP;
+		query->path_use = LS_RESOLVE_PATH_USE_ALL;
 	else
 		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
 	header->path_use = query->path_use;
@@ -865,50 +874,81 @@ static void send_handler(struct ib_mad_agent *agent,
 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
 					   const struct nlmsghdr *nlh)
 {
+	struct ib_path_rec_data *srec, *drec;
+	struct ib_sa_path_query *path_query;
 	struct ib_mad_send_wc mad_send_wc;
-	struct ib_sa_mad *mad = NULL;
 	const struct nlattr *head, *curr;
-	struct ib_path_rec_data *rec;
-	int len, rem;
+	struct ib_sa_mad *mad = NULL;
+	int len, rem, num_prs = 0;
 	u32 mask = 0;
 	int status = -EIO;
 
-	if (query->callback) {
-		head = (const struct nlattr *) nlmsg_data(nlh);
-		len = nlmsg_len(nlh);
-		switch (query->path_use) {
-		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
-			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
-			break;
+	if (!query->callback)
+		goto out;
 
-		case LS_RESOLVE_PATH_USE_ALL:
-		case LS_RESOLVE_PATH_USE_GMP:
-		default:
-			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
-				IB_PATH_BIDIRECTIONAL;
-			break;
+	path_query = container_of(query, struct ib_sa_path_query, sa_query);
+	mad = query->mad_buf->mad;
+	if (!path_query->conv_pr &&
+	    (be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
+		/* Need a larger buffer for possible multiple PRs */
+		query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
+					       sizeof(*drec), GFP_KERNEL);
+		if (!query->resp_pr_data) {
+			query->callback(query, -ENOMEM, 0, NULL);
+			return;
 		}
+	}
 
-		nla_for_each_attr(curr, head, len, rem) {
-			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
-				rec = nla_data(curr);
-				/*
-				 * Get the first one. In the future, we may
-				 * need to get up to 6 pathrecords.
-				 */
-				if ((rec->flags & mask) == mask) {
-					mad = query->mad_buf->mad;
-					mad->mad_hdr.method |=
-						IB_MGMT_METHOD_RESP;
-					memcpy(mad->data, rec->path_rec,
-					       sizeof(rec->path_rec));
-					status = 0;
-					break;
-				}
-			}
-		}
+	head = (const struct nlattr *) nlmsg_data(nlh);
+	len = nlmsg_len(nlh);
+	switch (query->path_use) {
+	case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
+		mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
+		break;
+
+	case LS_RESOLVE_PATH_USE_ALL:
+		mask = IB_PATH_PRIMARY;
+		break;
 
-		query->callback(query, status, mad);
+	case LS_RESOLVE_PATH_USE_GMP:
+	default:
+		mask = IB_PATH_PRIMARY | IB_PATH_GMP |
+			IB_PATH_BIDIRECTIONAL;
+		break;
 	}
 
+	drec = (struct ib_path_rec_data *)query->resp_pr_data;
+	nla_for_each_attr(curr, head, len, rem) {
+		if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
+			continue;
+
+		srec = nla_data(curr);
+		if ((srec->flags & mask) != mask)
+			continue;
+
+		status = 0;
+		if (!drec) {
+			memcpy(mad->data, srec->path_rec,
+			       sizeof(srec->path_rec));
+			num_prs = 1;
+			break;
+		}
+
+		memcpy(drec, srec, sizeof(*drec));
+		drec++;
+		num_prs++;
+		if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
+			break;
+	}
+
+	if (!status)
+		mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+
+	query->callback(query, status, num_prs, mad);
+	kvfree(query->resp_pr_data);
+	query->resp_pr_data = NULL;
+
+out:
 	mad_send_wc.send_buf = query->mad_buf;
 	mad_send_wc.status = IB_WC_SUCCESS;
 	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
@@ -1411,41 +1451,90 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
 	return PR_IB_SUPPORTED;
 }
 
+static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
+				     int status, struct ib_sa_mad *mad)
+{
+	struct sa_path_rec rec = {};
+
+	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+		  mad->data, &rec);
+	rec.rec_type = SA_PATH_REC_TYPE_IB;
+	sa_path_set_dmac_zero(&rec);
+
+	if (query->conv_pr) {
+		struct sa_path_rec opa;
+
+		memset(&opa, 0, sizeof(struct sa_path_rec));
+		sa_convert_path_ib_to_opa(&opa, &rec);
+		query->callback(status, &opa, 1, query->context);
+	} else {
+		query->callback(status, &rec, 1, query->context);
+	}
+}
+
+/**
+ * ib_sa_pr_callback_multiple() - Parse path records then do callback.
+ *
+ * In a multiple-PR case the PRs are saved in "query->resp_pr_data"
+ * (instead of "mad->data") and with "ib_path_rec_data" structure format,
+ * so that rec->flags can be set to indicate the type of PR.
+ * This is valid only in IB fabric.
+ */
+static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
+				       int status, int num_prs,
+				       struct ib_path_rec_data *rec_data)
+{
+	struct sa_path_rec *rec;
+	int i;
+
+	rec = kvcalloc(num_prs, sizeof(*rec), GFP_KERNEL);
+	if (!rec) {
+		query->callback(-ENOMEM, NULL, 0, query->context);
+		return;
+	}
+
+	for (i = 0; i < num_prs; i++) {
+		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+			  rec_data[i].path_rec, rec + i);
+		rec[i].rec_type = SA_PATH_REC_TYPE_IB;
+		sa_path_set_dmac_zero(rec + i);
+		rec[i].flags = rec_data[i].flags;
+	}
+
+	query->callback(status, rec, num_prs, query->context);
+	kvfree(rec);
+}
+
 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
-				    int status,
+				    int status, int num_prs,
 				    struct ib_sa_mad *mad)
 {
 	struct ib_sa_path_query *query =
 		container_of(sa_query, struct ib_sa_path_query, sa_query);
+	struct sa_path_rec rec;
 
-	if (mad) {
-		struct sa_path_rec rec;
+	if (!mad || !num_prs) {
+		query->callback(status, NULL, 0, query->context);
+		return;
+	}
 
-		if (sa_query->flags & IB_SA_QUERY_OPA) {
-			ib_unpack(opa_path_rec_table,
-				  ARRAY_SIZE(opa_path_rec_table),
-				  mad->data, &rec);
-			rec.rec_type = SA_PATH_REC_TYPE_OPA;
-			query->callback(status, &rec, query->context);
-		} else {
-			ib_unpack(path_rec_table,
-				  ARRAY_SIZE(path_rec_table),
-				  mad->data, &rec);
-			rec.rec_type = SA_PATH_REC_TYPE_IB;
-			sa_path_set_dmac_zero(&rec);
-
-			if (query->conv_pr) {
-				struct sa_path_rec opa;
-
-				memset(&opa, 0, sizeof(struct sa_path_rec));
-				sa_convert_path_ib_to_opa(&opa, &rec);
-				query->callback(status, &opa, query->context);
-			} else {
-				query->callback(status, &rec, query->context);
-			}
+	if (sa_query->flags & IB_SA_QUERY_OPA) {
+		if (num_prs != 1) {
+			query->callback(-EINVAL, NULL, 0, query->context);
+			return;
 		}
-	} else
-		query->callback(status, NULL, query->context);
+
+		ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
+			  mad->data, &rec);
+		rec.rec_type = SA_PATH_REC_TYPE_OPA;
+		query->callback(status, &rec, num_prs, query->context);
+	} else {
+		if (!sa_query->resp_pr_data)
+			ib_sa_pr_callback_single(query, status, mad);
+		else
+			ib_sa_pr_callback_multiple(query, status, num_prs,
+						   sa_query->resp_pr_data);
+	}
 }
 
 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
@@ -1489,7 +1578,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
 		       unsigned long timeout_ms, gfp_t gfp_mask,
 		       void (*callback)(int status,
 					struct sa_path_rec *resp,
-					void *context),
+					int num_paths, void *context),
 		       void *context,
 		       struct ib_sa_query **sa_query)
 {
@@ -1588,7 +1677,7 @@ err1:
 EXPORT_SYMBOL(ib_sa_path_rec_get);
 
 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
-					int status,
+					int status, int num_prs,
 					struct ib_sa_mad *mad)
 {
 	struct ib_sa_mcmember_query *query =
@@ -1680,7 +1769,7 @@ err1:
 /* Support GuidInfoRecord */
 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
-					int status,
+					int status, int num_paths,
 					struct ib_sa_mad *mad)
 {
 	struct ib_sa_guidinfo_query *query =
@@ -1790,7 +1879,7 @@ static void ib_classportinfo_cb(void *context)
 }
 
 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
-					      int status,
+					      int status, int num_prs,
 					      struct ib_sa_mad *mad)
 {
 	unsigned long flags;
@@ -1966,13 +2055,13 @@ static void send_handler(struct ib_mad_agent *agent,
 		/* No callback -- already got recv */
 		break;
 	case IB_WC_RESP_TIMEOUT_ERR:
-		query->callback(query, -ETIMEDOUT, NULL);
+		query->callback(query, -ETIMEDOUT, 0, NULL);
 		break;
 	case IB_WC_WR_FLUSH_ERR:
-		query->callback(query, -EINTR, NULL);
+		query->callback(query, -EINTR, 0, NULL);
 		break;
 	default:
-		query->callback(query, -EIO, NULL);
+		query->callback(query, -EIO, 0, NULL);
 		break;
 	}
@@ -2000,10 +2089,10 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
 		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
 			query->callback(query,
 					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
-					-EINVAL : 0,
+					-EINVAL : 0, 1,
 					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
 		else
-			query->callback(query, -EIO, NULL);
+			query->callback(query, -EIO, 0, NULL);
 	}
 
 	ib_free_recv_mad(mad_recv_wc);
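
Every SA query callback above gains a count argument because one netlink response may now carry several path records. From a ULP's side, an ib_sa_path_rec_get() completion now looks roughly like the sketch below; the save_*() and fail() helpers are hypothetical, and the per-record direction comes from the new sa_path_rec flags:

/* Hedged sketch of a consumer callback under the new signature shown
 * in the ib_sa_path_rec_get() hunk above. */
static void my_path_handler(int status, struct sa_path_rec *resp,
			    int num_paths, void *context)
{
	int i;

	if (status) {
		fail(context, status);				/* hypothetical */
		return;
	}

	for (i = 0; i < num_paths; i++) {
		/* flags == 0 is the classic GMP record; INBOUND/OUTBOUND
		 * mark the per-direction records on asymmetric fabrics. */
		if (resp[i].flags & IB_PATH_INBOUND)
			save_inbound(context, &resp[i]);	/* hypothetical */
		else if (resp[i].flags & IB_PATH_OUTBOUND)
			save_outbound(context, &resp[i]);	/* hypothetical */
		else
			save_primary(context, &resp[i]);	/* hypothetical */
	}
}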

View File

@@ -754,8 +754,8 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
 {
 	struct rdma_dev_addr *dev_addr;
 
-	resp->num_paths = route->num_paths;
-	switch (route->num_paths) {
+	resp->num_paths = route->num_pri_alt_paths;
+	switch (route->num_pri_alt_paths) {
 	case 0:
 		dev_addr = &route->addr.dev_addr;
 		rdma_addr_get_dgid(dev_addr,
@@ -781,8 +781,8 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
 				 struct rdma_route *route)
 {
-	resp->num_paths = route->num_paths;
-	switch (route->num_paths) {
+	resp->num_paths = route->num_pri_alt_paths;
+	switch (route->num_pri_alt_paths) {
 	case 0:
 		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
 			    (union ib_gid *)&resp->ib_route[0].dgid);
@@ -921,7 +921,7 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
 	if (!resp)
 		return -ENOMEM;
 
-	resp->num_paths = ctx->cm_id->route.num_paths;
+	resp->num_paths = ctx->cm_id->route.num_pri_alt_paths;
 	for (i = 0, out_len -= sizeof(*resp);
 	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
 	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

View File

@@ -43,8 +43,6 @@
 #include <linux/hmm.h>
 #include <linux/pagemap.h>
 
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_umem.h>
 #include <rdma/ib_umem_odp.h>
 
 #include "uverbs.h"

View File

@@ -739,6 +739,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
 	mr->uobject = uobj;
 	atomic_inc(&pd->usecnt);
 	mr->iova = cmd.hca_va;
+	mr->length = cmd.length;
 
 	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
 	rdma_restrack_set_name(&mr->res, NULL);
@@ -861,8 +862,10 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
 			mr->pd = new_pd;
 			atomic_inc(&new_pd->usecnt);
 		}
-		if (cmd.flags & IB_MR_REREG_TRANS)
+		if (cmd.flags & IB_MR_REREG_TRANS) {
 			mr->iova = cmd.hca_va;
+			mr->length = cmd.length;
+		}
 	}
 
 	memset(&resp, 0, sizeof(resp));

View File

@@ -337,6 +337,14 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
 
 		break;
 
+	case UVERBS_ATTR_TYPE_RAW_FD:
+		if (uattr->attr_data.reserved || uattr->len != 0 ||
+		    uattr->data_s64 < INT_MIN || uattr->data_s64 > INT_MAX)
+			return -EINVAL;
+		/* _uverbs_get_const_signed() is the accessor */
+		e->ptr_attr.data = uattr->data_s64;
+		break;
+
 	case UVERBS_ATTR_TYPE_IDRS_ARRAY:
 		return uverbs_process_idrs_array(pbundle, attr_uapi,
 						 &e->objs_arr_attr, uattr,
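
UVERBS_ATTR_TYPE_RAW_FD carries a bare file descriptor number through the ioctl layer without uverbs resolving it to a struct file; that is exactly what the dmabuf-for-devx-umem patch later in this series needs. A hedged sketch of the consuming side, assuming the uverbs_get_raw_fd() accessor added together with the attribute type and the mlx5 attribute name from that patch:

/* Sketch only: reading a RAW_FD attribute in a driver method handler. */
static int handle_umem_reg(struct uverbs_attr_bundle *attrs)
{
	int dmabuf_fd = -1;

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD) &&
	    uverbs_get_raw_fd(&dmabuf_fd, attrs,
			      MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD))
		return -EFAULT;

	/* dmabuf_fd is just a number here; the driver performs its own
	 * dma-buf lookup and owns the lifetime handling. */
	return 0;
}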

View File

@@ -1038,7 +1038,7 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
 	ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
 	if (ret) {
 		rdma_restrack_put(&srq->res);
-		atomic_dec(&srq->pd->usecnt);
+		atomic_dec(&pd->usecnt);
 		if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
 			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
 		if (ib_srq_has_cq(srq->srq_type))
@@ -2149,6 +2149,8 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->pd = pd;
 	mr->dm = NULL;
 	atomic_inc(&pd->usecnt);
+	mr->iova = virt_addr;
+	mr->length = length;
 
 	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
 	rdma_restrack_parent_name(&mr->res, &pd->res);

View File

@@ -725,7 +725,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 
 	/* ib device init */
 	ibdev->node_type = RDMA_NODE_IB_CA;
-	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
+	strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
 		strlen(BNXT_RE_DESC) + 5);
 	ibdev->phys_port_cnt = 1;

View File

@@ -444,7 +444,10 @@ struct efa_admin_create_cq_cmd {
 	/*
 	 * 4:0 : cq_entry_size_words - size of CQ entry in
 	 *    32-bit words, valid values: 4, 8.
-	 * 7:5 : reserved7 - MBZ
+	 * 5 : set_src_addr - If set, source address will be
+	 *    filled on RX completions from unknown senders.
+	 *    Requires 8 words CQ entry size.
+	 * 7:6 : reserved7 - MBZ
 	 */
 	u8 cq_caps_2;
 
@@ -980,6 +983,7 @@ struct efa_admin_host_info {
 #define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
 #define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
 #define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
+#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK           BIT(5)
 
 /* create_cq_resp */
 #define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK              BIT(0)

View File

@@ -168,7 +168,10 @@ int efa_com_create_cq(struct efa_com_dev *edev,
 			EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1);
 		create_cmd.eqn = params->eqn;
 	}
-
+	if (params->set_src_addr) {
+		EFA_SET(&create_cmd.cq_caps_2,
+			EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR, 1);
+	}
 	efa_com_set_dma_addr(params->dma_addr,
 			     &create_cmd.cq_ba.mem_addr_high,
 			     &create_cmd.cq_ba.mem_addr_low);

View File

@@ -75,7 +75,8 @@ struct efa_com_create_cq_params {
 	u16 uarn;
 	u16 eqn;
 	u8 entry_size_in_bytes;
-	bool interrupt_mode_enabled;
+	u8 interrupt_mode_enabled : 1;
+	u8 set_src_addr : 1;
 };
 
 struct efa_com_create_cq_result {

View File

@@ -0,0 +1,289 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_IO_H_
#define _EFA_IO_H_
#define EFA_IO_TX_DESC_NUM_BUFS 2
#define EFA_IO_TX_DESC_NUM_RDMA_BUFS 1
#define EFA_IO_TX_DESC_INLINE_MAX_SIZE 32
#define EFA_IO_TX_DESC_IMM_DATA_SIZE 4
enum efa_io_queue_type {
/* send queue (of a QP) */
EFA_IO_SEND_QUEUE = 1,
/* recv queue (of a QP) */
EFA_IO_RECV_QUEUE = 2,
};
enum efa_io_send_op_type {
/* send message */
EFA_IO_SEND = 0,
/* RDMA read */
EFA_IO_RDMA_READ = 1,
};
enum efa_io_comp_status {
/* Successful completion */
EFA_IO_COMP_STATUS_OK = 0,
/* Flushed during QP destroy */
EFA_IO_COMP_STATUS_FLUSHED = 1,
/* Internal QP error */
EFA_IO_COMP_STATUS_LOCAL_ERROR_QP_INTERNAL_ERROR = 2,
/* Bad operation type */
EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_OP_TYPE = 3,
/* Bad AH */
EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_AH = 4,
/* LKEY not registered or does not match IOVA */
EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_LKEY = 5,
/* Message too long */
EFA_IO_COMP_STATUS_LOCAL_ERROR_BAD_LENGTH = 6,
/* Destination ENI is down or does not run EFA */
EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_ADDRESS = 7,
/* Connection was reset by remote side */
EFA_IO_COMP_STATUS_REMOTE_ERROR_ABORT = 8,
/* Bad dest QP number (QP does not exist or is in error state) */
EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_DEST_QPN = 9,
/* Destination resource not ready (no WQEs posted on RQ) */
EFA_IO_COMP_STATUS_REMOTE_ERROR_RNR = 10,
/* Receiver SGL too short */
EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_LENGTH = 11,
/* Unexpected status returned by responder */
EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_STATUS = 12,
/* Unresponsive remote - detected locally */
EFA_IO_COMP_STATUS_LOCAL_ERROR_UNRESP_REMOTE = 13,
};
struct efa_io_tx_meta_desc {
/* Verbs-generated Request ID */
u16 req_id;
/*
* control flags
* 3:0 : op_type - operation type: send/rdma/fast mem
* ops/etc
* 4 : has_imm - immediate_data field carries valid
* data.
* 5 : inline_msg - inline mode - inline message data
* follows this descriptor (no buffer descriptors).
* Note that it is different from immediate data
* 6 : meta_extension - Extended metadata. MBZ
* 7 : meta_desc - Indicates metadata descriptor.
* Must be set.
*/
u8 ctrl1;
/*
* control flags
* 0 : phase
* 1 : reserved25 - MBZ
* 2 : first - Indicates first descriptor in
* transaction. Must be set.
* 3 : last - Indicates last descriptor in
* transaction. Must be set.
* 4 : comp_req - Indicates whether completion should
* be posted, after packet is transmitted. Valid only
* for the first descriptor
* 7:5 : reserved29 - MBZ
*/
u8 ctrl2;
u16 dest_qp_num;
/*
* If inline_msg bit is set, length of inline message in bytes,
* otherwise length of SGL (number of buffers).
*/
u16 length;
/*
* immediate data: if has_imm is set, then this field is included
* within Tx message and reported in remote Rx completion.
*/
u32 immediate_data;
u16 ah;
u16 reserved;
/* Queue key */
u32 qkey;
u8 reserved2[12];
};
/*
* Tx queue buffer descriptor, for any transport type. Preceded by metadata
* descriptor.
*/
struct efa_io_tx_buf_desc {
/* length in bytes */
u32 length;
/*
* 23:0 : lkey - local memory translation key
* 31:24 : reserved - MBZ
*/
u32 lkey;
/* Buffer address bits[31:0] */
u32 buf_addr_lo;
/* Buffer address bits[63:32] */
u32 buf_addr_hi;
};
struct efa_io_remote_mem_addr {
/* length in bytes */
u32 length;
/* remote memory translation key */
u32 rkey;
/* Buffer address bits[31:0] */
u32 buf_addr_lo;
/* Buffer address bits[63:32] */
u32 buf_addr_hi;
};
struct efa_io_rdma_req {
/* Remote memory address */
struct efa_io_remote_mem_addr remote_mem;
/* Local memory address */
struct efa_io_tx_buf_desc local_mem[1];
};
/*
* Tx WQE, composed of tx meta descriptors followed by either tx buffer
* descriptors or inline data
*/
struct efa_io_tx_wqe {
/* TX meta */
struct efa_io_tx_meta_desc meta;
union {
/* Send buffer descriptors */
struct efa_io_tx_buf_desc sgl[2];
u8 inline_data[32];
/* RDMA local and remote memory addresses */
struct efa_io_rdma_req rdma_req;
} data;
};
/*
* Rx buffer descriptor; RX WQE is composed of one or more RX buffer
* descriptors.
*/
struct efa_io_rx_desc {
/* Buffer address bits[31:0] */
u32 buf_addr_lo;
/* Buffer Pointer[63:32] */
u32 buf_addr_hi;
/* Verbs-generated request id. */
u16 req_id;
/* Length in bytes. */
u16 length;
/*
* LKey and control flags
* 23:0 : lkey
* 29:24 : reserved - MBZ
* 30 : first - Indicates first descriptor in WQE
* 31 : last - Indicates last descriptor in WQE
*/
u32 lkey_ctrl;
};
/* Common IO completion descriptor */
struct efa_io_cdesc_common {
/*
* verbs-generated request ID, as provided in the completed tx or rx
* descriptor.
*/
u16 req_id;
u8 status;
/*
* flags
* 0 : phase - Phase bit
* 2:1 : q_type - enum efa_io_queue_type: send/recv
* 3 : has_imm - indicates that immediate data is
* present - for RX completions only
* 7:4 : reserved28 - MBZ
*/
u8 flags;
/* local QP number */
u16 qp_num;
/* Transferred length */
u16 length;
};
/* Tx completion descriptor */
struct efa_io_tx_cdesc {
/* Common completion info */
struct efa_io_cdesc_common common;
};
/* Rx Completion Descriptor */
struct efa_io_rx_cdesc {
/* Common completion info */
struct efa_io_cdesc_common common;
/* Remote Address Handle FW index, 0xFFFF indicates invalid ah */
u16 ah;
u16 src_qp_num;
/* Immediate data */
u32 imm;
};
/* Extended Rx Completion Descriptor */
struct efa_io_rx_cdesc_ex {
/* Base RX completion info */
struct efa_io_rx_cdesc rx_cdesc_base;
/*
* Valid only in case of unknown AH (0xFFFF) and CQ set_src_addr is
* enabled.
*/
u8 src_addr[16];
};
/* tx_meta_desc */
#define EFA_IO_TX_META_DESC_OP_TYPE_MASK GENMASK(3, 0)
#define EFA_IO_TX_META_DESC_HAS_IMM_MASK BIT(4)
#define EFA_IO_TX_META_DESC_INLINE_MSG_MASK BIT(5)
#define EFA_IO_TX_META_DESC_META_EXTENSION_MASK BIT(6)
#define EFA_IO_TX_META_DESC_META_DESC_MASK BIT(7)
#define EFA_IO_TX_META_DESC_PHASE_MASK BIT(0)
#define EFA_IO_TX_META_DESC_FIRST_MASK BIT(2)
#define EFA_IO_TX_META_DESC_LAST_MASK BIT(3)
#define EFA_IO_TX_META_DESC_COMP_REQ_MASK BIT(4)
/* tx_buf_desc */
#define EFA_IO_TX_BUF_DESC_LKEY_MASK GENMASK(23, 0)
/* rx_desc */
#define EFA_IO_RX_DESC_LKEY_MASK GENMASK(23, 0)
#define EFA_IO_RX_DESC_FIRST_MASK BIT(30)
#define EFA_IO_RX_DESC_LAST_MASK BIT(31)
/* cdesc_common */
#define EFA_IO_CDESC_COMMON_PHASE_MASK BIT(0)
#define EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1)
#define EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3)
#endif /* _EFA_IO_H_ */
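
The *_MASK definitions at the bottom of the new header pair naturally with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>. An illustrative (not in-tree) decoder for the common completion descriptor's flags byte:

#include <linux/bitfield.h>
#include <linux/printk.h>

static void efa_decode_cdesc_flags(const struct efa_io_cdesc_common *cdesc)
{
	u8 phase = FIELD_GET(EFA_IO_CDESC_COMMON_PHASE_MASK, cdesc->flags);
	u8 q_type = FIELD_GET(EFA_IO_CDESC_COMMON_Q_TYPE_MASK, cdesc->flags);
	bool has_imm = FIELD_GET(EFA_IO_CDESC_COMMON_HAS_IMM_MASK,
				 cdesc->flags);

	pr_debug("cqe: phase=%u q_type=%u (1=send, 2=recv) has_imm=%d\n",
		 phase, q_type, has_imm);
}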

View File

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #include <linux/dma-buf.h>
@@ -15,6 +15,7 @@
 #include <rdma/uverbs_ioctl.h>
 
 #include "efa.h"
+#include "efa_io_defs.h"
 
 enum {
 	EFA_MMAP_DMA_PAGE = 0,
@@ -242,6 +243,7 @@ int efa_query_device(struct ib_device *ibdev,
 		resp.max_rq_wr = dev_attr->max_rq_depth;
 		resp.max_rdma_size = dev_attr->max_rdma_size;
 
+		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
 		if (EFA_DEV_CAP(dev, RDMA_READ))
 			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
 
@@ -1064,6 +1066,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	struct efa_ibv_create_cq cmd = {};
 	struct efa_cq *cq = to_ecq(ibcq);
 	int entries = attr->cqe;
+	bool set_src_addr;
 	int err;
 
 	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
@@ -1109,7 +1112,10 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		goto err_out;
 	}
 
-	if (!cmd.cq_entry_size) {
+	set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
+	if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
+	    (set_src_addr ||
+	     cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
 		ibdev_dbg(ibdev,
 			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
 		err = -EINVAL;
@@ -1138,6 +1144,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	params.dma_addr = cq->dma_addr;
 	params.entry_size_in_bytes = cmd.cq_entry_size;
 	params.num_sub_cqs = cmd.num_sub_cqs;
+	params.set_src_addr = set_src_addr;
 
 	if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
 		cq->eq = efa_vec2eq(dev, attr->comp_vector);
 		params.eqn = cq->eq->eeq.eqn;

View File

@@ -9,6 +9,7 @@
 #include <linux/bitfield.h>
 #include <linux/netdevice.h>
+#include <linux/pci.h>
 #include <linux/xarray.h>
 #include <rdma/ib_verbs.h>
 
@@ -196,6 +197,7 @@ struct erdma_dev {
 	struct erdma_devattr attrs;
 	/* physical port state (only one port per device) */
 	enum ib_port_state state;
+	u32 mtu;
 
 	/* cmdq and aeq use the same msix vector */
 	struct erdma_irq comm_irq;
@@ -269,7 +271,7 @@ void erdma_finish_cmdq_init(struct erdma_dev *dev);
 void erdma_cmdq_destroy(struct erdma_dev *dev);
 
 void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
-int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
 			u64 *resp0, u64 *resp1);
 void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);

View File

@@ -10,15 +10,7 @@
 /* Copyright (c) 2008-2019, IBM Corporation */
 /* Copyright (c) 2017, Open Grid Computing, Inc. */
 
-#include <linux/errno.h>
-#include <linux/inetdevice.h>
-#include <linux/net.h>
-#include <linux/types.h>
 #include <linux/workqueue.h>
-#include <net/addrconf.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
 
 #include "erdma.h"
 #include "erdma_cm.h"

View File

@@ -4,13 +4,7 @@
 /*          Kai Shen <kaishen@linux.alibaba.com> */
 /* Copyright (c) 2020-2022, Alibaba Group. */
 
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
 #include "erdma.h"
-#include "erdma_hw.h"
-#include "erdma_verbs.h"
 
 static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
 {
@@ -441,7 +435,7 @@ void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
 	       FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
 }
 
-int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
 			u64 *resp0, u64 *resp1)
 {
 	struct erdma_comp_wait *comp_wait;

View File

@@ -4,9 +4,6 @@
 /*          Kai Shen <kaishen@linux.alibaba.com> */
 /* Copyright (c) 2020-2022, Alibaba Group. */
 
-#include <rdma/ib_verbs.h>
-
-#include "erdma_hw.h"
 #include "erdma_verbs.h"
 
 static void *get_next_valid_cqe(struct erdma_cq *cq)
@@ -62,7 +59,6 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
 	[ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
 	[ERDMA_OP_RECV_INV] = IB_WC_RECV,
 	[ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
 	[ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
 	[ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
 	[ERDMA_OP_REG_MR] = IB_WC_REG_MR,

View File

@@ -4,12 +4,6 @@
 /*          Kai Shen <kaishen@linux.alibaba.com> */
 /* Copyright (c) 2020-2022, Alibaba Group. */
 
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
-#include "erdma.h"
-#include "erdma_hw.h"
 #include "erdma_verbs.h"
 
 #define MAX_POLL_CHUNK_SIZE 16
@@ -229,9 +223,7 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
 	req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
 	req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
 
-	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
-				   sizeof(struct erdma_cmdq_create_eq_req),
-				   NULL, NULL);
+	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 }
 
 static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
@@ -281,8 +273,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
 	req.qtype = ERDMA_EQ_TYPE_CEQ;
 	req.vector_idx = ceqn + 1;
 
-	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
-				  NULL);
+	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 	if (err)
 		return;

View File

@@ -153,6 +153,7 @@ enum CMDQ_COMMON_OPCODE {
 	CMDQ_OPCODE_CREATE_EQ = 0,
 	CMDQ_OPCODE_DESTROY_EQ = 1,
 	CMDQ_OPCODE_QUERY_FW_INFO = 2,
+	CMDQ_OPCODE_CONF_MTU = 3,
 };

 /* cmdq-SQE HDR */

@@ -190,6 +191,11 @@ struct erdma_cmdq_destroy_eq_req {
 	u8 qtype;
 };

+struct erdma_cmdq_config_mtu_req {
+	u64 hdr;
+	u32 mtu;
+};
+
 /* create_cq cfg0 */
 #define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
 #define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)

@@ -450,13 +456,13 @@ enum erdma_opcode {
 	ERDMA_OP_RECV_IMM = 5,
 	ERDMA_OP_RECV_INV = 6,
-	ERDMA_OP_REQ_ERR = 7,
-	ERDMA_OP_READ_RESPONSE = 8,
+	ERDMA_OP_RSVD0 = 7,
+	ERDMA_OP_RSVD1 = 8,
 	ERDMA_OP_WRITE_WITH_IMM = 9,
-	ERDMA_OP_RECV_ERR = 10,
-	ERDMA_OP_INVALIDATE = 11,
+	ERDMA_OP_RSVD2 = 10,
+	ERDMA_OP_RSVD3 = 11,
 	ERDMA_OP_RSP_SEND_IMM = 12,
 	ERDMA_OP_SEND_WITH_INV = 13,
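
The new CONF_MTU command is a 64-bit cmdq header followed by a 32-bit MTU. A standalone sketch of composing such a request; the real header bit layout is the driver's ERDMA_CMD_HDR_* masks, and the shift positions and submodule value below are illustrative placeholders only:

#include <stdint.h>
#include <stdio.h>

#define HDR_SUBMOD_SHIFT 48	/* assumed bit position, illustration only */
#define HDR_OPCODE_SHIFT 32	/* assumed bit position, illustration only */
#define CMDQ_SUBMOD_COMMON 1ULL	/* placeholder value */
#define CMDQ_OPCODE_CONF_MTU 3ULL

struct cmdq_config_mtu_req {
	uint64_t hdr;	/* submodule + opcode packed into the header */
	uint32_t mtu;
};

int main(void)
{
	struct cmdq_config_mtu_req req = {
		.hdr = (CMDQ_SUBMOD_COMMON << HDR_SUBMOD_SHIFT) |
		       (CMDQ_OPCODE_CONF_MTU << HDR_OPCODE_SHIFT),
		.mtu = 1500,
	};

	printf("hdr=%#llx mtu=%u\n", (unsigned long long)req.hdr, req.mtu);
	return 0;
}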

View File

@@ -4,21 +4,12 @@
 /* Kai Shen <kaishen@linux.alibaba.com> */
 /* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
 #include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
 #include <net/addrconf.h>
 #include <rdma/erdma-abi.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
 #include "erdma.h"
 #include "erdma_cm.h"
-#include "erdma_hw.h"
 #include "erdma_verbs.h"

 MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");

@@ -43,10 +34,15 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
 		dev->state = IB_PORT_DOWN;
 		erdma_port_event(dev, IB_EVENT_PORT_ERR);
 		break;
+	case NETDEV_CHANGEMTU:
+		if (dev->mtu != netdev->mtu) {
+			erdma_set_mtu(dev, netdev->mtu);
+			dev->mtu = netdev->mtu;
+		}
+		break;
 	case NETDEV_REGISTER:
 	case NETDEV_UNREGISTER:
 	case NETDEV_CHANGEADDR:
-	case NETDEV_CHANGEMTU:
 	case NETDEV_GOING_DOWN:
 	case NETDEV_CHANGE:
 	default:

@@ -104,6 +100,7 @@ static int erdma_device_register(struct erdma_dev *dev)
 	if (ret)
 		return ret;

+	dev->mtu = dev->netdev->mtu;
 	addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);

 	ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
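
The notifier hunk above uses a cache-and-compare pattern: the device remembers the MTU it last programmed and only issues a firmware command when the netdev's value actually changed. A minimal userspace model of that pattern; all names are illustrative:

#include <stdio.h>

struct dev_state { unsigned int mtu; };

static void set_hw_mtu(struct dev_state *d, unsigned int mtu)
{
	printf("programming hw mtu %u\n", mtu);	/* stands in for the cmdq call */
	d->mtu = mtu;
}

static void on_changemtu(struct dev_state *d, unsigned int new_mtu)
{
	if (d->mtu != new_mtu)	/* skip redundant firmware round-trips */
		set_hw_mtu(d, new_mtu);
}

int main(void)
{
	struct dev_state d = { .mtu = 1500 };	/* seeded at register time */

	on_changemtu(&d, 1500);	/* no-op: value unchanged */
	on_changemtu(&d, 9000);	/* programs the hardware once */
	return 0;
}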

View File

@@ -6,15 +6,6 @@
 /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
 /* Copyright (c) 2008-2019, IBM Corporation */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/scatterlist.h>
-#include <linux/types.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
-#include "erdma.h"
 #include "erdma_cm.h"
 #include "erdma_verbs.h"

@@ -105,8 +96,7 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
 	req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
 	req.recv_nxt = tp->rcv_nxt;

-	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
-				   NULL);
+	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 }

 static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,

@@ -124,8 +114,7 @@ static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
 	req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
 		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));

-	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
-				   NULL);
+	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 }

 int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,

View File

@@ -9,21 +9,14 @@
 /* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <net/addrconf.h>
 #include <rdma/erdma-abi.h>
 #include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
 #include <rdma/uverbs_ioctl.h>
 #include "erdma.h"
 #include "erdma_cm.h"
-#include "erdma_hw.h"
 #include "erdma_verbs.h"

 static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)

@@ -102,7 +95,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
 		req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
 	}

-	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), &resp0,
+	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
 				  &resp1);
 	if (!err)
 		qp->attrs.cookie =

@@ -151,8 +144,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
 	}

 post_cmd:
-	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
-				   NULL);
+	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 }

 static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)

@@ -202,8 +194,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
 		req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
 	}

-	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
-				   NULL);
+	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 }

 static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)

@@ -976,8 +967,7 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
 		  FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);

-	ret = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
-				  NULL);
+	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 	if (ret)
 		return ret;

@@ -1002,8 +992,7 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 			      CMDQ_OPCODE_DESTROY_CQ);
 	req.cqn = cq->cqn;

-	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
-				  NULL);
+	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 	if (err)
 		return err;

@@ -1040,8 +1029,7 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 			      CMDQ_OPCODE_DESTROY_QP);
 	req.qpn = QP_ID(qp);

-	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
-				  NULL);
+	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 	if (err)
 		return err;

@@ -1448,6 +1436,17 @@ err_out_xa:
 	return ret;
 }

+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
+{
+	struct erdma_cmdq_config_mtu_req req;
+
+	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+				CMDQ_OPCODE_CONF_MTU);
+	req.mtu = mtu;
+
+	erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+}
+
 void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
 {
 	struct ib_event event;

View File

@@ -7,15 +7,7 @@
 #ifndef __ERDMA_VERBS_H__
 #define __ERDMA_VERBS_H__
-#include <linux/errno.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/iw_cm.h>
 #include "erdma.h"
-#include "erdma_cm.h"
-#include "erdma_hw.h"

 /* RDMA Capability. */
 #define ERDMA_MAX_PD (128 * 1024)

@@ -338,5 +330,6 @@ struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		    unsigned int *sg_offset);
 void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);

 #endif

View File

@@ -8753,7 +8753,7 @@ static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
 	/*
 	 * When writing a LCB CSR, out_data contains the full value to
-	 * to be written, while in_data contains the relative LCB
+	 * be written, while in_data contains the relative LCB
 	 * address in 7:0. Do the work here, rather than the caller,
 	 * of distrubting the write data to where it needs to go:
 	 *

View File

@@ -965,7 +965,7 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
 	uctxt->userversion = uinfo->userversion;
 	uctxt->flags = hfi1_cap_mask; /* save current flag state */
 	init_waitqueue_head(&uctxt->wait);
-	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
+	strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
 	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
 	uctxt->jkey = generate_jkey(current_uid());
 	hfi1_stats.sps_ctxts++;
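
The strlcpy-to-strscpy conversions in this pull replace a return value of strlen(src) (which silently hides truncation) with the number of bytes actually copied, or -E2BIG on truncation, always NUL-terminating. A userspace model of the strscpy() contract; my_strscpy is a local stand-in, not the kernel function:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static long my_strscpy(char *dst, const char *src, size_t count)
{
	size_t len = strnlen(src, count);	/* bounded: never over-reads src */

	if (count == 0)
		return -E2BIG;
	if (len == count) {			/* source would be truncated */
		memcpy(dst, src, count - 1);
		dst[count - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);		/* includes the NUL */
	return (long)len;
}

int main(void)
{
	char comm[8];

	printf("%ld\n", my_strscpy(comm, "short", sizeof(comm)));		/* 5 */
	printf("%ld\n", my_strscpy(comm, "much-too-long", sizeof(comm)));	/* -E2BIG */
	return 0;
}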

View File

@@ -1114,7 +1114,7 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
 * Reset all of the fabric serdes for this HFI in preparation to take the
 * link to Polling.
 *
- * To do a reset, we need to write to to the serdes registers. Unfortunately,
+ * To do a reset, we need to write to the serdes registers. Unfortunately,
 * the fabric serdes download to the other HFI on the ASIC will have turned
 * off the firmware validation on this HFI. This means we can't write to the
 * registers to reset the serdes. Work around this by performing a complete

View File

@@ -11,13 +11,10 @@
 static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
 {
-	void *dst_data;
-
 	skb_checksum_none_assert(skb);
 	skb->protocol = *((__be16 *)data);
-	dst_data = skb_put(skb, size);
-	memcpy(dst_data, data, size);
+	skb_put_data(skb, data, size);
 	skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
 	skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
 }
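
skb_put_data() fuses the two steps that skb_put()+memcpy() split apart: extending the buffer tail and copying the payload into it, which also removes the temporary pointer. A minimal userspace model of the same fusion over a plain buffer; buf_put/buf_put_data are illustrative names:

#include <stdio.h>
#include <string.h>

struct buf { char data[64]; size_t len; };

static void *buf_put(struct buf *b, size_t size)
{
	void *tail = b->data + b->len;	/* old tail, like skb_put() */

	b->len += size;
	return tail;
}

static void *buf_put_data(struct buf *b, const void *data, size_t size)
{
	void *tail = buf_put(b, size);

	memcpy(tail, data, size);	/* one call instead of two statements */
	return tail;
}

int main(void)
{
	struct buf b = { .len = 0 };

	buf_put_data(&b, "payload", 7);
	printf("len=%zu\n", b.len);
	return 0;
}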

View File

@@ -1447,12 +1447,10 @@ static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
 	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
-	int ret;

 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
 			     OPA_LINKDOWN_REASON_UNKNOWN);
-	ret = set_link_state(ppd, HLS_DN_DOWNDEF);
-	return ret;
+	return set_link_state(ppd, HLS_DN_DOWNDEF);
 }

 static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,

@@ -1801,7 +1799,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	ib_set_device_ops(ibdev, &hfi1_dev_ops);

-	strlcpy(ibdev->node_desc, init_utsname()->nodename,
+	strscpy(ibdev->node_desc, init_utsname()->nodename,
 		sizeof(ibdev->node_desc));

 	/*

View File

@@ -391,9 +391,6 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
 int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
 		   bool *call_send);

-extern const u32 rc_only_opcode;
-extern const u32 uc_only_opcode;
-
 int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet);

 u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,

View File

@@ -10,6 +10,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
 	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o

 ifdef CONFIG_INFINIBAND_HNS_HIP08
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
 endif

View File

@@ -454,7 +454,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 	hr_cq = xa_load(&hr_dev->cq_table.array,
 			cqn & (hr_dev->caps.num_cqs - 1));
 	if (!hr_cq) {
-		dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+		dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
 			 cqn);
 		return;
 	}

@@ -475,14 +475,14 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 	hr_cq = xa_load(&hr_dev->cq_table.array,
 			cqn & (hr_dev->caps.num_cqs - 1));
 	if (!hr_cq) {
-		dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
 		return;
 	}

 	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-		dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+		dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
 			event_type, cqn);
 		return;
 	}

View File

@@ -240,7 +240,6 @@ struct hns_roce_hem_table {
 	/* Single obj size */
 	unsigned long obj_size;
 	unsigned long table_chunk_size;
-	int lowmem;
 	struct mutex mutex;
 	struct hns_roce_hem **hem;
 	u64 **bt_l1;

@@ -599,7 +598,6 @@ struct hns_roce_qp {
 	struct hns_roce_db rdb;
 	struct hns_roce_db sdb;
 	unsigned long en_flags;
-	u32 doorbell_qpn;
 	enum ib_sig_type sq_signal_bits;
 	struct hns_roce_wq sq;

@@ -726,7 +724,7 @@ struct hns_roce_caps {
 	u32 max_sq_sg;
 	u32 max_sq_inline;
 	u32 max_rq_sg;
-	u32 max_extend_sg;
+	u32 rsv0;
 	u32 num_qps;
 	u32 num_pi_qps;
 	u32 reserved_qps;

@@ -736,7 +734,7 @@ struct hns_roce_caps {
 	u32 max_srq_sges;
 	u32 max_sq_desc_sz;
 	u32 max_rq_desc_sz;
-	u32 max_srq_desc_sz;
+	u32 rsv2;
 	int max_qp_init_rdma;
 	int max_qp_dest_rdma;
 	u32 num_cqs;

@@ -749,7 +747,7 @@ struct hns_roce_caps {
 	int num_comp_vectors;
 	int num_other_vectors;
 	u32 num_mtpts;
-	u32 num_mtt_segs;
+	u32 rsv1;
 	u32 num_srqwqe_segs;
 	u32 num_idx_segs;
 	int reserved_mrws;

@@ -848,11 +846,6 @@ struct hns_roce_caps {
 	enum cong_type cong_type;
 };

-struct hns_roce_dfx_hw {
-	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
-			      int *buffer);
-};
-
 enum hns_roce_device_state {
 	HNS_ROCE_DEVICE_STATE_INITED,
 	HNS_ROCE_DEVICE_STATE_RST_DOWN,

@@ -898,6 +891,9 @@ struct hns_roce_hw {
 	int (*init_eq)(struct hns_roce_dev *hr_dev);
 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
 	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
+	int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
+	int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
+	int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
 	const struct ib_device_ops *hns_roce_dev_ops;
 	const struct ib_device_ops *hns_roce_dev_srq_ops;
 };

@@ -959,7 +955,6 @@ struct hns_roce_dev {
 	void *priv;
 	struct workqueue_struct *irq_workq;
 	struct work_struct ecc_work;
-	const struct hns_roce_dfx_hw *dfx;
 	u32 func_num;
 	u32 is_vf;
 	u32 cong_algo_tmpl_id;

@@ -1227,8 +1222,12 @@ u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
 void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
-			       struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
 struct hns_user_mmap_entry *
 hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
 				size_t length,
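
The three new query_cqc/query_qpc/query_mpt hw ops give the generic restrack code one uniform way to dump raw hardware context: each hardware generation fills in the table, and callers dispatch through it without knowing the context layout. A standalone sketch of that dispatch; names and the 16-byte "context" are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hw_ops {
	int (*query_cqc)(uint32_t cqn, void *buffer);
};

static int v2_query_cqc(uint32_t cqn, void *buffer)
{
	uint32_t fake_ctx[4] = { cqn, 0, 0, 0 };	/* stand-in context */

	memcpy(buffer, fake_ctx, sizeof(fake_ctx));
	return 0;
}

static const struct hw_ops hw_v2 = { .query_cqc = v2_query_cqc };

static int fill_res_cq_entry_raw(const struct hw_ops *hw, uint32_t cqn)
{
	uint32_t ctx[4];

	if (!hw->query_cqc)	/* generation without the op: nothing to dump */
		return -1;
	return hw->query_cqc(cqn, ctx);
}

int main(void)
{
	printf("%d\n", fill_res_cq_entry_raw(&hw_v2, 7));
	return 0;
}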

View File

@@ -455,7 +455,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
 	 * alloc bt space chunk for MTT/CQE.
 	 */
 	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
-	flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
+	flag = GFP_KERNEL | __GFP_NOWARN;
 	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
 						    size, flag);
 	if (!table->hem[index->buf]) {

@@ -588,8 +588,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 	table->hem[i] = hns_roce_alloc_hem(hr_dev,
 				       table->table_chunk_size >> PAGE_SHIFT,
 				       table->table_chunk_size,
-				       (table->lowmem ? GFP_KERNEL :
-					GFP_HIGHUSER) | __GFP_NOWARN);
+				       GFP_KERNEL | __GFP_NOWARN);
 	if (!table->hem[i]) {
 		ret = -ENOMEM;
 		goto out;

@@ -725,9 +724,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 	int length;
 	int i, j;

-	if (!table->lowmem)
-		return NULL;
-
 	mutex_lock(&table->mutex);

 	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {

@@ -783,8 +779,7 @@ out:
 int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 			    struct hns_roce_hem_table *table, u32 type,
-			    unsigned long obj_size, unsigned long nobj,
-			    int use_lowmem)
+			    unsigned long obj_size, unsigned long nobj)
 {
 	unsigned long obj_per_chunk;
 	unsigned long num_hem;

@@ -861,7 +856,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 	table->type = type;
 	table->num_hem = num_hem;
 	table->obj_size = obj_size;
-	table->lowmem = use_lowmem;
 	mutex_init(&table->mutex);

 	return 0;

@@ -932,7 +926,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 		if (table->hem[i]) {
 			if (hr_dev->hw->clear_hem(hr_dev, table,
 			    i * table->table_chunk_size / table->obj_size, 0))
-				dev_err(dev, "Clear HEM base address failed.\n");
+				dev_err(dev, "clear HEM base address failed.\n");

 			hns_roce_free_hem(hr_dev, table->hem[i]);
 		}

@@ -986,7 +980,7 @@ struct hns_roce_hem_head {
 static struct hns_roce_hem_item *
 hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
-		    bool exist_bt, int bt_level)
+		    bool exist_bt)
 {
 	struct hns_roce_hem_item *hem;

@@ -1195,7 +1189,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
 		start_aligned = (distance / step) * step + r->offset;
 		end = min_t(int, start_aligned + step - 1, max_ofs);
 		cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
-					  true, level);
+					  true);
 		if (!cur) {
 			ret = -ENOMEM;
 			goto err_exit;

@@ -1247,7 +1241,7 @@ alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
 	/* indicate to last region */
 	r = &regions[region_cnt - 1];
 	hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
-				  ba_num, true, 0);
+				  ba_num, true);
 	if (!hem)
 		return ERR_PTR(-ENOMEM);

@@ -1264,7 +1258,7 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
 	struct hns_roce_hem_item *hem;

 	hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
-				  r->count, false, 0);
+				  r->count, false);
 	if (!hem)
 		return -ENOMEM;

@@ -1421,7 +1415,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
 						    &hem_list->btm_bt);
 			if (ret) {
 				dev_err(hr_dev->dev,
-					"alloc hem trunk fail ret=%d!\n", ret);
+					"alloc hem trunk fail ret = %d!\n", ret);
 				goto err_alloc;
 			}
 		}

@@ -1430,7 +1424,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
 	ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
 				     region_cnt);
 	if (ret)
-		dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret);
+		dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret);
 	else
 		return 0;

@@ -1468,19 +1462,17 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
 void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
 				 struct hns_roce_hem_list *hem_list,
-				 int offset, int *mtt_cnt, u64 *phy_addr)
+				 int offset, int *mtt_cnt)
 {
 	struct list_head *head = &hem_list->btm_bt;
 	struct hns_roce_hem_item *hem, *temp_hem;
 	void *cpu_base = NULL;
-	u64 phy_base = 0;
 	int nr = 0;

 	list_for_each_entry_safe(hem, temp_hem, head, sibling) {
 		if (hem_list_page_is_in_range(hem, offset)) {
 			nr = offset - hem->start;
 			cpu_base = hem->addr + nr * BA_BYTE_LEN;
-			phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
 			nr = hem->end + 1 - offset;
 			break;
 		}

@@ -1489,8 +1481,5 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
 	if (mtt_cnt)
 		*mtt_cnt = nr;

-	if (phy_addr)
-		*phy_addr = phy_base;
-
 	return cpu_base;
 }

View File

@@ -111,8 +111,7 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 			  dma_addr_t *dma_handle);
 int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 			    struct hns_roce_hem_table *table, u32 type,
-			    unsigned long obj_size, unsigned long nobj,
-			    int use_lowmem);
+			    unsigned long obj_size, unsigned long nobj);
 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 				struct hns_roce_hem_table *table);
 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);

@@ -132,7 +131,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
 			       struct hns_roce_hem_list *hem_list);
 void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
 				 struct hns_roce_hem_list *hem_list,
-				 int offset, int *mtt_cnt, u64 *phy_addr);
+				 int offset, int *mtt_cnt);

 static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
 				      struct hns_roce_hem_iter *iter)

View File

@@ -193,8 +193,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
 				 unsigned int *sge_idx, u32 msg_len)
 {
 	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
-	unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
-	unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
+	unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE;
 	unsigned int left_len_in_pg;
 	unsigned int idx = *sge_idx;
 	unsigned int i = 0;

@@ -222,7 +221,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
 		if (len <= left_len_in_pg) {
 			memcpy(dseg, addr, len);

-			idx += len / dseg_len;
+			idx += len / HNS_ROCE_SGE_SIZE;

 			i++;
 			if (i >= wr->num_sge)

@@ -237,7 +236,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
 			len -= left_len_in_pg;
 			addr += left_len_in_pg;
-			idx += left_len_in_pg / dseg_len;
+			idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
 			dseg = hns_roce_get_extend_sge(qp,
 						idx & (qp->sge.sge_cnt - 1));
 			left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;

@@ -381,7 +380,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
 	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
 		     ibqp->qp_type != IB_QPT_GSI &&
 		     ibqp->qp_type != IB_QPT_UD)) {
-		ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
+		ibdev_err(ibdev, "not supported QP(0x%x)type!\n",
 			  ibqp->qp_type);
 		return -EOPNOTSUPP;
 	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||

@@ -637,7 +636,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
 	} else {
 		struct hns_roce_v2_db sq_db = {};

-		hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
+		hr_reg_write(&sq_db, DB_TAG, qp->qpn);
 		hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
 		hr_reg_write(&sq_db, DB_PI, qp->sq.head);
 		hr_reg_write(&sq_db, DB_SL, qp->sl);

@@ -1406,20 +1405,20 @@ static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
 	hr_dev->dis_db = true;

 	dev_warn(hr_dev->dev,
-		 "Func clear is pending, device in resetting state.\n");
+		 "func clear is pending, device in resetting state.\n");
 	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
 	while (end) {
 		if (!ops->get_hw_reset_stat(handle)) {
 			hr_dev->is_reset = true;
 			dev_info(hr_dev->dev,
-				 "Func clear success after reset.\n");
+				 "func clear success after reset.\n");
 			return;
 		}
 		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
 		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
 	}

-	dev_warn(hr_dev->dev, "Func clear failed.\n");
+	dev_warn(hr_dev->dev, "func clear failed.\n");
 }

@@ -1431,21 +1430,21 @@ static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
 	hr_dev->dis_db = true;

 	dev_warn(hr_dev->dev,
-		 "Func clear is pending, device in resetting state.\n");
+		 "func clear is pending, device in resetting state.\n");
 	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
 	while (end) {
 		if (ops->ae_dev_reset_cnt(handle) !=
 		    hr_dev->reset_cnt) {
 			hr_dev->is_reset = true;
 			dev_info(hr_dev->dev,
-				 "Func clear success after sw reset\n");
+				 "func clear success after sw reset\n");
 			return;
 		}
 		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
 		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
 	}

-	dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
+	dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n");
 }

@@ -1458,7 +1457,7 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
 	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
 		hr_dev->dis_db = true;
 		hr_dev->is_reset = true;
-		dev_info(hr_dev->dev, "Func clear success after reset.\n");
+		dev_info(hr_dev->dev, "func clear success after reset.\n");
 		return;
 	}

@@ -1475,9 +1474,9 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
 	if (retval && !flag)
 		dev_warn(hr_dev->dev,
-			 "Func clear read failed, ret = %d.\n", retval);
+			 "func clear read failed, ret = %d.\n", retval);

-	dev_warn(hr_dev->dev, "Func clear failed.\n");
+	dev_warn(hr_dev->dev, "func clear failed.\n");
 }

@@ -1498,7 +1497,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
 		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
 		if (ret) {
 			fclr_write_fail_flag = true;
-			dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
+			dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
 				ret);
 			goto out;
 		}

@@ -1966,7 +1965,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
 	caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
 	caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
-	caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
 	caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;

 	caps->num_uars = HNS_ROCE_V2_UAR_NUM;

@@ -1984,7 +1982,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
 	caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
 	caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
-	caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
 	caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
 	caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
 	caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;

@@ -2185,13 +2182,14 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 	caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
 	caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;

-	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
 	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
 	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;

 	if (!caps->num_comp_vectors)
-		caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
-					 (u32)priv->handle->rinfo.num_vectors - 2);
+		caps->num_comp_vectors =
+			min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
+			      (u32)priv->handle->rinfo.num_vectors -
+			      (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));

 	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
 		caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;

@@ -2272,14 +2270,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 	caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
 	caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
 	caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
-	caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
 	caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
 	caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
 	caps->num_aeq_vectors = resp_a->num_aeq_vectors;
 	caps->num_other_vectors = resp_a->num_other_vectors;
 	caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
 	caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
-	caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
 	caps->cqe_sz = resp_a->cqe_sz;

 	caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;

@@ -4299,7 +4295,6 @@ static inline int get_pdn(struct ib_pd *ib_pd)
 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
 				    const struct ib_qp_attr *attr,
-				    int attr_mask,
 				    struct hns_roce_v2_qp_context *context,
 				    struct hns_roce_v2_qp_context *qpc_mask)
 {

@@ -4363,7 +4358,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
 }

 static void modify_qp_init_to_init(struct ib_qp *ibqp,
-				   const struct ib_qp_attr *attr, int attr_mask,
+				   const struct ib_qp_attr *attr,
 				   struct hns_roce_v2_qp_context *context,
 				   struct hns_roce_v2_qp_context *qpc_mask)
 {

@@ -4612,7 +4607,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 		hr_reg_clear(qpc_mask, QPC_DQPN);
 	}

-	memcpy(&(context->dmac), dmac, sizeof(u32));
+	memcpy(&context->dmac, dmac, sizeof(u32));
 	hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
 	qpc_mask->dmac = 0;
 	hr_reg_clear(qpc_mask, QPC_DMAC_H);

@@ -5014,11 +5009,9 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
-		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
-					qpc_mask);
+		modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
-		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
-				       qpc_mask);
+		modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
 		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
 					    qpc_mask);

@@ -5039,14 +5032,14 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
 		if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
 			ibdev_warn(&hr_dev->ib_dev,
-				   "Local ACK timeout shall be 0 to 20.\n");
+				   "local ACK timeout shall be 0 to 20.\n");
 			return false;
 		}
 		*timeout += QP_ACK_TIMEOUT_OFFSET;
 	} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
 		if (*timeout > QP_ACK_TIMEOUT_MAX) {
 			ibdev_warn(&hr_dev->ib_dev,
-				   "Local ACK timeout shall be 0 to 31.\n");
+				   "local ACK timeout shall be 0 to 31.\n");
 			return false;
 		}
 	}

@@ -5306,9 +5299,8 @@ static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
 	return (state < ARRAY_SIZE(map)) ? map[state] : -1;
 }

-static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
-				 struct hns_roce_qp *hr_qp,
-				 struct hns_roce_v2_qp_context *hr_context)
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
+				 void *buffer)
 {
 	struct hns_roce_cmd_mailbox *mailbox;
 	int ret;

@@ -5318,11 +5310,11 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
 		return PTR_ERR(mailbox);

 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
-				hr_qp->qpn);
+				qpn);
 	if (ret)
 		goto out;

-	memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
+	memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz);

 out:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

@@ -5352,7 +5344,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		goto done;
 	}

-	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
+	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
 	if (ret) {
 		ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
 		ret = -EINVAL;

@@ -5550,7 +5542,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
 		msleep(20);
 	}

-	ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
+	ibdev_err(ibdev, "query SCC clr done flag overtime.\n");
 	ret = -ETIMEDOUT;

 out:

@@ -5773,6 +5765,64 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	return ret;
 }

+static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
+				 void *buffer)
+{
+	struct hns_roce_v2_cq_context *context;
+	struct hns_roce_cmd_mailbox *mailbox;
+	int ret;
+
+	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	context = mailbox->buf;
+	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+				HNS_ROCE_CMD_QUERY_CQC, cqn);
+	if (ret) {
+		ibdev_err(&hr_dev->ib_dev,
+			  "failed to process cmd when querying CQ, ret = %d.\n",
+			  ret);
+		goto err_mailbox;
+	}
+
+	memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+	return ret;
+}
+
+static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
+				 void *buffer)
+{
+	struct hns_roce_v2_mpt_entry *context;
+	struct hns_roce_cmd_mailbox *mailbox;
+	int ret;
+
+	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	context = mailbox->buf;
+	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
+				key_to_hw_index(key));
+	if (ret) {
+		ibdev_err(&hr_dev->ib_dev,
+			  "failed to process cmd when querying MPT, ret = %d.\n",
+			  ret);
+		goto err_mailbox;
+	}
+
+	memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+	return ret;
+}
+
 static void hns_roce_irq_work_handle(struct work_struct *work)
 {
 	struct hns_roce_work *irq_work =

@@ -5781,26 +5831,26 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
 	switch (irq_work->event_type) {
 	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
-		ibdev_info(ibdev, "Path migrated succeeded.\n");
+		ibdev_info(ibdev, "path migrated succeeded.\n");
 		break;
 	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
-		ibdev_warn(ibdev, "Path migration failed.\n");
+		ibdev_warn(ibdev, "path migration failed.\n");
 		break;
 	case HNS_ROCE_EVENT_TYPE_COMM_EST:
 		break;
 	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
-		ibdev_warn(ibdev, "Send queue drained.\n");
+		ibdev_warn(ibdev, "send queue drained.\n");
 		break;
 	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
-		ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
+		ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
 			  irq_work->queue_num, irq_work->sub_type);
 		break;
 	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
-		ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
+		ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n",
 			  irq_work->queue_num);
 		break;
 	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
-		ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
+		ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n",
 			  irq_work->queue_num, irq_work->sub_type);
 		break;
 	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:

@@ -5822,7 +5872,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
 		ibdev_warn(ibdev, "DB overflow.\n");
 		break;
 	case HNS_ROCE_EVENT_TYPE_FLR:
-		ibdev_warn(ibdev, "Function level reset.\n");
+		ibdev_warn(ibdev, "function level reset.\n");
 		break;
 	case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
 		ibdev_err(ibdev, "xrc domain violation error.\n");

@@ -5846,12 +5896,12 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
 	if (!irq_work)
 		return;

-	INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
+	INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
 	irq_work->hr_dev = hr_dev;
 	irq_work->event_type = eq->event_type;
 	irq_work->sub_type = eq->sub_type;
 	irq_work->queue_num = queue_num;
-	queue_work(hr_dev->irq_workq, &(irq_work->work));
+	queue_work(hr_dev->irq_workq, &irq_work->work);
 }

 static void update_eq_db(struct hns_roce_eq *eq)

@@ -5941,7 +5991,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 		case HNS_ROCE_EVENT_TYPE_FLR:
 			break;
 		default:
-			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+			dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
 				event_type, eq->eqn, eq->cons_index);
 			break;
 		}

@@ -6011,7 +6061,7 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
 		/* Completion event interrupt */
 		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
 	else
-		/* Asychronous event interrupt */
+		/* Asynchronous event interrupt */
 		int_work = hns_roce_v2_aeq_int(hr_dev, eq);

 	return IRQ_RETVAL(int_work);

@@ -6332,7 +6382,7 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
 			       hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
 			       0);
 	if (err)
-		dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
+		dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);

 	return err;
 }

@@ -6421,7 +6471,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
 					  0, hr_dev->irq_names[j - comp_num],
 					  &eq_table->eq[j - other_num]);
 		if (ret) {
-			dev_err(hr_dev->dev, "Request irq error!\n");
+			dev_err(hr_dev->dev, "request irq error!\n");
 			goto err_request_failed;
 		}
 	}

@@ -6574,10 +6624,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
 	kfree(eq_table->eq);
 }

-static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
-	.query_cqc_info = hns_roce_v2_query_cqc_info,
-};
-
 static const struct ib_device_ops hns_roce_v2_dev_ops = {
 	.destroy_qp = hns_roce_v2_destroy_qp,
 	.modify_cq = hns_roce_v2_modify_cq,

@@ -6618,6 +6664,9 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
 	.init_eq = hns_roce_v2_init_eq_table,
 	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
 	.write_srqc = hns_roce_v2_write_srqc,
+	.query_cqc = hns_roce_v2_query_cqc,
+	.query_qpc = hns_roce_v2_query_qpc,
+	.query_mpt = hns_roce_v2_query_mpt,
 	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
 	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
 };

@@ -6649,7 +6698,6 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
 	hr_dev->is_vf = id->driver_data;
 	hr_dev->dev = &handle->pdev->dev;
 	hr_dev->hw = &hns_roce_hw_v2;
-	hr_dev->dfx = &hns_roce_dfx_hw_v2;
 	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
 	hr_dev->odb_offset = hr_dev->sdb_offset;

@@ -6845,7 +6893,7 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
 		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
 	} else {
 		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
-		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
+		dev_info(dev, "reset done, RoCE client reinit finished.\n");
 	}

 	return ret;
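
The new query_cqc() and query_mpt() above share one mailbox shape: allocate a DMA-able command buffer, fire the QUERY command, copy the context out, and unwind through a single error label. A userspace model of that control flow; all names and the fixed context size are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CTX_SIZE 64	/* stand-in for the hardware context size */

static int run_query_cmd(void *mbox, unsigned int idx)
{
	memset(mbox, (int)idx, CTX_SIZE);	/* pretend hardware filled it */
	return 0;
}

static int query_ctx(unsigned int idx, void *buffer)
{
	void *mbox = malloc(CTX_SIZE);	/* stands in for the mailbox alloc */
	int ret;

	if (!mbox)
		return -1;

	ret = run_query_cmd(mbox, idx);
	if (ret)
		goto err_mailbox;	/* one unwind path, like the driver */

	memcpy(buffer, mbox, CTX_SIZE);

err_mailbox:
	free(mbox);
	return ret;
}

int main(void)
{
	char buf[CTX_SIZE];

	printf("%d\n", query_ctx(3, buf));
	return 0;
}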

View File

@@ -46,7 +46,6 @@
 #define HNS_ROCE_V2_MAX_CQE_NUM			0x400000
 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM		64
 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM		64
-#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM		0x200000
 #define HNS_ROCE_V2_MAX_SQ_INLINE		0x20
 #define HNS_ROCE_V3_MAX_SQ_INLINE		0x400
 #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ		32

@@ -55,7 +54,6 @@
 #define HNS_ROCE_V2_AEQE_VEC_NUM		1
 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM		1
 #define HNS_ROCE_V2_MAX_MTPT_NUM		0x100000
-#define HNS_ROCE_V2_MAX_MTT_SEGS		0x1000000
 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS		0x1000000
 #define HNS_ROCE_V2_MAX_IDX_SEGS		0x1000000
 #define HNS_ROCE_V2_MAX_PD_NUM			0x1000000

@@ -65,7 +63,6 @@
 #define HNS_ROCE_V2_MAX_QP_DEST_RDMA		128
 #define HNS_ROCE_V2_MAX_SQ_DESC_SZ		64
 #define HNS_ROCE_V2_MAX_RQ_DESC_SZ		16
-#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ		64
 #define HNS_ROCE_V2_IRRL_ENTRY_SZ		64
 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ	100
 #define HNS_ROCE_V2_CQC_ENTRY_SZ		64

@@ -406,6 +403,7 @@ enum hns_roce_v2_qp_state {
 struct hns_roce_v2_qp_context_ex {
 	__le32 data[64];
 };
+
 struct hns_roce_v2_qp_context {
 	__le32 byte_4_sqpn_tst;
 	__le32 wqe_sge_ba;

@@ -758,7 +756,8 @@ struct hns_roce_v2_mpt_entry {
 #define MPT_INNER_PA_VLD MPT_FIELD_LOC(71, 71)
 #define MPT_MW_BIND_QPN MPT_FIELD_LOC(95, 72)
 #define MPT_BOUND_LKEY MPT_FIELD_LOC(127, 96)
-#define MPT_LEN MPT_FIELD_LOC(191, 128)
+#define MPT_LEN_L MPT_FIELD_LOC(159, 128)
+#define MPT_LEN_H MPT_FIELD_LOC(191, 160)
 #define MPT_LKEY MPT_FIELD_LOC(223, 192)
 #define MPT_VA MPT_FIELD_LOC(287, 224)
 #define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288)

@@ -1173,7 +1172,7 @@ struct hns_roce_query_pf_caps_a {
 	__le16 max_sq_sg;
 	__le16 max_sq_inline;
 	__le16 max_rq_sg;
-	__le32 max_extend_sg;
+	__le32 rsv0;
 	__le16 num_qpc_timer;
 	__le16 num_cqc_timer;
 	__le16 max_srq_sges;

@@ -1181,7 +1180,7 @@ struct hns_roce_query_pf_caps_a {
 	u8 num_other_vectors;
 	u8 max_sq_desc_sz;
 	u8 max_rq_desc_sz;
-	u8 max_srq_desc_sz;
+	u8 rsv1;
 	u8 cqe_sz;
 };

@@ -1462,9 +1461,6 @@ struct hns_roce_sccc_clr_done {
 	__le32 rsv[5];
 };

-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
-			       int *buffer);
-
 static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
 				    void __iomem *dest)
 {
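
Splitting the 64-bit MPT_LEN locator into MPT_LEN_L (bits 159:128) and MPT_LEN_H (bits 191:160) keeps each accessor within a single 32-bit word of the context. A tiny sketch of the low/high split itself; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

static void split64(uint64_t len, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(len & 0xffffffffu);	/* low word, like MPT_LEN_L */
	*hi = (uint32_t)(len >> 32);		/* high word, like MPT_LEN_H */
}

int main(void)
{
	uint32_t lo, hi;

	split64(0x123456789abcdef0ULL, &lo, &hi);
	printf("lo=%#x hi=%#x\n", lo, hi);
	return 0;
}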

View File

@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-// Copyright (c) 2019 Hisilicon Limited.
-
-#include "hnae3.h"
-#include "hns_roce_device.h"
-#include "hns_roce_cmd.h"
-#include "hns_roce_hw_v2.h"
-
-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
-			       int *buffer)
-{
-	struct hns_roce_v2_cq_context *cq_context;
-	struct hns_roce_cmd_mailbox *mailbox;
-	int ret;
-
-	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
-
-	cq_context = mailbox->buf;
-	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC,
-				cqn);
-	if (ret) {
-		dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
-		goto err_mailbox;
-	}
-
-	memcpy(buffer, cq_context, sizeof(*cq_context));
-
-err_mailbox:
-	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
-	return ret;
-}

View File

@@ -97,7 +97,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
 	netdev = hr_dev->iboe.netdevs[port];
 	if (!netdev) {
-		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
+		dev_err(dev, "can't find netdev on port(%u)!\n", port);
 		return -ENODEV;
 	}

@@ -239,7 +239,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
 	net_dev = hr_dev->iboe.netdevs[port];
 	if (!net_dev) {
 		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-		dev_err(dev, "Find netdev %u failed!\n", port);
+		dev_err(dev, "find netdev %u failed!\n", port);
 		return -EINVAL;
 	}

@@ -515,7 +515,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.destroy_ah = hns_roce_destroy_ah,
 	.destroy_cq = hns_roce_destroy_cq,
 	.disassociate_ucontext = hns_roce_disassociate_ucontext,
-	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
 	.get_dma_mr = hns_roce_get_dma_mr,
 	.get_link_layer = hns_roce_get_link_layer,
 	.get_port_immutable = hns_roce_port_immutable,

@@ -566,6 +565,15 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
 };

+static const struct ib_device_ops hns_roce_dev_restrack_ops = {
+	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+	.fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
+	.fill_res_qp_entry = hns_roce_fill_res_qp_entry,
+	.fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
+	.fill_res_mr_entry = hns_roce_fill_res_mr_entry,
+	.fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
+};
+
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 {
 	int ret;

@@ -605,6 +613,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
 	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+	ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
 	for (i = 0; i < hr_dev->caps.num_ports; i++) {
 		if (!hr_dev->iboe.netdevs[i])
 			continue;

@@ -650,17 +659,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
 				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
-				      hr_dev->caps.num_mtpts, 1);
+				      hr_dev->caps.num_mtpts);
 	if (ret) {
-		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
+		dev_err(dev, "failed to init MTPT context memory, aborting.\n");
 		return ret;
 	}

 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
 				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
-				      hr_dev->caps.num_qps, 1);
+				      hr_dev->caps.num_qps);
 	if (ret) {
-		dev_err(dev, "Failed to init QP context memory, aborting.\n");
+		dev_err(dev, "failed to init QP context memory, aborting.\n");
 		goto err_unmap_dmpt;
 	}

@@ -668,9 +677,9 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 				      HEM_TYPE_IRRL,
 				      hr_dev->caps.irrl_entry_sz *
 				      hr_dev->caps.max_qp_init_rdma,
-				      hr_dev->caps.num_qps, 1);
+				      hr_dev->caps.num_qps);
 	if (ret) {
-		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
+		dev_err(dev, "failed to init irrl_table memory, aborting.\n");
 		goto err_unmap_qp;
 	}

@@ -680,19 +689,19 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 					      HEM_TYPE_TRRL,
 					      hr_dev->caps.trrl_entry_sz *
 					      hr_dev->caps.max_qp_dest_rdma,
-					      hr_dev->caps.num_qps, 1);
+					      hr_dev->caps.num_qps);
 		if (ret) {
 			dev_err(dev,
-				"Failed to init trrl_table memory, aborting.\n");
+				"failed to init trrl_table memory, aborting.\n");
 			goto err_unmap_irrl;
} }
} }
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table, ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz, HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
hr_dev->caps.num_cqs, 1); hr_dev->caps.num_cqs);
if (ret) { if (ret) {
dev_err(dev, "Failed to init CQ context memory, aborting.\n"); dev_err(dev, "failed to init CQ context memory, aborting.\n");
goto err_unmap_trrl; goto err_unmap_trrl;
} }
@ -700,10 +709,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table, ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
HEM_TYPE_SRQC, HEM_TYPE_SRQC,
hr_dev->caps.srqc_entry_sz, hr_dev->caps.srqc_entry_sz,
hr_dev->caps.num_srqs, 1); hr_dev->caps.num_srqs);
if (ret) { if (ret) {
dev_err(dev, dev_err(dev,
"Failed to init SRQ context memory, aborting.\n"); "failed to init SRQ context memory, aborting.\n");
goto err_unmap_cq; goto err_unmap_cq;
} }
} }
@ -713,10 +722,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
&hr_dev->qp_table.sccc_table, &hr_dev->qp_table.sccc_table,
HEM_TYPE_SCCC, HEM_TYPE_SCCC,
hr_dev->caps.sccc_sz, hr_dev->caps.sccc_sz,
hr_dev->caps.num_qps, 1); hr_dev->caps.num_qps);
if (ret) { if (ret) {
dev_err(dev, dev_err(dev,
"Failed to init SCC context memory, aborting.\n"); "failed to init SCC context memory, aborting.\n");
goto err_unmap_srq; goto err_unmap_srq;
} }
} }
@ -725,10 +734,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table, ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER, HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz, hr_dev->caps.qpc_timer_entry_sz,
hr_dev->caps.qpc_timer_bt_num, 1); hr_dev->caps.qpc_timer_bt_num);
if (ret) { if (ret) {
dev_err(dev, dev_err(dev,
"Failed to init QPC timer memory, aborting.\n"); "failed to init QPC timer memory, aborting.\n");
goto err_unmap_ctx; goto err_unmap_ctx;
} }
} }
@ -737,10 +746,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table, ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
HEM_TYPE_CQC_TIMER, HEM_TYPE_CQC_TIMER,
hr_dev->caps.cqc_timer_entry_sz, hr_dev->caps.cqc_timer_entry_sz,
hr_dev->caps.cqc_timer_bt_num, 1); hr_dev->caps.cqc_timer_bt_num);
if (ret) { if (ret) {
dev_err(dev, dev_err(dev,
"Failed to init CQC timer memory, aborting.\n"); "failed to init CQC timer memory, aborting.\n");
goto err_unmap_qpc_timer; goto err_unmap_qpc_timer;
} }
} }
@ -749,7 +758,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table, ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
HEM_TYPE_GMV, HEM_TYPE_GMV,
hr_dev->caps.gmv_entry_sz, hr_dev->caps.gmv_entry_sz,
hr_dev->caps.gmv_entry_num, 1); hr_dev->caps.gmv_entry_num);
if (ret) { if (ret) {
dev_err(dev, dev_err(dev,
"failed to init gmv table memory, ret = %d\n", "failed to init gmv table memory, ret = %d\n",
@ -818,13 +827,13 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar); ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
if (ret) { if (ret) {
dev_err(dev, "Failed to allocate priv_uar.\n"); dev_err(dev, "failed to allocate priv_uar.\n");
goto err_uar_table_free; goto err_uar_table_free;
} }
ret = hns_roce_init_qp_table(hr_dev); ret = hns_roce_init_qp_table(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Failed to init qp_table.\n"); dev_err(dev, "failed to init qp_table.\n");
goto err_uar_table_free; goto err_uar_table_free;
} }
@ -837,9 +846,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
hns_roce_init_cq_table(hr_dev); hns_roce_init_cq_table(hr_dev);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_init_srq_table(hr_dev); hns_roce_init_srq_table(hr_dev);
}
return 0; return 0;
@ -902,14 +910,14 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (hr_dev->hw->cmq_init) { if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev); ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Init RoCE Command Queue failed!\n"); dev_err(dev, "init RoCE Command Queue failed!\n");
return ret; return ret;
} }
} }
ret = hr_dev->hw->hw_profile(hr_dev); ret = hr_dev->hw->hw_profile(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Get RoCE engine profile failed!\n"); dev_err(dev, "get RoCE engine profile failed!\n");
goto error_failed_cmd_init; goto error_failed_cmd_init;
} }


@ -190,7 +190,7 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
int ret; int ret;
mr = kzalloc(sizeof(*mr), GFP_KERNEL); mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (mr == NULL) if (!mr)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA; mr->type = MR_TYPE_DMA;
@ -249,7 +249,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_alloc_pbl; goto err_alloc_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key; mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
mr->ibmr.length = length;
return &mr->ibmr; return &mr->ibmr;
@ -586,7 +585,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
while (offset < end && npage < max_count) { while (offset < end && npage < max_count) {
count = 0; count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
offset, &count, NULL); offset, &count);
if (!mtts) if (!mtts)
return -ENOBUFS; return -ENOBUFS;
@ -835,7 +834,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_count = 0; mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total, start_index + total,
&mtt_count, NULL); &mtt_count);
if (!mtts || !mtt_count) if (!mtts || !mtt_count)
goto done; goto done;


@ -56,7 +56,7 @@ static void flush_work_handle(struct work_struct *work)
if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
if (ret) if (ret)
dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n", dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
ret); ret);
} }
@ -105,7 +105,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
xa_unlock(&hr_dev->qp_table_xa); xa_unlock(&hr_dev->qp_table_xa);
if (!qp) { if (!qp) {
dev_warn(dev, "Async event for bogus QP %08x\n", qpn); dev_warn(dev, "async event for bogus QP %08x\n", qpn);
return; return;
} }
@ -218,7 +218,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
num = 1; num = 1;
hr_qp->doorbell_qpn = 1;
} else { } else {
mutex_lock(&qp_table->bank_mutex); mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank); bankid = get_least_load_bankid_for_qp(qp_table->bank);
@ -234,8 +233,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
qp_table->bank[bankid].inuse++; qp_table->bank[bankid].inuse++;
mutex_unlock(&qp_table->bank_mutex); mutex_unlock(&qp_table->bank_mutex);
hr_qp->doorbell_qpn = (u32)num;
} }
hr_qp->qpn = num; hr_qp->qpn = num;
@ -278,7 +275,7 @@ static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
if (ret) if (ret)
dev_err(hr_dev->dev, "Failed to xa store for QPC\n"); dev_err(hr_dev->dev, "failed to xa store for QPC\n");
else else
/* add QP to device's QP list for softwc */ /* add QP to device's QP list for softwc */
add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
@ -299,14 +296,14 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
/* Alloc memory for QPC */ /* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) { if (ret) {
dev_err(dev, "Failed to get QPC table\n"); dev_err(dev, "failed to get QPC table\n");
goto err_out; goto err_out;
} }
/* Alloc memory for IRRL */ /* Alloc memory for IRRL */
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
if (ret) { if (ret) {
dev_err(dev, "Failed to get IRRL table\n"); dev_err(dev, "failed to get IRRL table\n");
goto err_put_qp; goto err_put_qp;
} }
@ -315,7 +312,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
hr_qp->qpn); hr_qp->qpn);
if (ret) { if (ret) {
dev_err(dev, "Failed to get TRRL table\n"); dev_err(dev, "failed to get TRRL table\n");
goto err_put_irrl; goto err_put_irrl;
} }
} }
@ -325,7 +322,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
hr_qp->qpn); hr_qp->qpn);
if (ret) { if (ret) {
dev_err(dev, "Failed to get SCC CTX table\n"); dev_err(dev, "failed to get SCC CTX table\n");
goto err_put_trrl; goto err_put_trrl;
} }
} }
@ -1206,7 +1203,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
if (ret) if (ret)
ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n", ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret); init_attr->qp_type, ret);
return ret; return ret;


@ -9,91 +9,223 @@
#include "hns_roce_device.h" #include "hns_roce_device.h"
#include "hns_roce_hw_v2.h" #include "hns_roce_hw_v2.h"
static int hns_roce_fill_cq(struct sk_buff *msg, #define MAX_ENTRY_NUM 256
struct hns_roce_v2_cq_context *context)
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{ {
if (rdma_nl_put_driver_u32(msg, "state", struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
hr_reg_read(context, CQC_ARM_ST))) struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
return -EMSGSIZE;
if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
goto err; goto err;
if (rdma_nl_put_driver_u32(msg, "ceqn", if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
hr_reg_read(context, CQC_CEQN)))
goto err; goto err;
if (rdma_nl_put_driver_u32(msg, "cqn", if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
hr_reg_read(context, CQC_CQN)))
goto err; goto err;
if (rdma_nl_put_driver_u32(msg, "hopnum", if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
hr_reg_read(context, CQC_CQE_HOP_NUM)))
goto err; goto err;
if (rdma_nl_put_driver_u32(msg, "pi", nla_nest_end(msg, table_attr);
hr_reg_read(context, CQC_CQ_PRODUCER_IDX)))
goto err;
if (rdma_nl_put_driver_u32(msg, "ci",
hr_reg_read(context, CQC_CQ_CONSUMER_IDX)))
goto err;
if (rdma_nl_put_driver_u32(msg, "coalesce",
hr_reg_read(context, CQC_CQ_MAX_CNT)))
goto err;
if (rdma_nl_put_driver_u32(msg, "period",
hr_reg_read(context, CQC_CQ_PERIOD)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cnt",
hr_reg_read(context, CQC_CQE_CNT)))
goto err;
return 0; return 0;
err: err:
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE; return -EMSGSIZE;
} }
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
struct ib_cq *ib_cq)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct hns_roce_v2_cq_context *context; struct hns_roce_v2_cq_context context;
struct nlattr *table_attr; u32 data[MAX_ENTRY_NUM] = {};
int offset = 0;
int ret; int ret;
if (!hr_dev->dfx->query_cqc_info) if (!hr_dev->hw->query_cqc)
return -EINVAL; return -EINVAL;
context = kzalloc(sizeof(struct hns_roce_v2_cq_context), GFP_KERNEL); ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
if (!context)
return -ENOMEM;
ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);
if (ret) if (ret)
goto err; return -EINVAL;
data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
data[offset++] = hr_reg_read(&context, CQC_SHIFT);
data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
data[offset++] = hr_reg_read(&context, CQC_CEQN);
data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);
ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
return ret;
}
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr) { if (!table_attr)
ret = -EMSGSIZE; return -EMSGSIZE;
goto err;
}
if (hns_roce_fill_cq(msg, context)) { if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
ret = -EMSGSIZE; goto err;
goto err_cancel_table;
} if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
goto err;
nla_nest_end(msg, table_attr); nla_nest_end(msg, table_attr);
kfree(context);
return 0; return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err: err:
kfree(context); nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
struct hns_roce_v2_qp_context context;
u32 data[MAX_ENTRY_NUM] = {};
int offset = 0;
int ret;
if (!hr_dev->hw->query_qpc)
return -EINVAL;
ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
if (ret)
return -EINVAL;
data[offset++] = hr_reg_read(&context, QPC_QP_ST);
data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
data[offset++] = hr_reg_read(&context, QPC_SRQN);
data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
data[offset++] = hr_reg_read(&context, QPC_RQWS);
data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);
ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
return ret;
}
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
return -EMSGSIZE;
if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
goto err;
nla_nest_end(msg, table_attr);
return 0;
err:
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
struct hns_roce_v2_mpt_entry context;
u32 data[MAX_ENTRY_NUM] = {};
int offset = 0;
int ret;
if (!hr_dev->hw->query_mpt)
return -EINVAL;
ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
if (ret)
return -EINVAL;
data[offset++] = hr_reg_read(&context, MPT_ST);
data[offset++] = hr_reg_read(&context, MPT_PD);
data[offset++] = hr_reg_read(&context, MPT_LKEY);
data[offset++] = hr_reg_read(&context, MPT_LEN_L);
data[offset++] = hr_reg_read(&context, MPT_LEN_H);
data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE);
data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM);
data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ);
data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ);
ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
return ret; return ret;
} }
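
Each *_raw callback above emits RDMA_NLDEV_ATTR_RES_RAW as a bare array of u32 values in exactly the order the driver packed them, so a consumer has to mirror that ordering; the payload itself carries no field names. A minimal userspace-style sketch of walking such a blob (illustrative only, not a kernel or rdma-core API):

#include <inttypes.h>
#include <stdio.h>

/* Illustrative only: print a RDMA_NLDEV_ATTR_RES_RAW payload as the
 * plain u32 array hns_roce packs it as; index N corresponds to the
 * N-th hr_reg_read() in the kernel-side fill function. */
static void dump_raw_restrack(const uint32_t *data, size_t len_bytes)
{
	size_t n = len_bytes / sizeof(uint32_t);

	for (size_t i = 0; i < n; i++)
		printf("field[%zu] = 0x%" PRIx32 "\n", i, data[i]);
}
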


@ -314,6 +314,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d #define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e #define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220 #define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
#define IRDMA_AE_INVALID_REQUEST 0x0223
#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301 #define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303 #define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304 #define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304


@ -138,59 +138,68 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
switch (info->ae_id) { switch (info->ae_id) {
case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BOUNDS_VIOLATION: case IRDMA_AE_AMP_BOUNDS_VIOLATION:
case IRDMA_AE_AMP_INVALID_STAG: case IRDMA_AE_AMP_INVALID_STAG:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; case IRDMA_AE_AMP_RIGHTS_VIOLATION:
fallthrough; case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BAD_PD: case IRDMA_AE_AMP_BAD_PD:
case IRDMA_AE_UDA_XMIT_BAD_PD:
qp->flush_code = FLUSH_PROT_ERR;
break;
case IRDMA_AE_AMP_BAD_QP: case IRDMA_AE_AMP_BAD_QP:
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
break;
case IRDMA_AE_AMP_BAD_STAG_KEY: case IRDMA_AE_AMP_BAD_STAG_KEY:
case IRDMA_AE_AMP_BAD_STAG_INDEX: case IRDMA_AE_AMP_BAD_STAG_INDEX:
case IRDMA_AE_AMP_TO_WRAP: case IRDMA_AE_AMP_TO_WRAP:
case IRDMA_AE_AMP_RIGHTS_VIOLATION:
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
case IRDMA_AE_PRIV_OPERATION_DENIED: case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_IB_INVALID_REQUEST: qp->flush_code = FLUSH_PROT_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_UDA_XMIT_BAD_PD:
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
case IRDMA_AE_UDA_L4LEN_INVALID:
case IRDMA_AE_DDP_UBE_INVALID_MO:
case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
qp->flush_code = FLUSH_LOC_LEN_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
qp->flush_code = FLUSH_REM_ACCESS_ERR; qp->flush_code = FLUSH_REM_ACCESS_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break; break;
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
case IRDMA_AE_UDA_L4LEN_INVALID:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
qp->flush_code = FLUSH_LOC_LEN_ERR; case IRDMA_AE_IB_REMOTE_OP_ERROR:
qp->flush_code = FLUSH_REM_OP_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break; break;
case IRDMA_AE_LCE_QP_CATASTROPHIC: case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp->flush_code = FLUSH_FATAL_ERR; qp->flush_code = FLUSH_FATAL_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break; break;
case IRDMA_AE_DDP_UBE_INVALID_MO:
case IRDMA_AE_IB_RREQ_AND_Q1_FULL: case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
qp->flush_code = FLUSH_GENERAL_ERR; qp->flush_code = FLUSH_GENERAL_ERR;
break; break;
case IRDMA_AE_LLP_TOO_MANY_RETRIES: case IRDMA_AE_LLP_TOO_MANY_RETRIES:
qp->flush_code = FLUSH_RETRY_EXC_ERR; qp->flush_code = FLUSH_RETRY_EXC_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break; break;
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
qp->flush_code = FLUSH_MW_BIND_ERR; qp->flush_code = FLUSH_MW_BIND_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break; break;
case IRDMA_AE_IB_REMOTE_OP_ERROR: case IRDMA_AE_IB_INVALID_REQUEST:
qp->flush_code = FLUSH_REM_OP_ERR; qp->flush_code = FLUSH_REM_INV_REQ_ERR;
qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
break; break;
default: default:
qp->flush_code = FLUSH_FATAL_ERR; qp->flush_code = FLUSH_GENERAL_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break; break;
} }
} }


@ -98,6 +98,7 @@ enum irdma_term_mpa_errors {
enum irdma_qp_event_type { enum irdma_qp_event_type {
IRDMA_QP_EVENT_CATASTROPHIC, IRDMA_QP_EVENT_CATASTROPHIC,
IRDMA_QP_EVENT_ACCESS_ERR, IRDMA_QP_EVENT_ACCESS_ERR,
IRDMA_QP_EVENT_REQ_ERR,
}; };
enum irdma_hw_stats_index_32b { enum irdma_hw_stats_index_32b {


@ -103,6 +103,7 @@ enum irdma_flush_opcode {
FLUSH_FATAL_ERR, FLUSH_FATAL_ERR,
FLUSH_RETRY_EXC_ERR, FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR, FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
}; };
enum irdma_cmpl_status { enum irdma_cmpl_status {


@ -2479,6 +2479,9 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
case IRDMA_QP_EVENT_ACCESS_ERR: case IRDMA_QP_EVENT_ACCESS_ERR:
ibevent.event = IB_EVENT_QP_ACCESS_ERR; ibevent.event = IB_EVENT_QP_ACCESS_ERR;
break; break;
case IRDMA_QP_EVENT_REQ_ERR:
ibevent.event = IB_EVENT_QP_REQ_ERR;
break;
} }
ibevent.device = iwqp->ibqp.device; ibevent.device = iwqp->ibqp.device;
ibevent.element.qp = &iwqp->ibqp; ibevent.element.qp = &iwqp->ibqp;


@ -299,13 +299,19 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
static int irdma_alloc_ucontext(struct ib_ucontext *uctx, static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata) struct ib_udata *udata)
{ {
#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
struct ib_device *ibdev = uctx->device; struct ib_device *ibdev = uctx->device;
struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_alloc_ucontext_req req; struct irdma_alloc_ucontext_req req = {};
struct irdma_alloc_ucontext_resp uresp = {}; struct irdma_alloc_ucontext_resp uresp = {};
struct irdma_ucontext *ucontext = to_ucontext(uctx); struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs; struct irdma_uk_attrs *uk_attrs;
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
return -EINVAL;
if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
return -EINVAL; return -EINVAL;
@ -317,7 +323,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
/* GEN_1 legacy support with libi40iw */ /* GEN_1 legacy support with libi40iw */
if (udata->outlen < sizeof(uresp)) { if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1) if (uk_attrs->hw_rev != IRDMA_GEN_1)
return -EOPNOTSUPP; return -EOPNOTSUPP;
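
The pattern repeated throughout these irdma hunks is to bound udata->inlen/outlen with offsetofend(), so a provider built against an older, shorter ABI is still accepted while a truncated buffer is rejected up front. A generic sketch of the idea; the struct and field names below are placeholders, not irdma ABI:

#include <linux/stddef.h>
#include <linux/types.h>
#include <rdma/ib_verbs.h>

/* Placeholder request layout: everything up to and including
 * 'last_required' must be present; anything after it is optional. */
struct example_req {
	__u32 first;
	__u32 last_required;
	__u32 optional_newer_field;
};

#define EXAMPLE_MIN_REQ_LEN offsetofend(struct example_req, last_required)

static int example_check_udata(const struct ib_udata *udata)
{
	/* Too short to hold the mandatory fields: reject. Longer than
	 * we know about: fine, the extra bytes are simply ignored. */
	if (udata->inlen < EXAMPLE_MIN_REQ_LEN)
		return -EINVAL;
	return 0;
}
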
@ -389,6 +395,7 @@ static void irdma_dealloc_ucontext(struct ib_ucontext *context)
*/ */
static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{ {
#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
struct irdma_pd *iwpd = to_iwpd(pd); struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@ -398,6 +405,9 @@ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
u32 pd_id = 0; u32 pd_id = 0;
int err; int err;
if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
return -EINVAL;
err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
&rf->next_pd); &rf->next_pd);
if (err) if (err)
@ -814,12 +824,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
struct ib_pd *ibpd = ibqp->pd; struct ib_pd *ibpd = ibqp->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd); struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device); struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf; struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_create_qp_req req; struct irdma_create_qp_req req = {};
struct irdma_create_qp_resp uresp = {}; struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0; u32 qp_num = 0;
int err_code; int err_code;
@ -836,6 +848,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
if (err_code) if (err_code)
return err_code; return err_code;
if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
return -EINVAL;
sq_size = init_attr->cap.max_send_wr; sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr; rq_size = init_attr->cap.max_recv_wr;
@ -1120,6 +1136,8 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata) int attr_mask, struct ib_udata *udata)
{ {
#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_pd *iwpd = to_iwpd(ibqp->pd); struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev; struct irdma_device *iwdev = iwqp->iwdev;
@ -1138,6 +1156,13 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info = &iwqp->roce_info; roce_info = &iwqp->roce_info;
udp_info = &iwqp->udp_info; udp_info = &iwqp->udp_info;
if (udata) {
/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
(udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
return -EINVAL;
}
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -1374,7 +1399,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags); spin_unlock_irqrestore(&iwqp->lock, flags);
if (udata) { if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata, if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen))) min(sizeof(ureq), udata->inlen)))
return -EINVAL; return -EINVAL;
@ -1426,7 +1451,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} else { } else {
iwqp->ibqp_state = attr->qp_state; iwqp->ibqp_state = attr->qp_state;
} }
if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext; struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata, ucontext = rdma_udata_to_drv_context(udata,
@ -1466,6 +1491,8 @@ exit:
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata) struct ib_udata *udata)
{ {
#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_qp *iwqp = to_iwqp(ibqp); struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev; struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@ -1480,6 +1507,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
int err; int err;
unsigned long flags; unsigned long flags;
if (udata) {
/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
(udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
return -EINVAL;
}
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -1565,7 +1599,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
case IB_QPS_RESET: case IB_QPS_RESET:
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags); spin_unlock_irqrestore(&iwqp->lock, flags);
if (udata) { if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata, if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen))) min(sizeof(ureq), udata->inlen)))
return -EINVAL; return -EINVAL;
@ -1662,7 +1696,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
} }
} }
} }
if (attr_mask & IB_QP_STATE && udata && if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext; struct irdma_ucontext *ucontext;
@ -1797,6 +1831,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
static int irdma_resize_cq(struct ib_cq *ibcq, int entries, static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct ib_udata *udata) struct ib_udata *udata)
{ {
#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
struct irdma_cq *iwcq = to_iwcq(ibcq); struct irdma_cq *iwcq = to_iwcq(ibcq);
struct irdma_sc_dev *dev = iwcq->sc_cq.dev; struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
struct irdma_cqp_request *cqp_request; struct irdma_cqp_request *cqp_request;
@ -1819,6 +1854,9 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
IRDMA_FEATURE_CQ_RESIZE)) IRDMA_FEATURE_CQ_RESIZE))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
return -EINVAL;
if (entries > rf->max_cqe) if (entries > rf->max_cqe)
return -EINVAL; return -EINVAL;
@ -1951,6 +1989,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr, const struct ib_cq_init_attr *attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
struct ib_device *ibdev = ibcq->device; struct ib_device *ibdev = ibcq->device;
struct irdma_device *iwdev = to_iwdev(ibdev); struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf; struct irdma_pci_f *rf = iwdev->rf;
@ -1969,6 +2009,11 @@ static int irdma_create_cq(struct ib_cq *ibcq,
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code) if (err_code)
return err_code; return err_code;
if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
return -EINVAL;
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq); &rf->next_cq);
if (err_code) if (err_code)
@ -2746,6 +2791,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
u64 virt, int access, u64 virt, int access,
struct ib_udata *udata) struct ib_udata *udata)
{ {
#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_ucontext *ucontext; struct irdma_ucontext *ucontext;
struct irdma_pble_alloc *palloc; struct irdma_pble_alloc *palloc;
@ -2763,6 +2809,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
return ERR_PTR(-EINVAL);
region = ib_umem_get(pd->device, start, len, access); region = ib_umem_get(pd->device, start, len, access);
if (IS_ERR(region)) { if (IS_ERR(region)) {
@ -3315,6 +3364,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_RETRY_EXC_ERR; return IB_WC_RETRY_EXC_ERR;
case FLUSH_MW_BIND_ERR: case FLUSH_MW_BIND_ERR:
return IB_WC_MW_BIND_ERR; return IB_WC_MW_BIND_ERR;
case FLUSH_REM_INV_REQ_ERR:
return IB_WC_REM_INV_REQ_ERR;
case FLUSH_FATAL_ERR: case FLUSH_FATAL_ERR:
default: default:
return IB_WC_FATAL_ERR; return IB_WC_FATAL_ERR;
@ -4296,12 +4347,16 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
struct rdma_ah_init_attr *attr, struct rdma_ah_init_attr *attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
struct irdma_device *iwdev = to_iwdev(ibah->pd->device); struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
struct irdma_create_ah_resp uresp; struct irdma_create_ah_resp uresp;
struct irdma_ah *parent_ah; struct irdma_ah *parent_ah;
int err; int err;
if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
return -EINVAL;
err = irdma_setup_ah(ibah, attr); err = irdma_setup_ah(ibah, attr);
if (err) if (err)
return err; return err;


@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_mr; goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
mr->ibmr.length = length;
mr->ibmr.page_size = 1U << shift; mr->ibmr.page_size = 1U << shift;
return &mr->ibmr; return &mr->ibmr;


@ -907,6 +907,7 @@ static bool devx_is_whitelist_cmd(void *in)
case MLX5_CMD_OP_QUERY_HCA_CAP: case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return true; return true;
default: default:
return false; return false;
@ -962,6 +963,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
case MLX5_CMD_OP_QUERY_CONG_PARAMS: case MLX5_CMD_OP_QUERY_CONG_PARAMS:
case MLX5_CMD_OP_QUERY_CONG_STATISTICS: case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
case MLX5_CMD_OP_QUERY_LAG: case MLX5_CMD_OP_QUERY_LAG:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return true; return true;
default: default:
return false; return false;
@ -2158,32 +2160,39 @@ err:
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext, static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
struct uverbs_attr_bundle *attrs, struct uverbs_attr_bundle *attrs,
struct devx_umem *obj) struct devx_umem *obj, u32 access_flags)
{ {
u64 addr; u64 addr;
size_t size; size_t size;
u32 access;
int err; int err;
if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) || if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN)) uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
return -EFAULT; return -EFAULT;
err = uverbs_get_flags32(&access, attrs, err = ib_check_mr_access(&dev->ib_dev, access_flags);
MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
if (err) if (err)
return err; return err;
err = ib_check_mr_access(&dev->ib_dev, access); if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
if (err) struct ib_umem_dmabuf *umem_dmabuf;
return err; int dmabuf_fd;
obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access); err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
if (IS_ERR(obj->umem)) MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
return PTR_ERR(obj->umem); if (err)
return -EFAULT;
umem_dmabuf = ib_umem_dmabuf_get_pinned(
&dev->ib_dev, addr, size, dmabuf_fd, access_flags);
if (IS_ERR(umem_dmabuf))
return PTR_ERR(umem_dmabuf);
obj->umem = &umem_dmabuf->umem;
} else {
obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
}
return 0; return 0;
} }
@ -2222,7 +2231,8 @@ static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev, static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
struct uverbs_attr_bundle *attrs, struct uverbs_attr_bundle *attrs,
struct devx_umem *obj, struct devx_umem *obj,
struct devx_umem_reg_cmd *cmd) struct devx_umem_reg_cmd *cmd,
int access)
{ {
unsigned long pgsz_bitmap; unsigned long pgsz_bitmap;
unsigned int page_size; unsigned int page_size;
@ -2271,6 +2281,9 @@ static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
MLX5_SET(umem, umem, page_offset, MLX5_SET(umem, umem, page_offset,
ib_umem_dma_offset(obj->umem, page_size)); ib_umem_dma_offset(obj->umem, page_size));
if (mlx5_umem_needs_ats(dev, obj->umem, access))
MLX5_SET(umem, umem, ats, 1);
mlx5_ib_populate_pas(obj->umem, page_size, mtt, mlx5_ib_populate_pas(obj->umem, page_size, mtt,
(obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) | (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
MLX5_IB_MTT_READ); MLX5_IB_MTT_READ);
@ -2288,20 +2301,30 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
int access_flags;
int err; int err;
if (!c->devx_uid) if (!c->devx_uid)
return -EINVAL; return -EINVAL;
err = uverbs_get_flags32(&access_flags, attrs,
MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_RELAXED_ORDERING);
if (err)
return err;
obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL); obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
if (!obj) if (!obj)
return -ENOMEM; return -ENOMEM;
err = devx_umem_get(dev, &c->ibucontext, attrs, obj); err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
if (err) if (err)
goto err_obj_free; goto err_obj_free;
err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd); err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
if (err) if (err)
goto err_umem_release; goto err_umem_release;
@ -2833,6 +2856,8 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN, UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
UVERBS_ATTR_TYPE(u64), UVERBS_ATTR_TYPE(u64),
UA_MANDATORY), UA_MANDATORY),
UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
UA_OPTIONAL),
UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
enum ib_access_flags), enum ib_access_flags),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP, UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
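
With the optional RAW_FD attribute added above, a devx umem can now be backed by a dma-buf. A rough userspace sketch of the intended flow, assuming rdma-core's mlx5dv_devx_umem_reg_ex() grows matching support; the dmabuf_fd field and MLX5DV_UMEM_MASK_DMABUF mask below are assumptions mirroring the kernel attribute, not a confirmed API:

#include <stdint.h>
#include <infiniband/mlx5dv.h>

/* Sketch: register a dma-buf exported by another device as a devx
 * umem. 'ctx' is an opened ibv_context, 'fd' is the dma-buf fd. */
static struct mlx5dv_devx_umem *reg_dmabuf_umem(struct ibv_context *ctx,
						int fd, size_t size)
{
	struct mlx5dv_devx_umem_in in = {
		.size = size,
		.access = IBV_ACCESS_LOCAL_WRITE |
			  IBV_ACCESS_RELAXED_ORDERING,
		.pgsz_bitmap = UINT64_MAX,
		.dmabuf_fd = fd,			/* assumed field */
		.comp_mask = MLX5DV_UMEM_MASK_DMABUF,	/* assumed mask  */
	};

	return mlx5dv_devx_umem_reg_ex(ctx, &in);
}
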


@ -26,7 +26,7 @@
#include <linux/mlx5/eswitch.h> #include <linux/mlx5/eswitch.h>
#include <linux/list.h> #include <linux/list.h>
#include <rdma/ib_smi.h> #include <rdma/ib_smi.h>
#include <rdma/ib_umem.h> #include <rdma/ib_umem_odp.h>
#include <rdma/lag.h> #include <rdma/lag.h>
#include <linux/in.h> #include <linux/in.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
@ -46,7 +46,6 @@
#include <rdma/uverbs_ioctl.h> #include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h> #include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h> #include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>
#define UVERBS_MODULE_NAME mlx5_ib #define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h> #include <rdma/uverbs_named_ioctl.h>
@ -1826,6 +1825,9 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
if (MLX5_CAP_GEN(dev->mdev, drain_sigerr)) if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS; resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
resp->comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
return 0; return 0;
} }


@ -30,7 +30,6 @@
* SOFTWARE. * SOFTWARE.
*/ */
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h> #include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h" #include "mlx5_ib.h"
#include <linux/jiffies.h> #include <linux/jiffies.h>
@ -152,6 +151,7 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
for (i = 0; i < 8; i++) for (i = 0; i < 8; i++)
mlx5_write64(&mmio_wqe[i * 2], mlx5_write64(&mmio_wqe[i * 2],
bf->bfreg->map + bf->offset + i * 8); bf->bfreg->map + bf->offset + i * 8);
io_stop_wc();
bf->offset ^= bf->buf_size; bf->offset ^= bf->buf_size;


@ -1563,4 +1563,40 @@ static inline bool rt_supported(int ts_cap)
return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME || return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME; ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
} }
/*
* PCI Peer to Peer is a trainwreck. If no switch is present then things
* sometimes work, depending on the pci_distance_p2p logic for excluding broken
* root complexes. However if a switch is present in the path, then things get
* really ugly depending on how the switch is setup. This table assumes that the
* root complex is strict and is validating that all req/reps are matches
* perfectly - so any scenario where it sees only half the transaction is a
* failure.
*
* CR/RR/DT ATS RO P2P
* 00X X X OK
* 010 X X fails (request is routed to root but root never sees comp)
* 011 0 X fails (request is routed to root but root never sees comp)
* 011 1 X OK
* 10X X 1 OK
* 101 X 0 fails (completion is routed to root but root didn't see req)
* 110 X 0 SLOW
* 111 0 0 SLOW
* 111 1 0 fails (completion is routed to root but root didn't see req)
* 111 1 1 OK
*
* Unfortunately we cannot reliably know if a switch is present or what the
* CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
* CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
*
* For now assume if the umem is a dma_buf then it is P2P.
*/
static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
struct ib_umem *umem, int access_flags)
{
if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
return false;
return access_flags & IB_ACCESS_RELAXED_ORDERING;
}
#endif /* MLX5_IB_H */ #endif /* MLX5_IB_H */


@ -39,9 +39,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <linux/dma-resv.h> #include <linux/dma-resv.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h> #include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "dm.h" #include "dm.h"
#include "mlx5_ib.h" #include "mlx5_ib.h"
#include "umr.h" #include "umr.h"
@ -937,7 +935,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
* cache then synchronously create an uncached one. * cache then synchronously create an uncached one.
*/ */
if (!ent || ent->limit == 0 || if (!ent || ent->limit == 0 ||
!mlx5r_umr_can_reconfig(dev, 0, access_flags)) { !mlx5r_umr_can_reconfig(dev, 0, access_flags) ||
mlx5_umem_needs_ats(dev, umem, access_flags)) {
mutex_lock(&dev->slow_path_mutex); mutex_lock(&dev->slow_path_mutex);
mr = reg_create(pd, umem, iova, access_flags, page_size, false); mr = reg_create(pd, umem, iova, access_flags, page_size, false);
mutex_unlock(&dev->slow_path_mutex); mutex_unlock(&dev->slow_path_mutex);
@ -1018,6 +1017,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
MLX5_SET(mkc, mkc, translations_octword_size, MLX5_SET(mkc, mkc, translations_octword_size,
get_octo_len(iova, umem->length, mr->page_shift)); get_octo_len(iova, umem->length, mr->page_shift));
MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
if (mlx5_umem_needs_ats(dev, umem, access_flags))
MLX5_SET(mkc, mkc, ma_translation_mode, 1);
if (populate) { if (populate) {
MLX5_SET(create_mkey_in, in, translations_octword_actual_size, MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(iova, umem->length, mr->page_shift)); get_octo_len(iova, umem->length, mr->page_shift));
@ -1402,7 +1403,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
upd_flags |= MLX5_IB_UPD_XLT_ACCESS; upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
} }
mr->ibmr.length = new_umem->length;
mr->ibmr.iova = iova; mr->ibmr.iova = iova;
mr->ibmr.length = new_umem->length; mr->ibmr.length = new_umem->length;
mr->page_shift = order_base_2(page_size); mr->page_shift = order_base_2(page_size);


@ -30,7 +30,6 @@
* SOFTWARE. * SOFTWARE.
*/ */
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h> #include <rdma/ib_umem_odp.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
@ -795,7 +794,8 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{ {
if (!mmkey) if (!mmkey)
return false; return false;
if (mmkey->type == MLX5_MKEY_MW) if (mmkey->type == MLX5_MKEY_MW ||
mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key); return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
return mmkey->key == key; return mmkey->key == key;
} }


@ -1252,7 +1252,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN); strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
} else { } else {
/* /*
* The board ID is a string but the firmware byte * The board ID is a string but the firmware byte


@ -1363,7 +1363,7 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv & dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK) OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT; >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
strlcpy(dev->model_number, strscpy(dev->model_number,
hba_attribs->controller_model_number, hba_attribs->controller_model_number,
sizeof(dev->model_number)); sizeof(dev->model_number));
} }


@ -2124,7 +2124,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
isfatal = 1; isfatal = 1;
strlcpy(msg, strscpy(msg,
"[Memory BIST test failed, InfiniPath hardware unusable]", "[Memory BIST test failed, InfiniPath hardware unusable]",
msgl); msgl);
/* ignore from now on, so disable until driver reloaded */ /* ignore from now on, so disable until driver reloaded */


@ -295,7 +295,7 @@ void qib_free_irq(struct qib_devdata *dd)
* Setup pcie interrupt stuff again after a reset. I'd like to just call * Setup pcie interrupt stuff again after a reset. I'd like to just call
* pci_enable_msi() again for msi, but when I do that, * pci_enable_msi() again for msi, but when I do that,
* the MSI enable bit doesn't get set in the command word, and * the MSI enable bit doesn't get set in the command word, and
* we switch to to a different interrupt vector, which is confusing, * we switch to a different interrupt vector, which is confusing,
* so I instead just do it all inline. Perhaps somehow can tie this * so I instead just do it all inline. Perhaps somehow can tie this
* into the PCIe hotplug support at some point * into the PCIe hotplug support at some point
*/ */


@ -95,7 +95,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int ret; int ret;
int off; int off;
int i; int i;
int flags;
dma_addr_t pa; dma_addr_t pa;
unsigned int gup_flags; unsigned int gup_flags;
struct mm_struct *mm; struct mm_struct *mm;
@ -132,8 +131,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
goto out; goto out;
} }
flags = IOMMU_READ | IOMMU_CACHE;
flags |= (writable) ? IOMMU_WRITE : 0;
gup_flags = FOLL_WRITE; gup_flags = FOLL_WRITE;
gup_flags |= (writable) ? 0 : FOLL_FORCE; gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK; cur_base = addr & PAGE_MASK;


@ -15,7 +15,7 @@
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("RDMA Verbs Transport Library"); MODULE_DESCRIPTION("RDMA Verbs Transport Library");
static int rvt_init(void) static int __init rvt_init(void)
{ {
int ret = rvt_driver_cq_init(); int ret = rvt_driver_cq_init();
@ -26,7 +26,7 @@ static int rvt_init(void)
} }
module_init(rvt_init); module_init(rvt_init);
static void rvt_cleanup(void) static void __exit rvt_cleanup(void)
{ {
rvt_cq_exit(); rvt_cq_exit();
} }


@ -151,18 +151,8 @@ int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt)
payload_size(pkt) + bth_pad(pkt)); payload_size(pkt) + bth_pad(pkt));
icrc = ~icrc; icrc = ~icrc;
if (unlikely(icrc != pkt_icrc)) { if (unlikely(icrc != pkt_icrc))
if (skb->protocol == htons(ETH_P_IPV6))
pr_warn_ratelimited("bad ICRC from %pI6c\n",
&ipv6_hdr(skb)->saddr);
else if (skb->protocol == htons(ETH_P_IP))
pr_warn_ratelimited("bad ICRC from %pI4\n",
&ip_hdr(skb)->saddr);
else
pr_warn_ratelimited("bad ICRC from unknown\n");
return -EINVAL; return -EINVAL;
}
return 0; return 0;
} }


@ -64,10 +64,10 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */ /* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key); u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr); void rxe_mr_init_dma(int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr); int access, struct rxe_mr *mr);
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr); int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir); enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma, int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,


@ -32,8 +32,8 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_USER: case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG: case IB_MR_TYPE_MEM_REG:
if (iova < mr->iova || length > mr->length || if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
iova > mr->iova + mr->length - length) iova > mr->ibmr.iova + mr->ibmr.length - length)
return -EFAULT; return -EFAULT;
return 0; return 0;
@ -103,17 +103,16 @@ err1:
return -ENOMEM; return -ENOMEM;
} }
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr) void rxe_mr_init_dma(int access, struct rxe_mr *mr)
{ {
rxe_mr_init(access, mr); rxe_mr_init(access, mr);
mr->ibmr.pd = &pd->ibpd;
mr->access = access; mr->access = access;
mr->state = RXE_MR_STATE_VALID; mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_DMA; mr->type = IB_MR_TYPE_DMA;
} }
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr) int access, struct rxe_mr *mr)
{ {
struct rxe_map **map; struct rxe_map **map;
@ -125,7 +124,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int err; int err;
int i; int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access); umem = ib_umem_get(&rxe->ib_dev, start, length, access);
if (IS_ERR(umem)) { if (IS_ERR(umem)) {
pr_warn("%s: Unable to pin memory region err = %d\n", pr_warn("%s: Unable to pin memory region err = %d\n",
__func__, (int)PTR_ERR(umem)); __func__, (int)PTR_ERR(umem));
@ -175,12 +174,8 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
} }
} }
mr->ibmr.pd = &pd->ibpd;
mr->umem = umem; mr->umem = umem;
mr->access = access; mr->access = access;
mr->length = length;
mr->iova = iova;
mr->va = start;
mr->offset = ib_umem_offset(umem); mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID; mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_USER; mr->type = IB_MR_TYPE_USER;
@ -197,7 +192,7 @@ err_out:
return err; return err;
} }
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr) int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
{ {
int err; int err;
@ -208,7 +203,6 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
if (err) if (err)
goto err1; goto err1;
mr->ibmr.pd = &pd->ibpd;
mr->max_buf = max_pages; mr->max_buf = max_pages;
mr->state = RXE_MR_STATE_FREE; mr->state = RXE_MR_STATE_FREE;
mr->type = IB_MR_TYPE_MEM_REG; mr->type = IB_MR_TYPE_MEM_REG;
@ -222,7 +216,7 @@ err1:
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out, static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out) size_t *offset_out)
{ {
size_t offset = iova - mr->iova + mr->offset; size_t offset = iova - mr->ibmr.iova + mr->offset;
int map_index; int map_index;
int buf_index; int buf_index;
u64 length; u64 length;
@ -605,7 +599,7 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
mr->access = access; mr->access = access;
mr->lkey = key; mr->lkey = key;
mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0; mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
mr->iova = wqe->wr.wr.reg.mr->iova; mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
mr->state = RXE_MR_STATE_VALID; mr->state = RXE_MR_STATE_VALID;
return 0; return 0;
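
The rxe_mr hunks above drop the driver's private va/iova/length copies in favour of the identical fields the core already keeps in the embedded struct ib_mr, so the bounds can never go stale relative to what the core reports. A hedged sketch of the resulting range check, with the overflow handling simplified compared to the code above:

#include <rdma/ib_verbs.h>

/* Illustrative: read MR bounds from the embedded core object instead
 * of shadow copies kept in the driver structure.
 */
static bool mr_range_ok(const struct ib_mr *ibmr, u64 iova, size_t length)
{
	return iova >= ibmr->iova &&
	       length <= ibmr->length &&
	       iova - ibmr->iova <= ibmr->length - length;
}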


@ -114,15 +114,15 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
/* C10-75 */ /* C10-75 */
if (mw->access & IB_ZERO_BASED) { if (mw->access & IB_ZERO_BASED) {
if (unlikely(wqe->wr.wr.mw.length > mr->length)) { if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
pr_err_once( pr_err_once(
"attempt to bind a ZB MW outside of the MR\n"); "attempt to bind a ZB MW outside of the MR\n");
return -EINVAL; return -EINVAL;
} }
} else { } else {
if (unlikely((wqe->wr.wr.mw.addr < mr->iova) || if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) > ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
(mr->iova + mr->length)))) { (mr->ibmr.iova + mr->ibmr.length)))) {
pr_err_once( pr_err_once(
"attempt to bind a VA MW outside of the MR\n"); "attempt to bind a VA MW outside of the MR\n");
return -EINVAL; return -EINVAL;


@ -145,7 +145,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop; goto drop;
if (skb_linearize(skb)) { if (skb_linearize(skb)) {
pr_err("skb_linearize failed\n");
ib_device_put(&rxe->ib_dev); ib_device_put(&rxe->ib_dev);
goto drop; goto drop;
} }


@ -19,34 +19,34 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
int has_srq) int has_srq)
{ {
if (cap->max_send_wr > rxe->attr.max_qp_wr) { if (cap->max_send_wr > rxe->attr.max_qp_wr) {
pr_warn("invalid send wr = %d > %d\n", pr_debug("invalid send wr = %u > %d\n",
cap->max_send_wr, rxe->attr.max_qp_wr); cap->max_send_wr, rxe->attr.max_qp_wr);
goto err1; goto err1;
} }
if (cap->max_send_sge > rxe->attr.max_send_sge) { if (cap->max_send_sge > rxe->attr.max_send_sge) {
pr_warn("invalid send sge = %d > %d\n", pr_debug("invalid send sge = %u > %d\n",
cap->max_send_sge, rxe->attr.max_send_sge); cap->max_send_sge, rxe->attr.max_send_sge);
goto err1; goto err1;
} }
if (!has_srq) { if (!has_srq) {
if (cap->max_recv_wr > rxe->attr.max_qp_wr) { if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
pr_warn("invalid recv wr = %d > %d\n", pr_debug("invalid recv wr = %u > %d\n",
cap->max_recv_wr, rxe->attr.max_qp_wr); cap->max_recv_wr, rxe->attr.max_qp_wr);
goto err1; goto err1;
} }
if (cap->max_recv_sge > rxe->attr.max_recv_sge) { if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
pr_warn("invalid recv sge = %d > %d\n", pr_debug("invalid recv sge = %u > %d\n",
cap->max_recv_sge, rxe->attr.max_recv_sge); cap->max_recv_sge, rxe->attr.max_recv_sge);
goto err1; goto err1;
} }
} }
if (cap->max_inline_data > rxe->max_inline_data) { if (cap->max_inline_data > rxe->max_inline_data) {
pr_warn("invalid max inline data = %d > %d\n", pr_debug("invalid max inline data = %u > %d\n",
cap->max_inline_data, rxe->max_inline_data); cap->max_inline_data, rxe->max_inline_data);
goto err1; goto err1;
} }
@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
} }
if (!init->recv_cq || !init->send_cq) { if (!init->recv_cq || !init->send_cq) {
pr_warn("missing cq\n"); pr_debug("missing cq\n");
goto err1; goto err1;
} }
@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
if (init->qp_type == IB_QPT_GSI) { if (init->qp_type == IB_QPT_GSI) {
if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) { if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
pr_warn("invalid port = %d\n", port_num); pr_debug("invalid port = %d\n", port_num);
goto err1; goto err1;
} }
port = &rxe->port; port = &rxe->port;
if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) { if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
pr_warn("GSI QP exists for port %d\n", port_num); pr_debug("GSI QP exists for port %d\n", port_num);
goto err1; goto err1;
} }
} }
@ -242,9 +242,9 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
skb_queue_head_init(&qp->req_pkts); skb_queue_head_init(&qp->req_pkts);
rxe_init_task(rxe, &qp->req.task, qp, rxe_init_task(&qp->req.task, qp,
rxe_requester, "req"); rxe_requester, "req");
rxe_init_task(rxe, &qp->comp.task, qp, rxe_init_task(&qp->comp.task, qp,
rxe_completer, "comp"); rxe_completer, "comp");
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */ qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
@ -292,7 +292,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
skb_queue_head_init(&qp->resp_pkts); skb_queue_head_init(&qp->resp_pkts);
rxe_init_task(rxe, &qp->resp.task, qp, rxe_init_task(&qp->resp.task, qp,
rxe_responder, "resp"); rxe_responder, "resp");
qp->resp.opcode = OPCODE_NONE; qp->resp.opcode = OPCODE_NONE;
@ -402,7 +402,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
attr->qp_state : cur_state; attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) { if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
pr_warn("invalid mask or state for qp\n"); pr_debug("invalid mask or state for qp\n");
goto err1; goto err1;
} }
@ -416,7 +416,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (mask & IB_QP_PORT) { if (mask & IB_QP_PORT) {
if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) { if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
pr_warn("invalid port %d\n", attr->port_num); pr_debug("invalid port %d\n", attr->port_num);
goto err1; goto err1;
} }
} }
@ -431,12 +431,12 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr)) if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
goto err1; goto err1;
if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) { if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
pr_warn("invalid alt port %d\n", attr->alt_port_num); pr_debug("invalid alt port %d\n", attr->alt_port_num);
goto err1; goto err1;
} }
if (attr->alt_timeout > 31) { if (attr->alt_timeout > 31) {
pr_warn("invalid QP alt timeout %d > 31\n", pr_debug("invalid QP alt timeout %d > 31\n",
attr->alt_timeout); attr->alt_timeout);
goto err1; goto err1;
} }
} }
@ -457,17 +457,16 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (mask & IB_QP_MAX_QP_RD_ATOMIC) { if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) { if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
pr_warn("invalid max_rd_atomic %d > %d\n", pr_debug("invalid max_rd_atomic %d > %d\n",
attr->max_rd_atomic, attr->max_rd_atomic,
rxe->attr.max_qp_rd_atom); rxe->attr.max_qp_rd_atom);
goto err1; goto err1;
} }
} }
if (mask & IB_QP_TIMEOUT) { if (mask & IB_QP_TIMEOUT) {
if (attr->timeout > 31) { if (attr->timeout > 31) {
pr_warn("invalid QP timeout %d > 31\n", pr_debug("invalid QP timeout %d > 31\n", attr->timeout);
attr->timeout);
goto err1; goto err1;
} }
} }
@ -797,7 +796,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
rxe_cleanup_task(&qp->comp.task); rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */ /* flush out any receive wr's or pending requests */
__rxe_do_task(&qp->req.task); if (qp->req.task.func)
__rxe_do_task(&qp->req.task);
if (qp->sq.queue) { if (qp->sq.queue) {
__rxe_do_task(&qp->comp.task); __rxe_do_task(&qp->comp.task);
__rxe_do_task(&qp->req.task); __rxe_do_task(&qp->req.task);
@ -833,8 +834,10 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
free_rd_atomic_resources(qp); free_rd_atomic_resources(qp);
kernel_sock_shutdown(qp->sk, SHUT_RDWR); if (qp->sk) {
sock_release(qp->sk); kernel_sock_shutdown(qp->sk, SHUT_RDWR);
sock_release(qp->sk);
}
} }
/* called when the last reference to the qp is dropped */ /* called when the last reference to the qp is dropped */


@ -112,23 +112,25 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
unsigned int num_elem) unsigned int num_elem)
{ {
enum queue_type type = q->type; enum queue_type type = q->type;
u32 new_prod;
u32 prod; u32 prod;
u32 cons; u32 cons;
if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type))) if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))
return -EINVAL; return -EINVAL;
prod = queue_get_producer(new_q, type); new_prod = queue_get_producer(new_q, type);
prod = queue_get_producer(q, type);
cons = queue_get_consumer(q, type); cons = queue_get_consumer(q, type);
while (!queue_empty(q, type)) { while ((prod - cons) & q->index_mask) {
memcpy(queue_addr_from_index(new_q, prod), memcpy(queue_addr_from_index(new_q, new_prod),
queue_addr_from_index(q, cons), new_q->elem_size); queue_addr_from_index(q, cons), new_q->elem_size);
prod = queue_next_index(new_q, prod); new_prod = queue_next_index(new_q, new_prod);
cons = queue_next_index(q, cons); cons = queue_next_index(q, cons);
} }
new_q->buf->producer_index = prod; new_q->buf->producer_index = new_prod;
q->buf->consumer_index = cons; q->buf->consumer_index = cons;
/* update private index copies */ /* update private index copies */
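
The resize fix above addresses a real termination bug: the old loop tested queue_empty(q, type), which re-reads the shared buffer indices that the loop body never advances, while the fix counts the remaining elements from local snapshots and keeps separate cursors for the old and new queues. The counting trick, sketched with illustrative names, relies on the ring size being a power of two:

#include <linux/types.h>

/* For a power-of-two ring, (prod - cons) & mask is the number of used
 * slots even after the 32-bit indices wrap around.
 */
static u32 ring_used(u32 prod, u32 cons, u32 index_mask)
{
	return (prod - cons) & index_mask;
}

Snapshotting prod and cons once also makes loop termination independent of any concurrent updates to the mapped buffer indices.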


@ -16,47 +16,36 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
unsigned int pkt_type; unsigned int pkt_type;
if (unlikely(!qp->valid)) if (unlikely(!qp->valid))
goto err1; return -EINVAL;
pkt_type = pkt->opcode & 0xe0; pkt_type = pkt->opcode & 0xe0;
switch (qp_type(qp)) { switch (qp_type(qp)) {
case IB_QPT_RC: case IB_QPT_RC:
if (unlikely(pkt_type != IB_OPCODE_RC)) { if (unlikely(pkt_type != IB_OPCODE_RC))
pr_warn_ratelimited("bad qp type\n"); return -EINVAL;
goto err1;
}
break; break;
case IB_QPT_UC: case IB_QPT_UC:
if (unlikely(pkt_type != IB_OPCODE_UC)) { if (unlikely(pkt_type != IB_OPCODE_UC))
pr_warn_ratelimited("bad qp type\n"); return -EINVAL;
goto err1;
}
break; break;
case IB_QPT_UD: case IB_QPT_UD:
case IB_QPT_GSI: case IB_QPT_GSI:
if (unlikely(pkt_type != IB_OPCODE_UD)) { if (unlikely(pkt_type != IB_OPCODE_UD))
pr_warn_ratelimited("bad qp type\n"); return -EINVAL;
goto err1;
}
break; break;
default: default:
pr_warn_ratelimited("unsupported qp type\n"); return -EINVAL;
goto err1;
} }
if (pkt->mask & RXE_REQ_MASK) { if (pkt->mask & RXE_REQ_MASK) {
if (unlikely(qp->resp.state != QP_STATE_READY)) if (unlikely(qp->resp.state != QP_STATE_READY))
goto err1; return -EINVAL;
} else if (unlikely(qp->req.state < QP_STATE_READY || } else if (unlikely(qp->req.state < QP_STATE_READY ||
qp->req.state > QP_STATE_DRAINED)) { qp->req.state > QP_STATE_DRAINED))
goto err1; return -EINVAL;
}
return 0; return 0;
err1:
return -EINVAL;
} }
static void set_bad_pkey_cntr(struct rxe_port *port) static void set_bad_pkey_cntr(struct rxe_port *port)
@ -84,26 +73,20 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
pkt->pkey_index = 0; pkt->pkey_index = 0;
if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) { if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
set_bad_pkey_cntr(port); set_bad_pkey_cntr(port);
goto err1; return -EINVAL;
} }
if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) { if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey; u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
if (unlikely(deth_qkey(pkt) != qkey)) { if (unlikely(deth_qkey(pkt) != qkey)) {
pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
deth_qkey(pkt), qkey, qpn);
set_qkey_viol_cntr(port); set_qkey_viol_cntr(port);
goto err1; return -EINVAL;
} }
} }
return 0; return 0;
err1:
return -EINVAL;
} }
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
@ -112,13 +95,10 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct sk_buff *skb = PKT_TO_SKB(pkt); struct sk_buff *skb = PKT_TO_SKB(pkt);
if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC) if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
goto done; return 0;
if (unlikely(pkt->port_num != qp->attr.port_num)) { if (unlikely(pkt->port_num != qp->attr.port_num))
pr_warn_ratelimited("port %d != qp port %d\n", return -EINVAL;
pkt->port_num, qp->attr.port_num);
goto err1;
}
if (skb->protocol == htons(ETH_P_IP)) { if (skb->protocol == htons(ETH_P_IP)) {
struct in_addr *saddr = struct in_addr *saddr =
@ -126,19 +106,9 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in_addr *daddr = struct in_addr *daddr =
&qp->pri_av.dgid_addr._sockaddr_in.sin_addr; &qp->pri_av.dgid_addr._sockaddr_in.sin_addr;
if (ip_hdr(skb)->daddr != saddr->s_addr) { if ((ip_hdr(skb)->daddr != saddr->s_addr) ||
pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n", (ip_hdr(skb)->saddr != daddr->s_addr))
&ip_hdr(skb)->daddr, return -EINVAL;
&saddr->s_addr);
goto err1;
}
if (ip_hdr(skb)->saddr != daddr->s_addr) {
pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
&ip_hdr(skb)->saddr,
&daddr->s_addr);
goto err1;
}
} else if (skb->protocol == htons(ETH_P_IPV6)) { } else if (skb->protocol == htons(ETH_P_IPV6)) {
struct in6_addr *saddr = struct in6_addr *saddr =
@ -146,24 +116,12 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in6_addr *daddr = struct in6_addr *daddr =
&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr; &qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;
if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) { if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) ||
pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n", memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr)))
&ipv6_hdr(skb)->daddr, saddr); return -EINVAL;
goto err1;
}
if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
&ipv6_hdr(skb)->saddr, daddr);
goto err1;
}
} }
done:
return 0; return 0;
err1:
return -EINVAL;
} }
static int hdr_check(struct rxe_pkt_info *pkt) static int hdr_check(struct rxe_pkt_info *pkt)
@ -175,24 +133,18 @@ static int hdr_check(struct rxe_pkt_info *pkt)
int index; int index;
int err; int err;
if (unlikely(bth_tver(pkt) != BTH_TVER)) { if (unlikely(bth_tver(pkt) != BTH_TVER))
pr_warn_ratelimited("bad tver\n");
goto err1; goto err1;
}
if (unlikely(qpn == 0)) { if (unlikely(qpn == 0))
pr_warn_once("QP 0 not supported");
goto err1; goto err1;
}
if (qpn != IB_MULTICAST_QPN) { if (qpn != IB_MULTICAST_QPN) {
index = (qpn == 1) ? port->qp_gsi_index : qpn; index = (qpn == 1) ? port->qp_gsi_index : qpn;
qp = rxe_pool_get_index(&rxe->qp_pool, index); qp = rxe_pool_get_index(&rxe->qp_pool, index);
if (unlikely(!qp)) { if (unlikely(!qp))
pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
goto err1; goto err1;
}
err = check_type_state(rxe, pkt, qp); err = check_type_state(rxe, pkt, qp);
if (unlikely(err)) if (unlikely(err))
@ -206,10 +158,8 @@ static int hdr_check(struct rxe_pkt_info *pkt)
if (unlikely(err)) if (unlikely(err))
goto err2; goto err2;
} else { } else {
if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) { if (unlikely((pkt->mask & RXE_GRH_MASK) == 0))
pr_warn_ratelimited("no grh for mcast qpn\n");
goto err1; goto err1;
}
} }
pkt->qp = qp; pkt->qp = qp;
@ -364,10 +314,8 @@ void rxe_rcv(struct sk_buff *skb)
if (unlikely(skb->len < RXE_BTH_BYTES)) if (unlikely(skb->len < RXE_BTH_BYTES))
goto drop; goto drop;
if (rxe_chk_dgid(rxe, skb) < 0) { if (rxe_chk_dgid(rxe, skb) < 0)
pr_warn_ratelimited("failed checking dgid\n");
goto drop; goto drop;
}
pkt->opcode = bth_opcode(pkt); pkt->opcode = bth_opcode(pkt);
pkt->psn = bth_psn(pkt); pkt->psn = bth_psn(pkt);


@ -809,10 +809,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (!skb) if (!skb)
return RESPST_ERR_RNR; return RESPST_ERR_RNR;
err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt), rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
payload, RXE_FROM_MR_OBJ); payload, RXE_FROM_MR_OBJ);
if (err)
pr_err("Failed copying memory\n");
if (mr) if (mr)
rxe_put(mr); rxe_put(mr);
@ -823,10 +821,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
} }
err = rxe_xmit_packet(qp, &ack_pkt, skb); err = rxe_xmit_packet(qp, &ack_pkt, skb);
if (err) { if (err)
pr_err("Failed sending RDMA reply.\n");
return RESPST_ERR_RNR; return RESPST_ERR_RNR;
}
res->read.va += payload; res->read.va += payload;
res->read.resid -= payload; res->read.resid -= payload;
@ -1028,50 +1024,41 @@ finish:
return RESPST_CLEANUP; return RESPST_CLEANUP;
} }
static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
int opcode, const char *msg)
{ {
int err = 0; int err;
struct rxe_pkt_info ack_pkt; struct rxe_pkt_info ack_pkt;
struct sk_buff *skb; struct sk_buff *skb;
skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE, skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome);
0, psn, syndrome); if (!skb)
if (!skb) { return -ENOMEM;
err = -ENOMEM;
goto err1;
}
err = rxe_xmit_packet(qp, &ack_pkt, skb); err = rxe_xmit_packet(qp, &ack_pkt, skb);
if (err) if (err)
pr_err_ratelimited("Failed sending ack\n"); pr_err_ratelimited("Failed sending %s\n", msg);
err1:
return err; return err;
} }
static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
return send_common_ack(qp, syndrome, psn,
IB_OPCODE_RC_ACKNOWLEDGE, "ACK");
}
static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn) static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{ {
int err = 0; int ret = send_common_ack(qp, syndrome, psn,
struct rxe_pkt_info ack_pkt; IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, "ATOMIC ACK");
struct sk_buff *skb;
skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
0, psn, syndrome);
if (!skb) {
err = -ENOMEM;
goto out;
}
err = rxe_xmit_packet(qp, &ack_pkt, skb);
if (err)
pr_err_ratelimited("Failed sending atomic ack\n");
/* have to clear this since it is used to trigger /* have to clear this since it is used to trigger
* long read replies * long read replies
*/ */
qp->resp.res = NULL; qp->resp.res = NULL;
out: return ret;
return err;
} }
static enum resp_states acknowledge(struct rxe_qp *qp, static enum resp_states acknowledge(struct rxe_qp *qp,


@ -94,10 +94,9 @@ void rxe_do_task(struct tasklet_struct *t)
task->ret = ret; task->ret = ret;
} }
int rxe_init_task(void *obj, struct rxe_task *task, int rxe_init_task(struct rxe_task *task,
void *arg, int (*func)(void *), char *name) void *arg, int (*func)(void *), char *name)
{ {
task->obj = obj;
task->arg = arg; task->arg = arg;
task->func = func; task->func = func;
snprintf(task->name, sizeof(task->name), "%s", name); snprintf(task->name, sizeof(task->name), "%s", name);


@ -19,7 +19,6 @@ enum {
* called again. * called again.
*/ */
struct rxe_task { struct rxe_task {
void *obj;
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
int state; int state;
spinlock_t state_lock; /* spinlock for task state */ spinlock_t state_lock; /* spinlock for task state */
@ -35,7 +34,7 @@ struct rxe_task {
* arg => parameter to pass to fcn * arg => parameter to pass to fcn
* func => function to call until it returns != 0 * func => function to call until it returns != 0
*/ */
int rxe_init_task(void *obj, struct rxe_task *task, int rxe_init_task(struct rxe_task *task,
void *arg, int (*func)(void *), char *name); void *arg, int (*func)(void *), char *name);
/* cleanup task */ /* cleanup task */


@ -262,7 +262,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER); recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
recv_wqe->wr_id = ibwr->wr_id; recv_wqe->wr_id = ibwr->wr_id;
recv_wqe->num_sge = num_sge;
memcpy(recv_wqe->dma.sge, ibwr->sg_list, memcpy(recv_wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge)); num_sge * sizeof(struct ib_sge));
@ -526,7 +525,6 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
const struct ib_send_wr *ibwr) const struct ib_send_wr *ibwr)
{ {
wr->wr_id = ibwr->wr_id; wr->wr_id = ibwr->wr_id;
wr->num_sge = ibwr->num_sge;
wr->opcode = ibwr->opcode; wr->opcode = ibwr->opcode;
wr->send_flags = ibwr->send_flags; wr->send_flags = ibwr->send_flags;
@ -903,7 +901,9 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
rxe_get(pd); rxe_get(pd);
rxe_mr_init_dma(pd, access, mr); mr->ibmr.pd = ibpd;
rxe_mr_init_dma(access, mr);
rxe_finalize(mr); rxe_finalize(mr);
return &mr->ibmr; return &mr->ibmr;
@ -928,8 +928,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_get(pd); rxe_get(pd);
mr->ibmr.pd = ibpd;
err = rxe_mr_init_user(pd, start, length, iova, access, mr); err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
if (err) if (err)
goto err3; goto err3;
@ -938,7 +939,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
return &mr->ibmr; return &mr->ibmr;
err3: err3:
rxe_put(pd);
rxe_cleanup(mr); rxe_cleanup(mr);
err2: err2:
return ERR_PTR(err); return ERR_PTR(err);
@ -962,8 +962,9 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
} }
rxe_get(pd); rxe_get(pd);
mr->ibmr.pd = ibpd;
err = rxe_mr_init_fast(pd, max_num_sg, mr); err = rxe_mr_init_fast(max_num_sg, mr);
if (err) if (err)
goto err2; goto err2;
@ -972,7 +973,6 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
return &mr->ibmr; return &mr->ibmr;
err2: err2:
rxe_put(pd);
rxe_cleanup(mr); rxe_cleanup(mr);
err1: err1:
return ERR_PTR(err); return ERR_PTR(err);
@ -1007,12 +1007,9 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page); n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
mr->va = ibmr->iova;
mr->iova = ibmr->iova;
mr->length = ibmr->length;
mr->page_shift = ilog2(ibmr->page_size); mr->page_shift = ilog2(ibmr->page_size);
mr->page_mask = ibmr->page_size - 1; mr->page_mask = ibmr->page_size - 1;
mr->offset = mr->iova & mr->page_mask; mr->offset = ibmr->iova & mr->page_mask;
return n; return n;
} }


@ -305,9 +305,6 @@ struct rxe_mr {
u32 rkey; u32 rkey;
enum rxe_mr_state state; enum rxe_mr_state state;
enum ib_mr_type type; enum ib_mr_type type;
u64 va;
u64 iova;
size_t length;
u32 offset; u32 offset;
int access; int access;


@ -1,7 +1,10 @@
config RDMA_SIW config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver" tristate "Software RDMA over TCP/IP (iWARP) driver"
depends on INET && INFINIBAND && LIBCRC32C depends on INET && INFINIBAND
depends on INFINIBAND_VIRT_DMA depends on INFINIBAND_VIRT_DMA
select LIBCRC32C
select CRYPTO
select CRYPTO_CRC32C
help help
This driver implements the iWARP RDMA transport over This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a the Linux TCP/IP network stack. It enables a system with a


@ -418,6 +418,7 @@ struct siw_qp {
struct ib_qp base_qp; struct ib_qp base_qp;
struct siw_device *sdev; struct siw_device *sdev;
struct kref ref; struct kref ref;
struct completion qp_free;
struct list_head devq; struct list_head devq;
int tx_cpu; int tx_cpu;
struct siw_qp_attrs attrs; struct siw_qp_attrs attrs;


@ -1342,6 +1342,6 @@ void siw_free_qp(struct kref *ref)
vfree(qp->orq); vfree(qp->orq);
siw_put_tx_cpu(qp->tx_cpu); siw_put_tx_cpu(qp->tx_cpu);
complete(&qp->qp_free);
atomic_dec(&sdev->num_qp); atomic_dec(&sdev->num_qp);
} }


@ -961,27 +961,28 @@ out:
static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx) static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
{ {
struct sk_buff *skb = srx->skb; struct sk_buff *skb = srx->skb;
int avail = min(srx->skb_new, srx->fpdu_part_rem);
u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad; u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
__wsum crc_in, crc_own = 0; __wsum crc_in, crc_own = 0;
siw_dbg_qp(qp, "expected %d, available %d, pad %u\n", siw_dbg_qp(qp, "expected %d, available %d, pad %u\n",
srx->fpdu_part_rem, srx->skb_new, srx->pad); srx->fpdu_part_rem, srx->skb_new, srx->pad);
if (srx->skb_new < srx->fpdu_part_rem) skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
srx->skb_new -= avail;
srx->skb_offset += avail;
srx->skb_copied += avail;
srx->fpdu_part_rem -= avail;
if (srx->fpdu_part_rem)
return -EAGAIN; return -EAGAIN;
skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
if (srx->mpa_crc_hd && srx->pad)
crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
srx->skb_new -= srx->fpdu_part_rem;
srx->skb_offset += srx->fpdu_part_rem;
srx->skb_copied += srx->fpdu_part_rem;
if (!srx->mpa_crc_hd) if (!srx->mpa_crc_hd)
return 0; return 0;
if (srx->pad)
crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
/* /*
* CRC32 is computed, transmitted and received directly in NBO, * CRC32 is computed, transmitted and received directly in NBO,
* so there's never a reason to convert byte order. * so there's never a reason to convert byte order.
@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
* completely received. * completely received.
*/ */
if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) { if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) {
bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR; int hdrlen = iwarp_pktinfo[opcode].hdr_len;
if (srx->skb_new < bytes) bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
return -EAGAIN;
skb_copy_bits(skb, srx->skb_offset, skb_copy_bits(skb, srx->skb_offset,
(char *)c_hdr + srx->fpdu_part_rcvd, bytes); (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
srx->skb_new -= bytes; srx->skb_new -= bytes;
srx->skb_offset += bytes; srx->skb_offset += bytes;
srx->skb_copied += bytes; srx->skb_copied += bytes;
if (srx->fpdu_part_rcvd < hdrlen)
return -EAGAIN;
} }
/* /*
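
Both siw hunks above (trailer and header parsing) switch to the same incremental-receive idiom: copy min(available, remaining) bytes, advance the cursors, and return -EAGAIN until the field is complete, instead of refusing to consume anything until everything has arrived. A condensed sketch of the pattern with hypothetical names:

#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

struct field_rx {
	u8	*dst;	/* reassembly buffer */
	int	off;	/* bytes already received */
	int	rem;	/* bytes still missing */
};

static int consume_field(struct field_rx *rx, const u8 *buf, int avail)
{
	int n = min(avail, rx->rem);

	memcpy(rx->dst + rx->off, buf, n);
	rx->off += n;
	rx->rem -= n;

	return rx->rem ? -EAGAIN : 0;	/* caller retries on -EAGAIN */
}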


@ -480,6 +480,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
list_add_tail(&qp->devq, &sdev->qp_list); list_add_tail(&qp->devq, &sdev->qp_list);
spin_unlock_irqrestore(&sdev->lock, flags); spin_unlock_irqrestore(&sdev->lock, flags);
init_completion(&qp->qp_free);
return 0; return 0;
err_out_xa: err_out_xa:
@ -624,6 +626,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
qp->scq = qp->rcq = NULL; qp->scq = qp->rcq = NULL;
siw_qp_put(qp); siw_qp_put(qp);
wait_for_completion(&qp->qp_free);
return 0; return 0;
} }
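
The siw change pairs a kref with a completion so that destroy cannot return, and the containing object cannot be reused, while a late reference holder is still running. The generic shape of the handshake, illustrative rather than siw's exact code:

#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref		ref;
	struct completion	freed;
};

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);

	complete(&o->freed);	/* signalled by the last reference */
}

static void obj_destroy(struct obj *o)
{
	kref_put(&o->ref, obj_release);
	wait_for_completion(&o->freed);	/* now nobody can touch *o */
	kfree(o);
}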


@ -884,8 +884,8 @@ int ipoib_cm_dev_open(struct net_device *dev)
goto err_cm; goto err_cm;
} }
ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), ret = ib_cm_listen(priv->cm.id,
0); cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num));
if (ret) { if (ret) {
pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name, pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
IPOIB_CM_IETF_ID | priv->qp->qp_num); IPOIB_CM_IETF_ID | priv->qp->qp_num);


@ -65,10 +65,10 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
ib_get_device_fw_str(priv->ca, drvinfo->fw_version); ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent), strscpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
sizeof(drvinfo->bus_info)); sizeof(drvinfo->bus_info));
strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver)); strscpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
} }
static int ipoib_get_coalesce(struct net_device *dev, static int ipoib_get_coalesce(struct net_device *dev,


@ -742,7 +742,7 @@ void ipoib_flush_paths(struct net_device *dev)
static void path_rec_completion(int status, static void path_rec_completion(int status,
struct sa_path_rec *pathrec, struct sa_path_rec *pathrec,
void *path_ptr) int num_prs, void *path_ptr)
{ {
struct ipoib_path *path = path_ptr; struct ipoib_path *path = path_ptr;
struct net_device *dev = path->dev; struct net_device *dev = path->dev;
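
Several SA consumers (ipoib here, srp further down) gain a num_prs argument in their path-record completion callbacks, part of the multi-path and router support mentioned in the merge text. A hedged sketch of a consumer, assuming the records arrive as an array of num_prs entries; the loop body is invented for illustration:

#include <linux/printk.h>
#include <rdma/ib_sa.h>

static void demo_path_rec_completion(int status, struct sa_path_rec *pathrec,
				     int num_prs, void *ctx)
{
	int i;

	if (status)
		return;		/* no usable path records */

	for (i = 0; i < num_prs; i++)
		pr_debug("path %d: dlid 0x%x\n", i,
			 be32_to_cpu(sa_path_get_dlid(&pathrec[i])));
}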


@ -124,8 +124,8 @@ static struct vnic_stats vnic_gstrings_stats[] = {
static void vnic_get_drvinfo(struct net_device *netdev, static void vnic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo) struct ethtool_drvinfo *drvinfo)
{ {
strlcpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent), strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
sizeof(drvinfo->bus_info)); sizeof(drvinfo->bus_info));
} }


@ -1,12 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-or-later # SPDX-License-Identifier: GPL-2.0-or-later
CFLAGS_rtrs-clt-trace.o = -I$(src)
rtrs-client-y := rtrs-clt.o \ rtrs-client-y := rtrs-clt.o \
rtrs-clt-stats.o \ rtrs-clt-stats.o \
rtrs-clt-sysfs.o rtrs-clt-sysfs.o \
rtrs-clt-trace.o
CFLAGS_rtrs-srv-trace.o = -I$(src)
rtrs-server-y := rtrs-srv.o \ rtrs-server-y := rtrs-srv.o \
rtrs-srv-stats.o \ rtrs-srv-stats.o \
rtrs-srv-sysfs.o rtrs-srv-sysfs.o \
rtrs-srv-trace.o
rtrs-core-y := rtrs.o rtrs-core-y := rtrs.o


@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* RDMA Network Block Driver
*
* Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
*/
#include "rtrs.h"
#include "rtrs-clt.h"
/*
* We include this last to have the helpers above available for the trace
* event implementations.
*/
#define CREATE_TRACE_POINTS
#include "rtrs-clt-trace.h"


@ -0,0 +1,86 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RDMA Network Block Driver
*
* Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rtrs_clt
#if !defined(_TRACE_RTRS_CLT_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RTRS_CLT_H
#include <linux/tracepoint.h>
struct rtrs_clt_path;
struct rtrs_clt_sess;
TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING);
TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING_ERR);
TRACE_DEFINE_ENUM(RTRS_CLT_RECONNECTING);
TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTED);
TRACE_DEFINE_ENUM(RTRS_CLT_CLOSING);
TRACE_DEFINE_ENUM(RTRS_CLT_CLOSED);
TRACE_DEFINE_ENUM(RTRS_CLT_DEAD);
#define show_rtrs_clt_state(x) \
__print_symbolic(x, \
{ RTRS_CLT_CONNECTING, "CONNECTING" }, \
{ RTRS_CLT_CONNECTING_ERR, "CONNECTING_ERR" }, \
{ RTRS_CLT_RECONNECTING, "RECONNECTING" }, \
{ RTRS_CLT_CONNECTED, "CONNECTED" }, \
{ RTRS_CLT_CLOSING, "CLOSING" }, \
{ RTRS_CLT_CLOSED, "CLOSED" }, \
{ RTRS_CLT_DEAD, "DEAD" })
DECLARE_EVENT_CLASS(rtrs_clt_conn_class,
TP_PROTO(struct rtrs_clt_path *clt_path),
TP_ARGS(clt_path),
TP_STRUCT__entry(
__field(int, state)
__field(int, reconnect_attempts)
__field(int, max_reconnect_attempts)
__field(int, fail_cnt)
__field(int, success_cnt)
__array(char, sessname, NAME_MAX)
),
TP_fast_assign(
struct rtrs_clt_sess *clt = clt_path->clt;
__entry->state = clt_path->state;
__entry->reconnect_attempts = clt_path->reconnect_attempts;
__entry->max_reconnect_attempts = clt->max_reconnect_attempts;
__entry->fail_cnt = clt_path->stats->reconnects.fail_cnt;
__entry->success_cnt = clt_path->stats->reconnects.successful_cnt;
memcpy(__entry->sessname, kobject_name(&clt_path->kobj), NAME_MAX);
),
TP_printk("RTRS-CLT: sess='%s' state=%s attempts='%d' max-attempts='%d' fail='%d' success='%d'",
__entry->sessname,
show_rtrs_clt_state(__entry->state),
__entry->reconnect_attempts,
__entry->max_reconnect_attempts,
__entry->fail_cnt,
__entry->success_cnt
)
);
#define DEFINE_CLT_CONN_EVENT(name) \
DEFINE_EVENT(rtrs_clt_conn_class, rtrs_##name, \
TP_PROTO(struct rtrs_clt_path *clt_path), \
TP_ARGS(clt_path))
DEFINE_CLT_CONN_EVENT(clt_reconnect_work);
DEFINE_CLT_CONN_EVENT(clt_close_conns);
DEFINE_CLT_CONN_EVENT(rdma_error_recovery);
#endif /* _TRACE_RTRS_CLT_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE rtrs-clt-trace
#include <trace/define_trace.h>
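
The new rtrs trace headers follow the standard ftrace self-defining pattern: the include guard admits a second pass (TRACE_HEADER_MULTI_READ) so that exactly one .c file can define CREATE_TRACE_POINTS and have define_trace.h re-read the header to emit the event bodies. Reduced to its skeleton, with a hypothetical event:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_DEMO_H */

/* Kept outside the guard so the CREATE_TRACE_POINTS pass sees it */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE demo-trace
#include <trace/define_trace.h>

The CFLAGS_rtrs-*-trace.o = -I$(src) lines in the Makefile above exist because TRACE_INCLUDE_PATH is resolved against the compiler's include search path, not against the header's own location.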


@ -16,6 +16,7 @@
#include "rtrs-clt.h" #include "rtrs-clt.h"
#include "rtrs-log.h" #include "rtrs-log.h"
#include "rtrs-clt-trace.h"
#define RTRS_CONNECT_TIMEOUT_MS 30000 #define RTRS_CONNECT_TIMEOUT_MS 30000
/* /*
@ -53,7 +54,10 @@ static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED; if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
connected = true;
break;
}
rcu_read_unlock(); rcu_read_unlock();
return connected; return connected;
@ -302,6 +306,8 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{ {
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
trace_rtrs_rdma_error_recovery(clt_path);
if (rtrs_clt_change_state_from_to(clt_path, if (rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTED, RTRS_CLT_CONNECTED,
RTRS_CLT_RECONNECTING)) { RTRS_CLT_RECONNECTING)) {
@ -1943,6 +1949,8 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait) void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
{ {
trace_rtrs_clt_close_conns(clt_path);
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL)) if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
queue_work(rtrs_wq, &clt_path->close_work); queue_work(rtrs_wq, &clt_path->close_work);
if (wait) if (wait)
@ -2213,17 +2221,6 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
} }
} }
static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path,
struct rtrs_clt_path *clt_path,
struct rtrs_clt_path *next)
{
struct rtrs_clt_path **ppcpu_path;
/* Call cmpxchg() without sparse warnings */
ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
return clt_path == cmpxchg(ppcpu_path, clt_path, next);
}
static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path) static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
{ {
struct rtrs_clt_sess *clt = clt_path->clt; struct rtrs_clt_sess *clt = clt_path->clt;
@ -2298,7 +2295,8 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
* We race with IO code path, which also changes pointer, * We race with IO code path, which also changes pointer,
* thus we have to be careful not to overwrite it. * thus we have to be careful not to overwrite it.
*/ */
if (xchg_paths(ppcpu_path, clt_path, next)) if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
next))
/* /*
* @ppcpu_path was successfully replaced with @next, * @ppcpu_path was successfully replaced with @next,
* that means that someone could also pick up the * that means that someone could also pick up the
@ -2649,6 +2647,8 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
reconnect_dwork); reconnect_dwork);
clt = clt_path->clt; clt = clt_path->clt;
trace_rtrs_clt_reconnect_work(clt_path);
if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING) if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
return; return;
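
The xchg_paths() removal earlier in this file is a straight conversion to try_cmpxchg(), which returns a boolean and writes the observed value back through its second argument, so the open-coded "compare against the return of cmpxchg()" pattern and its sparse-silencing cast both go away. A sketch with illustrative names:

#include <linux/atomic.h>
#include <linux/types.h>

static bool replace_slot(void **slot, void *expected, void *next)
{
	/* previously: return expected == cmpxchg(slot, expected, next); */
	return try_cmpxchg(slot, &expected, next);
}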


@ -26,11 +26,10 @@
/* /*
* Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS) * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
* and the minimum chunk size is 4096 (2^12). * and the minimum chunk size is 4096 (2^12).
* So the maximum sess_queue_depth is 65536 (2^16) in theory. * So the maximum sess_queue_depth is 65535 (2^16 - 1) in theory
* But mempool_create, create_qp and ib_post_send fail with * since queue_depth in rtrs_msg_conn_rsp is defined as le16.
* "cannot allocate memory" error if sess_queue_depth is too big.
* Therefore the practical max value of sess_queue_depth is * Therefore the practical max value of sess_queue_depth is
* somewhere between 1 and 65534 and it depends on the system. * somewhere between 1 and 65535 and it depends on the system.
*/ */
#define MAX_SESS_QUEUE_DEPTH 65535 #define MAX_SESS_QUEUE_DEPTH 65535
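
The corrected comment is just arithmetic on the protocol's fixed sizes; spelled out below with values taken from the comment itself (macro names illustrative):

#define MAX_IMM_PAYL_BITS	28	/* max IB immediate data: 2^28 */
#define MIN_CHUNK_SHIFT		12	/* minimum chunk size: 4096 = 2^12 */

/* (1 << 28) / (1 << 12) = 1 << 16 = 65536 chunks in theory, but the
 * on-wire queue_depth field is a le16, so 65535 is the hard ceiling.
 */
#define THEORETICAL_MAX_DEPTH	(1 << (MAX_IMM_PAYL_BITS - MIN_CHUNK_SHIFT))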


@ -0,0 +1,16 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* RDMA Network Block Driver
*
* Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
*/
#include "rtrs.h"
#include "rtrs-pri.h"
#include "rtrs-srv.h"
/*
* We include this last to have the helpers above available for the trace
* event implementations.
*/
#define CREATE_TRACE_POINTS
#include "rtrs-srv-trace.h"

View File

@ -0,0 +1,88 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RDMA Network Block Driver
*
* Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rtrs_srv
#if !defined(_TRACE_RTRS_SRV_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RTRS_SRV_H
#include <linux/tracepoint.h>
struct rtrs_srv_op;
struct rtrs_srv_con;
struct rtrs_srv_path;
TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTING);
TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTED);
TRACE_DEFINE_ENUM(RTRS_SRV_CLOSING);
TRACE_DEFINE_ENUM(RTRS_SRV_CLOSED);
#define show_rtrs_srv_state(x) \
__print_symbolic(x, \
{ RTRS_SRV_CONNECTING, "CONNECTING" }, \
{ RTRS_SRV_CONNECTED, "CONNECTED" }, \
{ RTRS_SRV_CLOSING, "CLOSING" }, \
{ RTRS_SRV_CLOSED, "CLOSED" })
TRACE_EVENT(send_io_resp_imm,
TP_PROTO(struct rtrs_srv_op *id,
bool need_inval,
bool always_invalidate,
int errno),
TP_ARGS(id, need_inval, always_invalidate, errno),
TP_STRUCT__entry(
__field(u8, dir)
__field(bool, need_inval)
__field(bool, always_invalidate)
__field(u32, msg_id)
__field(int, wr_cnt)
__field(u32, signal_interval)
__field(int, state)
__field(int, errno)
__array(char, sessname, NAME_MAX)
),
TP_fast_assign(
struct rtrs_srv_con *con = id->con;
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
__entry->dir = id->dir;
__entry->state = srv_path->state;
__entry->errno = errno;
__entry->need_inval = need_inval;
__entry->always_invalidate = always_invalidate;
__entry->msg_id = id->msg_id;
__entry->wr_cnt = atomic_read(&con->c.wr_cnt);
__entry->signal_interval = s->signal_interval;
memcpy(__entry->sessname, kobject_name(&srv_path->kobj), NAME_MAX);
),
TP_printk("sess='%s' state='%s' dir=%s err='%d' inval='%d' glob-inval='%d' msgid='%u' wrcnt='%d' sig-interval='%u'",
__entry->sessname,
show_rtrs_srv_state(__entry->state),
__print_symbolic(__entry->dir,
{ READ, "READ" },
{ WRITE, "WRITE" }),
__entry->errno,
__entry->need_inval,
__entry->always_invalidate,
__entry->msg_id,
__entry->wr_cnt,
__entry->signal_interval
)
);
#endif /* _TRACE_RTRS_SRV_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE rtrs-srv-trace
#include <trace/define_trace.h>


@ -16,6 +16,7 @@
#include "rtrs-log.h" #include "rtrs-log.h"
#include <rdma/ib_cm.h> #include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h> #include <rdma/ib_verbs.h>
#include "rtrs-srv-trace.h"
MODULE_DESCRIPTION("RDMA Transport Server"); MODULE_DESCRIPTION("RDMA Transport Server");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
@ -57,11 +58,6 @@ static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
return container_of(c, struct rtrs_srv_con, c); return container_of(c, struct rtrs_srv_con, c);
} }
static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
{
return container_of(s, struct rtrs_srv_path, s);
}
static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path, static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
enum rtrs_srv_state new_state) enum rtrs_srv_state new_state)
{ {
@ -375,6 +371,8 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
} }
} }
trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
if (need_inval && always_invalidate) { if (need_inval && always_invalidate) {
wr = &inv_wr; wr = &inv_wr;
inv_wr.next = &rwr.wr; inv_wr.next = &rwr.wr;
@ -1024,7 +1022,7 @@ static void process_read(struct rtrs_srv_con *con,
usr_len = le16_to_cpu(msg->usr_len); usr_len = le16_to_cpu(msg->usr_len);
data_len = off - usr_len; data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]); data = page_address(srv->chunks[buf_id]);
ret = ctx->ops.rdma_ev(srv->priv, id, READ, data, data_len, ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
data + data_len, usr_len); data + data_len, usr_len);
if (ret) { if (ret) {
@ -1077,7 +1075,7 @@ static void process_write(struct rtrs_srv_con *con,
usr_len = le16_to_cpu(req->usr_len); usr_len = le16_to_cpu(req->usr_len);
data_len = off - usr_len; data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]); data = page_address(srv->chunks[buf_id]);
ret = ctx->ops.rdma_ev(srv->priv, id, WRITE, data, data_len, ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
data + data_len, usr_len); data + data_len, usr_len);
if (ret) { if (ret) {
rtrs_err_rl(s, rtrs_err_rl(s,


@ -91,6 +91,11 @@ struct rtrs_srv_path {
struct rtrs_srv_stats *stats; struct rtrs_srv_stats *stats;
}; };
static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
{
return container_of(s, struct rtrs_srv_path, s);
}
struct rtrs_srv_sess { struct rtrs_srv_sess {
struct list_head paths_list; struct list_head paths_list;
int paths_up; int paths_up;


@ -175,7 +175,7 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
* length error * length error
*/ */
for (i = 0; i < num_sge; i++) for (i = 0; i < num_sge; i++)
if (WARN_ON(sge[i].length == 0)) if (WARN_ONCE(sge[i].length == 0, "sg %d is zero length\n", i))
return -EINVAL; return -EINVAL;
return rtrs_post_send(con->qp, head, &wr.wr, tail); return rtrs_post_send(con->qp, head, &wr.wr, tail);


@ -139,7 +139,6 @@ struct rtrs_srv_ops {
* @priv: Private data set by rtrs_srv_set_sess_priv() * @priv: Private data set by rtrs_srv_set_sess_priv()
* @id: internal RTRS operation id * @id: internal RTRS operation id
* @dir: READ/WRITE
* @data: Pointer to (bidirectional) rdma memory area: * @data: Pointer to (bidirectional) rdma memory area:
* - in case of %RTRS_SRV_RDMA_EV_RECV contains * - in case of %RTRS_SRV_RDMA_EV_RECV contains
* data sent by the client * data sent by the client
@ -151,7 +150,7 @@ struct rtrs_srv_ops {
* @usrlen: Size of the user message * @usrlen: Size of the user message
*/ */
int (*rdma_ev)(void *priv, int (*rdma_ev)(void *priv,
struct rtrs_srv_op *id, int dir, struct rtrs_srv_op *id,
void *data, size_t datalen, const void *usr, void *data, size_t datalen, const void *usr,
size_t usrlen); size_t usrlen);
/** /**


@ -699,7 +699,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
static void srp_path_rec_completion(int status, static void srp_path_rec_completion(int status,
struct sa_path_rec *pathrec, struct sa_path_rec *pathrec,
void *ch_ptr) int num_paths, void *ch_ptr)
{ {
struct srp_rdma_ch *ch = ch_ptr; struct srp_rdma_ch *ch = ch_ptr;
struct srp_target_port *target = ch->target; struct srp_target_port *target = ch->target;
@ -2789,7 +2789,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
static int srp_abort(struct scsi_cmnd *scmnd) static int srp_abort(struct scsi_cmnd *scmnd)
{ {
struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_request *req = (struct srp_request *) scmnd->host_scribble; struct srp_request *req = scsi_cmd_priv(scmnd);
u32 tag; u32 tag;
u16 ch_idx; u16 ch_idx;
struct srp_rdma_ch *ch; struct srp_rdma_ch *ch;
@ -2797,8 +2797,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
if (!req)
return SUCCESS;
tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd)); tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
ch_idx = blk_mq_unique_tag_to_hwq(tag); ch_idx = blk_mq_unique_tag_to_hwq(tag);
if (WARN_ON_ONCE(ch_idx >= target->ch_count)) if (WARN_ON_ONCE(ch_idx >= target->ch_count))
@ -2991,7 +2989,7 @@ static ssize_t local_ib_port_show(struct device *dev,
{ {
struct srp_target_port *target = host_to_target(class_to_shost(dev)); struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%d\n", target->srp_host->port); return sysfs_emit(buf, "%u\n", target->srp_host->port);
} }
static DEVICE_ATTR_RO(local_ib_port); static DEVICE_ATTR_RO(local_ib_port);
@ -3179,11 +3177,16 @@ static void srp_release_dev(struct device *dev)
struct srp_host *host = struct srp_host *host =
container_of(dev, struct srp_host, dev); container_of(dev, struct srp_host, dev);
complete(&host->released); kfree(host);
} }
static struct attribute *srp_class_attrs[];
ATTRIBUTE_GROUPS(srp_class);
static struct class srp_class = { static struct class srp_class = {
.name = "infiniband_srp", .name = "infiniband_srp",
.dev_groups = srp_class_groups,
.dev_release = srp_release_dev .dev_release = srp_release_dev
}; };
@ -3884,12 +3887,19 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr,
{ {
struct srp_host *host = container_of(dev, struct srp_host, dev); struct srp_host *host = container_of(dev, struct srp_host, dev);
return sysfs_emit(buf, "%d\n", host->port); return sysfs_emit(buf, "%u\n", host->port);
} }
static DEVICE_ATTR_RO(port); static DEVICE_ATTR_RO(port);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port) static struct attribute *srp_class_attrs[] = {
&dev_attr_add_target.attr,
&dev_attr_ibdev.attr,
&dev_attr_port.attr,
NULL
};
static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
{ {
struct srp_host *host; struct srp_host *host;
@ -3899,33 +3909,24 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
INIT_LIST_HEAD(&host->target_list); INIT_LIST_HEAD(&host->target_list);
spin_lock_init(&host->target_lock); spin_lock_init(&host->target_lock);
init_completion(&host->released);
mutex_init(&host->add_target_mutex); mutex_init(&host->add_target_mutex);
host->srp_dev = device; host->srp_dev = device;
host->port = port; host->port = port;
device_initialize(&host->dev);
host->dev.class = &srp_class; host->dev.class = &srp_class;
host->dev.parent = device->dev->dev.parent; host->dev.parent = device->dev->dev.parent;
dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev), if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
port); port))
goto put_host;
if (device_register(&host->dev)) if (device_add(&host->dev))
goto free_host; goto put_host;
if (device_create_file(&host->dev, &dev_attr_add_target))
goto err_class;
if (device_create_file(&host->dev, &dev_attr_ibdev))
goto err_class;
if (device_create_file(&host->dev, &dev_attr_port))
goto err_class;
return host; return host;
err_class: put_host:
device_unregister(&host->dev); device_del(&host->dev);
put_device(&host->dev);
free_host:
kfree(host);
return NULL; return NULL;
} }
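
The SRP host registration now uses the split device_initialize()/device_add() sequence: once initialized, every error path funnels through put_device(), whose release callback frees the memory, and the sysfs attributes are registered atomically with the device instead of via racy device_create_file() calls. The bare pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_host {
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_host, dev));
}

static struct foo_host *foo_add(struct class *cls, struct device *parent)
{
	struct foo_host *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	device_initialize(&f->dev);
	f->dev.class = cls;
	f->dev.parent = parent;
	f->dev.release = foo_release;

	if (dev_set_name(&f->dev, "foo-example") || device_add(&f->dev)) {
		put_device(&f->dev);	/* ends in foo_release() */
		return NULL;
	}
	return f;
}
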
@ -3937,7 +3938,7 @@ static void srp_rename_dev(struct ib_device *device, void *client_data)
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
char name[IB_DEVICE_NAME_MAX + 8]; char name[IB_DEVICE_NAME_MAX + 8];
snprintf(name, sizeof(name), "srp-%s-%d", snprintf(name, sizeof(name), "srp-%s-%u",
dev_name(&device->dev), host->port); dev_name(&device->dev), host->port);
device_rename(&host->dev, name); device_rename(&host->dev, name);
} }
@ -3949,7 +3950,7 @@ static int srp_add_one(struct ib_device *device)
struct ib_device_attr *attr = &device->attrs; struct ib_device_attr *attr = &device->attrs;
struct srp_host *host; struct srp_host *host;
int mr_page_shift; int mr_page_shift;
unsigned int p; u32 p;
u64 max_pages_per_mr; u64 max_pages_per_mr;
unsigned int flags = 0; unsigned int flags = 0;
@ -4031,12 +4032,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
srp_dev = client_data; srp_dev = client_data;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
device_unregister(&host->dev);
/* /*
* Wait for the sysfs entry to go away, so that no new * Remove the add_target sysfs entry so that no new target ports
* target ports can be created. * can be created.
*/ */
wait_for_completion(&host->released); device_del(&host->dev);
/* /*
* Remove all target ports. * Remove all target ports.
@ -4054,7 +4054,7 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
*/ */
flush_workqueue(srp_remove_wq); flush_workqueue(srp_remove_wq);
kfree(host); put_device(&host->dev);
} }
ib_dealloc_pd(srp_dev->pd); ib_dealloc_pd(srp_dev->pd);


@ -120,11 +120,10 @@ struct srp_device {
*/ */
struct srp_host { struct srp_host {
struct srp_device *srp_dev; struct srp_device *srp_dev;
u8 port; u32 port;
struct device dev; struct device dev;
struct list_head target_list; struct list_head target_list;
spinlock_t target_lock; spinlock_t target_lock;
struct completion released;
struct list_head list; struct list_head list;
struct mutex add_target_mutex; struct mutex add_target_mutex;
}; };

Some files were not shown because too many files have changed in this diff.