Merge branch 'qedr' into k.o/for-next

Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Doug Ledford 2017-09-25 11:18:35 -04:00
commit 7ae6f2a3d5
11 changed files with 1208 additions and 91 deletions

View File

@ -11068,6 +11068,7 @@ F: drivers/net/ethernet/qlogic/qede/
QLOGIC QL4xxx RDMA DRIVER
M: Ram Amrani <Ram.Amrani@cavium.com>
M: Michal Kalderon <Michal.Kalderon@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
L: linux-rdma@vger.kernel.org
S: Supported

View File

@ -1,3 +1,3 @@
obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
qedr-y := main.o verbs.o qedr_cm.o
qedr-y := main.o verbs.o qedr_roce_cm.o qedr_iw_cm.o

View File

@ -33,16 +33,20 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
#include <linux/idr.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
@ -92,8 +96,84 @@ static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
return qdev->ndev;
}
/* Fill in the immutable port attributes for a RoCE device.  The real
 * GID/P_Key table sizes come from a live port query.
 */
int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
			     struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int rc;

	rc = qedr_query_port(ibdev, port_num, &attr);
	if (rc)
		return rc;

	/* RoCE exposes the device's actual tables and supports MADs. */
	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}
/* Fill in the immutable port attributes for an iWARP device.  iWARP has
 * no real GID/P_Key tables and no MAD support, so fixed values are used;
 * the port query is still issued so a dead port reports an error.
 */
int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
			   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int rc;

	rc = qedr_query_port(ibdev, port_num, &attr);
	if (rc)
		return rc;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->pkey_tbl_len = 1;
	immutable->gid_tbl_len = 1;
	immutable->max_mad_size = 0;

	return 0;
}
/* Populate the iWARP-specific ib_device callbacks and allocate the iw_cm
 * callback table.  Returns 0 on success or -ENOMEM if the iwcm table
 * cannot be allocated.
 */
int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	dev->ibdev.query_gid = qedr_iw_query_gid;

	dev->ibdev.get_port_immutable = qedr_iw_port_immutable;

	dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	/* iWARP connection-manager entry points (see qedr_iw_cm.c) */
	dev->ibdev.iwcm->connect = qedr_iw_connect;
	dev->ibdev.iwcm->accept = qedr_iw_accept;
	dev->ibdev.iwcm->reject = qedr_iw_reject;
	dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
	dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
	dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;

	/* iw_cm identifies the device by its netdev interface name */
	memcpy(dev->ibdev.iwcm->ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iwcm->ifname));

	return 0;
}
/* Populate the RoCE-specific ib_device callbacks: RoCE devices present
 * themselves as IB channel adapters and use the real GID table.
 */
void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	dev->ibdev.get_port_immutable = qedr_roce_port_immutable;
	dev->ibdev.query_gid = qedr_query_gid;
	dev->ibdev.add_gid = qedr_add_gid;
	dev->ibdev.del_gid = qedr_del_gid;
}
static int qedr_register_device(struct qedr_dev *dev)
{
int rc;
strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
dev->ibdev.node_guid = dev->attr.node_guid;
@ -121,18 +201,21 @@ static int qedr_register_device(struct qedr_dev *dev)
QEDR_UVERBS(POST_SEND) |
QEDR_UVERBS(POST_RECV);
if (IS_IWARP(dev)) {
rc = qedr_iw_register_device(dev);
if (rc)
return rc;
} else {
qedr_roce_register_device(dev);
}
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = dev->num_cnq;
dev->ibdev.node_type = RDMA_NODE_IB_CA;
dev->ibdev.query_device = qedr_query_device;
dev->ibdev.query_port = qedr_query_port;
dev->ibdev.modify_port = qedr_modify_port;
dev->ibdev.query_gid = qedr_query_gid;
dev->ibdev.add_gid = qedr_add_gid;
dev->ibdev.del_gid = qedr_del_gid;
dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
dev->ibdev.mmap = qedr_mmap;
@ -166,7 +249,7 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.post_recv = qedr_post_recv;
dev->ibdev.process_mad = qedr_process_mad;
dev->ibdev.get_port_immutable = qedr_port_immutable;
dev->ibdev.get_netdev = qedr_get_netdev;
dev->ibdev.dev.parent = &dev->pdev->dev;
@ -217,6 +300,9 @@ static void qedr_free_resources(struct qedr_dev *dev)
{
int i;
if (IS_IWARP(dev))
destroy_workqueue(dev->iwarp_wq);
for (i = 0; i < dev->num_cnq; i++) {
qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
@ -241,6 +327,12 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
spin_lock_init(&dev->sgid_lock);
if (IS_IWARP(dev)) {
spin_lock_init(&dev->idr_lock);
idr_init(&dev->qpidr);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
}
/* Allocate Status blocks for CNQ */
dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
GFP_KERNEL);
@ -716,6 +808,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
in_params->events = &events;
in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
in_params->max_mtu = dev->ndev->mtu;
dev->iwarp_max_mtu = dev->ndev->mtu;
ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
rc = dev->ops->rdma_init(dev->cdev, in_params);

View File

@ -33,6 +33,7 @@
#define __QEDR_H__
#include <linux/pci.h>
#include <linux/idr.h>
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
@ -43,6 +44,8 @@
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(dev) ((dev)->ibdev.name)
#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)
#define DP_DEBUG(dev, module, fmt, ...) \
pr_debug("(%s) " module ": " fmt, \
@ -56,6 +59,7 @@
#define QEDR_MSG_SQ " SQ"
#define QEDR_MSG_QP " QP"
#define QEDR_MSG_GSI " GSI"
#define QEDR_MSG_IWARP " IW"
#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
@ -160,6 +164,11 @@ struct qedr_dev {
struct qedr_cq *gsi_sqcq;
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
enum qed_rdma_type rdma_type;
spinlock_t idr_lock; /* Protect qpidr data-structure */
struct idr qpidr;
struct workqueue_struct *iwarp_wq;
u16 iwarp_max_mtu;
unsigned long enet_state;
@ -317,6 +326,9 @@ struct qedr_qp_hwq_info {
/* DB */
void __iomem *db;
union db_prod32 db_data;
void __iomem *iwarp_db2;
union db_prod32 iwarp_db2_data;
};
#define QEDR_INC_SW_IDX(p_info, index) \
@ -337,7 +349,7 @@ enum qedr_qp_err_bitmap {
struct qedr_qp {
struct ib_qp ibqp; /* must be first */
struct qedr_dev *dev;
struct qedr_iw_ep *ep;
struct qedr_qp_hwq_info sq;
struct qedr_qp_hwq_info rq;
@ -394,6 +406,8 @@ struct qedr_qp {
/* Relevant to qps created from user space only (applications) */
struct qedr_userq usq;
struct qedr_userq urq;
atomic_t refcnt;
bool destroyed;
};
struct qedr_ah {
@ -474,6 +488,21 @@ static inline int qedr_get_dmac(struct qedr_dev *dev,
return 0;
}
/* State for one iWARP listening endpoint, created in
 * qedr_iw_create_listen() and passed to qed as the callback context.
 */
struct qedr_iw_listener {
	struct qedr_dev *dev;		/* owning qedr device */
	struct iw_cm_id *cm_id;		/* referenced iw_cm listener id */
	int backlog;			/* requested listen backlog */
	void *qed_handle;		/* opaque listener handle from qed */
};

/* Per-connection endpoint linking an iw_cm_id with a qedr QP; used as the
 * qed event-callback context for all non-listener events.
 */
struct qedr_iw_ep {
	struct qedr_dev *dev;		/* owning qedr device */
	struct iw_cm_id *cm_id;		/* referenced connection cm_id */
	struct qedr_qp *qp;		/* associated QP (NULL until accept) */
	void *qed_context;		/* qed's endpoint context (ep_context) */
	u8 during_connect;		/* set while connect/accept in flight */
};
static inline
struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
{

View File

@ -655,8 +655,10 @@ struct rdma_sq_rdma_wqe_1st {
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT 6
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x1
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 7
u8 wqe_size;
u8 prev_wqe_size;
};

View File

@ -0,0 +1,749 @@
/* QLogic qedr NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/flow.h>
#include "qedr.h"
#include "qedr_iw_cm.h"
/* Translate qed's host-order IPv4 connection info into the network-order
 * sockaddr_in pair of an iw_cm event.
 */
static inline void
qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in *local = (struct sockaddr_in *)&event->local_addr;
	struct sockaddr_in *remote = (struct sockaddr_in *)&event->remote_addr;

	local->sin_family = AF_INET;
	local->sin_port = htons(cm_info->local_port);
	local->sin_addr.s_addr = htonl(cm_info->local_ip[0]);

	remote->sin_family = AF_INET;
	remote->sin_port = htons(cm_info->remote_port);
	remote->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
}
/* Translate qed's host-order IPv6 connection info into the network-order
 * sockaddr_in6 pair of an iw_cm event.
 */
static inline void
qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in6 *local =
	    (struct sockaddr_in6 *)&event->local_addr;
	struct sockaddr_in6 *remote =
	    (struct sockaddr_in6 *)&event->remote_addr;
	int word;

	local->sin6_family = AF_INET6;
	local->sin6_port = htons(cm_info->local_port);

	remote->sin6_family = AF_INET6;
	remote->sin6_port = htons(cm_info->remote_port);

	/* Convert each of the four 32-bit address words. */
	for (word = 0; word < 4; word++) {
		local->sin6_addr.in6_u.u6_addr32[word] =
		    htonl(cm_info->local_ip[word]);
		remote->sin6_addr.in6_u.u6_addr32[word] =
		    htonl(cm_info->remote_ip[word]);
	}
}
/* qed callback: an incoming MPA connect request arrived on a listener.
 * Builds an IW_CM_EVENT_CONNECT_REQUEST and hands it to the listener's
 * event handler.  The ep allocated here travels via event.provider_data
 * into qedr_iw_accept()/qedr_iw_reject(); on allocation failure the
 * request is silently dropped.
 */
void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
	struct qedr_dev *dev = listener->dev;
	struct iw_cm_event event;
	struct qedr_iw_ep *ep;

	/* GFP_ATOMIC: called from the qed event path. */
	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return;

	ep->dev = dev;
	ep->qed_context = params->ep_context;

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.status = params->status;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    params->cm_info->ip_version == QED_TCP_IPV4)
		qedr_fill_sockaddr4(params->cm_info, &event);
	else
		qedr_fill_sockaddr6(params->cm_info, &event);

	/* provider_data carries the ep back in through accept/reject. */
	event.provider_data = (void *)ep;
	event.private_data = (void *)params->cm_info->private_data;
	event.private_data_len = (u8)params->cm_info->private_data_len;
	event.ord = params->cm_info->ord;
	event.ird = params->cm_info->ird;

	listener->cm_id->event_handler(listener->cm_id, &event);
}
/* Deliver an iw_cm event of the given type to the ep's cm_id, copying
 * any connection parameters qed supplied.  A missing cm_id (already
 * released) makes this a no-op.
 */
void
qedr_iw_issue_event(void *context,
		    struct qed_iwarp_cm_event_params *params,
		    enum iw_cm_event_type event_type)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.event = event_type;
	event.status = params->status;

	/* Not every qed event carries connection parameters. */
	if (params->cm_info) {
		event.ord = params->cm_info->ord;
		event.ird = params->cm_info->ird;
		event.private_data = (void *)params->cm_info->private_data;
		event.private_data_len = params->cm_info->private_data_len;
	}

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);
}
/* qed callback: connection fully closed.  Issue IW_CM_EVENT_CLOSE while
 * the cm_id reference is still held, then drop that reference and clear
 * the pointer so later events become no-ops.
 */
void
qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;

	if (ep->cm_id) {
		qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);

		ep->cm_id->rem_ref(ep->cm_id);
		ep->cm_id = NULL;
	}
}
void
qedr_iw_qp_event(void *context,
struct qed_iwarp_cm_event_params *params,
enum ib_event_type ib_event, char *str)
{
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
struct qedr_dev *dev = ep->dev;
struct ib_qp *ibqp = &ep->qp->ibqp;
struct ib_event event;
DP_NOTICE(dev, "QP error received: %s\n", str);
if (ibqp->event_handler) {
event.event = ib_event;
event.device = ibqp->device;
event.element.qp = ibqp;
ibqp->event_handler(&event, ibqp->qp_context);
}
}
/* Deferred-work container used to move a disconnect indication from the
 * atomic qed event callback onto dev->iwarp_wq.
 */
struct qedr_discon_work {
	struct work_struct work;		/* queued on dev->iwarp_wq */
	struct qedr_iw_ep *ep;			/* endpoint being disconnected */
	enum qed_iwarp_event_type event;	/* originating qed event */
	int status;				/* disconnect status from qed */
};
/* Workqueue handler for a deferred disconnect: notify the CM and move
 * the QP to SQD (graceful) or ERR (abortive), then drop the QP reference
 * taken in qedr_iw_disconnect_event().
 */
static void qedr_iw_disconnect_worker(struct work_struct *work)
{
	struct qedr_discon_work *dwork =
	    container_of(work, struct qedr_discon_work, work);
	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
	struct qedr_iw_ep *ep = dwork->ep;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp = ep->qp;
	struct iw_cm_event event;

	/* QP already torn down: nothing to report, just release. */
	if (qp->destroyed) {
		kfree(dwork);
		qedr_iw_qp_rem_ref(&qp->ibqp);
		return;
	}

	memset(&event, 0, sizeof(event));
	event.status = dwork->status;
	event.event = IW_CM_EVENT_DISCONNECT;

	/* Success means graceful disconnect was requested. modifying
	 * to SQD is translated to graceful disconnect. O/w reset is sent
	 */
	if (dwork->status)
		qp_params.new_state = QED_ROCE_QP_STATE_ERR;
	else
		qp_params.new_state = QED_ROCE_QP_STATE_SQD;

	/* dwork is no longer needed; everything it held was copied out. */
	kfree(dwork);

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);

	SET_FIELD(qp_params.modify_flags,
		  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);

	dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);

	/* Drop the reference taken when the work item was queued. */
	qedr_iw_qp_rem_ref(&qp->ibqp);
}
/* qed callback (atomic context): defer disconnect handling to the
 * device's iwarp_wq.  Takes a QP reference that the worker releases.
 * NOTE(review): if the work item cannot be allocated the disconnect
 * indication is silently dropped.
 */
void
qedr_iw_disconnect_event(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_discon_work *work;
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp = ep->qp;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	/* Keep the QP alive until the worker has run. */
	qedr_iw_qp_add_ref(&qp->ibqp);
	work->ep = ep;
	work->event = params->event;
	work->status = params->status;

	INIT_WORK(&work->work, qedr_iw_disconnect_worker);
	queue_work(dev->iwarp_wq, &work->work);
}
/* Passive-side connection setup finished (successfully or not).  Either
 * frees a rejected ep or reports ESTABLISHED; a negative status also
 * triggers the close path (which drops the cm_id reference).
 */
static void
qedr_iw_passive_complete(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	/* We will only reach the following state if MPA_REJECT was called on
	 * passive. In this case there will be no associated QP.
	 */
	if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
		DP_DEBUG(dev, QEDR_MSG_IWARP,
			 "PASSIVE connection refused releasing ep...\n");
		kfree(ep);
		return;
	}

	qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);

	if (params->status < 0)
		qedr_iw_close_event(context, params);
}
/* Active side received the MPA reply: ask qed to send the RTR message.
 * NOTE(review): rtr_in is stack-allocated and only ep_context is set —
 * presumably qed_iwarp_send_rtr_in carries no other input fields;
 * confirm against the qed definition.
 */
int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_send_rtr_in rtr_in;

	rtr_in.ep_context = params->ep_context;

	return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
}
/* Central dispatcher for qed iWARP CM events.  context is a
 * qedr_iw_listener for MPA_REQUEST and a qedr_iw_ep for all other
 * events.  Always returns 0; errors are reported via events/logging.
 *
 * Fixes: removed an inner re-declaration of 'ep' that shadowed the outer
 * variable in the ACTIVE_COMPLETE branch, and a stray ';' after the
 * switch statement.
 */
int
qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	switch (params->event) {
	case QED_IWARP_EVENT_MPA_REQUEST:
		/* context is the listener here, not an ep */
		qedr_iw_mpa_request(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
		qedr_iw_mpa_reply(context, params);
		break;
	case QED_IWARP_EVENT_PASSIVE_COMPLETE:
		ep->during_connect = 0;
		qedr_iw_passive_complete(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_COMPLETE:
		ep->during_connect = 0;
		qedr_iw_issue_event(context,
				    params,
				    IW_CM_EVENT_CONNECT_REPLY);
		if (params->status < 0) {
			/* Connect failed: drop the cm_id reference taken
			 * in qedr_iw_connect().
			 */
			ep->cm_id->rem_ref(ep->cm_id);
			ep->cm_id = NULL;
		}
		break;
	case QED_IWARP_EVENT_DISCONNECT:
		qedr_iw_disconnect_event(context, params);
		break;
	case QED_IWARP_EVENT_CLOSE:
		ep->during_connect = 0;
		qedr_iw_close_event(context, params);
		break;
	case QED_IWARP_EVENT_RQ_EMPTY:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_RQ_EMPTY");
		break;
	case QED_IWARP_EVENT_IRQ_FULL:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_IRQ_FULL");
		break;
	case QED_IWARP_EVENT_LLP_TIMEOUT:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_LLP_TIMEOUT");
		break;
	case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
		break;
	case QED_IWARP_EVENT_CQ_OVERFLOW:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_CQ_OVERFLOW");
		break;
	case QED_IWARP_EVENT_QP_CATASTROPHIC:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_QP_CATASTROPHIC");
		break;
	case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
		break;
	case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
		break;
	case QED_IWARP_EVENT_TERMINATE_RECEIVED:
		DP_NOTICE(dev, "Got terminate message\n");
		break;
	default:
		DP_NOTICE(dev, "Unknown event received %d\n", params->event);
		break;
	}
	return 0;
}
/* Return the VLAN id of the local interface owning the given IPv4
 * address (host order), or 0 if the interface is not a VLAN device or
 * the address is not found.
 */
static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
{
	struct net_device *ndev;
	u16 vlan_id = 0;

	ndev = ip_dev_find(&init_net, htonl(addr[0]));
	if (ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(ndev);
		dev_put(ndev);
	}

	/* rdma_vlan_dev_vlan_id() yields 0xffff for non-VLAN devices. */
	return (vlan_id == 0xffff) ? 0 : vlan_id;
}
/* Return the VLAN id of the local interface owning the given IPv6
 * address (host-order words), or 0 when IPv6 is disabled, the address
 * is unknown, or the interface is not a VLAN device.
 */
static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
{
	struct net_device *ndev = NULL;
	struct in6_addr laddr6;
	u16 vlan_id = 0;
	int word;

	if (!IS_ENABLED(CONFIG_IPV6))
		return 0;

	/* Rebuild the address in network order for ipv6_chk_addr(). */
	for (word = 0; word < 4; word++)
		laddr6.in6_u.u6_addr32[word] = htonl(addr[word]);

	/* Scan all interfaces for the one owning this local address. */
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ndev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
			vlan_id = rdma_vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rcu_read_unlock();

	/* rdma_vlan_dev_vlan_id() yields 0xffff for non-VLAN devices. */
	return (vlan_id == 0xffff) ? 0 : vlan_id;
}
/* Resolve the destination MAC address for an IPv4 peer via the routing
 * table and neighbour cache.
 * NOTE(review): returns 0 even when the neighbour entry is not yet
 * valid — only an ARP probe is kicked off and dst_mac is left unset;
 * confirm callers tolerate an unresolved MAC.
 */
static int
qedr_addr4_resolve(struct qedr_dev *dev,
		   struct sockaddr_in *src_in,
		   struct sockaddr_in *dst_in, u8 *dst_mac)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct neighbour *neigh = NULL;
	struct rtable *rt = NULL;
	int rc = 0;

	rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
	if (IS_ERR(rt)) {
		DP_ERR(dev, "ip_route_output returned error\n");
		return -EINVAL;
	}

	neigh = dst_neigh_lookup(&rt->dst, &dst_ip);

	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			/* Not resolved yet: trigger ARP resolution. */
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	ip_rt_put(rt);

	return rc;
}
/* Resolve the destination MAC address for an IPv6 peer via the routing
 * table and neighbour cache.
 *
 * Fixes: (1) the neighbour lookup key was '&dst_in' — the address of a
 * local sockaddr pointer — instead of the destination IPv6 address; use
 * &fl6.daddr.  (2) dst->error was logged *after* dst_release(dst), a
 * use-after-free; log before releasing.
 *
 * NOTE(review): like the IPv4 variant, returns 0 even when the
 * neighbour is not yet valid (only a probe is sent).
 */
static int
qedr_addr6_resolve(struct qedr_dev *dev,
		   struct sockaddr_in6 *src_in,
		   struct sockaddr_in6 *dst_in, u8 *dst_mac)
{
	struct neighbour *neigh = NULL;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int rc = 0;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;

	dst = ip6_route_output(&init_net, NULL, &fl6);

	if ((!dst) || dst->error) {
		if (dst) {
			DP_ERR(dev,
			       "ip6_route_output returned dst->error = %d\n",
			       dst->error);
			dst_release(dst);
		}
		return -EINVAL;
	}

	/* Key the neighbour lookup by the destination IPv6 address. */
	neigh = dst_neigh_lookup(dst, &fl6.daddr);

	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			/* Not resolved yet: trigger neighbour discovery. */
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	dst_release(dst);

	return rc;
}
/* Active-side connect: resolve the route/MAC to the peer, fill the qed
 * connect parameters and initiate the iWARP connection.  Returns 0 on
 * success or a negative errno.
 *
 * Fix: idr_find() may return NULL for an invalid qpn; the original code
 * dereferenced qp unconditionally (qp->ep = ep).  Validate it first.
 */
int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qed_iwarp_connect_out out_params;
	struct qed_iwarp_connect_in in_params;
	struct qed_iwarp_cm_info *cm_info;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct qedr_iw_ep *ep;
	struct qedr_qp *qp;
	int rc = 0;
	int i;

	qp = idr_find(&dev->qpidr, conn_param->qpn);
	if (!qp) {
		DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
		return -EINVAL;
	}

	laddr = (struct sockaddr_in *)&cm_id->local_addr;
	raddr = (struct sockaddr_in *)&cm_id->remote_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
	raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Connect source address: %pISpc, remote address: %pISpc\n",
		 &cm_id->local_addr, &cm_id->remote_addr);

	if (!laddr->sin_port || !raddr->sin_port)
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->dev = dev;
	ep->qp = qp;
	qp->ep = ep;

	/* Reference dropped on error below or from the event handler on
	 * ACTIVE_COMPLETE failure / close.
	 */
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	in_params.event_cb = qedr_iw_event_handler;
	in_params.cb_context = ep;

	cm_info = &in_params.cm_info;
	memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
	memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->remote_addr.ss_family == AF_INET) {
		cm_info->ip_version = QED_TCP_IPV4;

		cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
		cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info->remote_port = ntohs(raddr->sin_port);
		cm_info->local_port = ntohs(laddr->sin_port);
		cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);

		rc = qedr_addr4_resolve(dev, laddr, raddr,
					(u8 *)in_params.remote_mac_addr);

		/* MSS = MTU minus IPv4 + TCP header overhead */
		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct iphdr) + sizeof(struct tcphdr));

	} else {
		in_params.cm_info.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			cm_info->remote_ip[i] =
			    ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
			cm_info->local_ip[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		cm_info->local_port = ntohs(laddr6->sin6_port);
		cm_info->remote_port = ntohs(raddr6->sin6_port);

		/* MSS = MTU minus IPv6 + TCP header overhead */
		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

		cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);

		rc = qedr_addr6_resolve(dev, laddr6, raddr6,
					(u8 *)in_params.remote_mac_addr);
	}
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
		 conn_param->ord, conn_param->ird, conn_param->private_data,
		 conn_param->private_data_len, qp->rq_psn);

	cm_info->ord = conn_param->ord;
	cm_info->ird = conn_param->ird;
	cm_info->private_data = conn_param->private_data;
	cm_info->private_data_len = conn_param->private_data_len;
	in_params.qp = qp->qed_qp;
	memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);

	ep->during_connect = 1;
	rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
	if (rc)
		goto err;

	return rc;

err:
	cm_id->rem_ref(cm_id);
	kfree(ep);
	return rc;
}
/* Start listening on the cm_id's local address/port.  Registers a qed
 * listener whose events funnel into qedr_iw_event_handler() with the
 * qedr_iw_listener as context.  Returns 0 on success, negative errno
 * otherwise.
 * NOTE(review): iparams is stack-allocated and only partially filled —
 * presumably qed reads only the fields set here; confirm.
 */
int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qedr_iw_listener *listener;
	struct qed_iwarp_listen_in iparams;
	struct qed_iwarp_listen_out oparams;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	int rc;
	int i;

	laddr = (struct sockaddr_in *)&cm_id->local_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Create Listener address: %pISpc\n", &cm_id->local_addr);

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->dev = dev;
	/* Reference held for the lifetime of the listener; dropped in
	 * qedr_iw_destroy_listen() or on the error path below.
	 */
	cm_id->add_ref(cm_id);
	listener->cm_id = cm_id;
	listener->backlog = backlog;

	iparams.cb_context = listener;
	iparams.event_cb = qedr_iw_event_handler;
	iparams.max_backlog = backlog;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->local_addr.ss_family == AF_INET) {
		iparams.ip_version = QED_TCP_IPV4;
		memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));

		/* qed expects host-order addresses and ports */
		iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
		iparams.port = ntohs(laddr->sin_port);
		iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
	} else {
		iparams.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			iparams.ip_addr[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		iparams.port = ntohs(laddr6->sin6_port);

		iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
	}
	rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		goto err;

	listener->qed_handle = oparams.handle;
	cm_id->provider_data = listener;
	return rc;

err:
	cm_id->rem_ref(cm_id);
	kfree(listener);
	return rc;
}
/* Tear down a listener created by qedr_iw_create_listen(): destroy the
 * qed listener (if it was successfully created), drop the cm_id
 * reference and free the listener object.
 *
 * Fix: the listener allocated in qedr_iw_create_listen() was never
 * freed, leaking memory on every listen teardown.
 */
int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
{
	struct qedr_iw_listener *listener = cm_id->provider_data;
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	int rc = 0;

	if (listener->qed_handle)
		rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
						    listener->qed_handle);

	cm_id->rem_ref(cm_id);
	kfree(listener);	/* allocated in qedr_iw_create_listen() */
	return rc;
}
/* Accept a pending passive connection.  The ep arrives through
 * cm_id->provider_data (set in qedr_iw_mpa_request() via the
 * CONNECT_REQUEST event); the QP is looked up by the caller-supplied
 * qpn, attached to the ep, and the accept parameters are handed to qed.
 */
int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp;
	struct qed_iwarp_accept_in params;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);

	qp = idr_find(&dev->qpidr, conn_param->qpn);
	if (!qp) {
		DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
		return -EINVAL;
	}

	ep->qp = qp;
	qp->ep = ep;
	/* Reference dropped on the error path or from the close flow. */
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.qp = ep->qp->qed_qp;
	params.private_data = conn_param->private_data;
	params.private_data_len = conn_param->private_data_len;
	params.ird = conn_param->ird;
	params.ord = conn_param->ord;

	ep->during_connect = 1;
	rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
	if (rc)
		goto err;

	return rc;
err:
	ep->during_connect = 0;
	cm_id->rem_ref(cm_id);
	return rc;
}
/* Reject a pending passive connection, forwarding any private data to
 * the peer.  Detaches the QP from the ep; the ep itself is freed later
 * in qedr_iw_passive_complete() (-ECONNREFUSED with no QP).
 */
int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_reject_in params;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.private_data = pdata;
	params.private_data_len = pdata_len;
	ep->qp = NULL;

	return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
}
void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
atomic_inc(&qp->refcnt);
}
/* Drop a CM-held reference on the QP.  The final put removes the QP
 * from the qpn idr (under idr_lock) and frees it.
 */
void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	if (atomic_dec_and_test(&qp->refcnt)) {
		spin_lock_irq(&qp->dev->idr_lock);
		idr_remove(&qp->dev->qpidr, qp->qp_id);
		spin_unlock_irq(&qp->dev->idr_lock);
		kfree(qp);
	}
}
struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
return idr_find(&dev->qpidr, qpn);
}

View File

@ -0,0 +1,49 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/iw_cm.h>
int qedr_iw_connect(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *conn_param);
int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void qedr_iw_qp_add_ref(struct ib_qp *qp);
void qedr_iw_qp_rem_ref(struct ib_qp *qp);
struct ib_qp *qedr_iw_get_qp(struct ib_device *dev, int qpn);

View File

@ -48,7 +48,7 @@
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_cm.h"
#include "qedr_roce_cm.h"
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{

View File

@ -49,7 +49,7 @@
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_cm.h"
#include "qedr_roce_cm.h"
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
@ -70,6 +70,20 @@ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return 0;
}
/* iWARP has no real GID table: report a zero-padded pseudo GID built
 * from the netdev's MAC address.  index is ignored (table length is 1).
 */
int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}
int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *sgid)
{
@ -263,8 +277,13 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
attr->sm_lid = 0;
attr->sm_sl = 0;
attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
attr->gid_tbl_len = QEDR_MAX_SGID;
attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
attr->gid_tbl_len = 1;
attr->pkey_tbl_len = 1;
} else {
attr->gid_tbl_len = QEDR_MAX_SGID;
attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
}
attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
attr->qkey_viol_cntr = 0;
get_link_speed_and_width(rdma_port->link_speed,
@ -770,7 +789,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
struct qedr_dev *dev,
struct qedr_userq *q,
u64 buf_addr, size_t buf_len,
int access, int dmasync)
int access, int dmasync,
int alloc_and_init)
{
u32 fw_pages;
int rc;
@ -791,19 +811,27 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
if (rc)
goto err0;
q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
if (IS_ERR(q->pbl_tbl)) {
rc = PTR_ERR(q->pbl_tbl);
goto err0;
}
if (alloc_and_init) {
q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
if (IS_ERR(q->pbl_tbl)) {
rc = PTR_ERR(q->pbl_tbl);
goto err0;
}
qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
FW_PAGE_SHIFT);
} else {
q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
if (!q->pbl_tbl) {
rc = -ENOMEM;
goto err0;
}
}
return 0;
err0:
ib_umem_release(q->umem);
q->umem = NULL;
return rc;
}
@ -929,7 +957,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
cq->cq_type = QEDR_CQ_TYPE_USER;
rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
ureq.len, IB_ACCESS_LOCAL_WRITE,
1, 1);
if (rc)
goto err0;
@ -1222,18 +1251,34 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
return 0;
}
static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
struct qedr_create_qp_uresp *uresp,
struct qedr_qp *qp)
{
uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
/* iWARP requires two doorbells per RQ. */
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
uresp->rq_db_offset =
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
} else {
uresp->rq_db_offset =
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
}
uresp->rq_icid = qp->icid;
}
static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
static void qedr_copy_sq_uresp(struct qedr_dev *dev,
struct qedr_create_qp_uresp *uresp,
struct qedr_qp *qp)
{
uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
uresp->sq_icid = qp->icid + 1;
/* iWARP uses the same cid for rq and sq */
if (rdma_protocol_iwarp(&dev->ibdev, 1))
uresp->sq_icid = qp->icid;
else
uresp->sq_icid = qp->icid + 1;
}
static int qedr_copy_qp_uresp(struct qedr_dev *dev,
@ -1243,8 +1288,8 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
int rc;
memset(&uresp, 0, sizeof(uresp));
qedr_copy_sq_uresp(&uresp, qp);
qedr_copy_rq_uresp(&uresp, qp);
qedr_copy_sq_uresp(dev, &uresp, qp);
qedr_copy_rq_uresp(dev, &uresp, qp);
uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
uresp.qp_id = qp->qp_id;
@ -1264,6 +1309,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs)
{
spin_lock_init(&qp->q_lock);
atomic_set(&qp->refcnt, 1);
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
@ -1334,6 +1380,52 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}
/*
 * Insert @ptr at fixed key @id into the per-device QP idr.
 *
 * Only iWARP looks QPs up by id (connection management), so this is a
 * no-op for RoCE.  Returns 0 on success or the negative errno from
 * idr_alloc().
 */
static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
{
	int rc = 0;

	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		return 0;

	/* Preload outside the lock so the allocation under the
	 * spinlock can safely be GFP_ATOMIC.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock_irq(&dev->idr_lock);

	rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);

	spin_unlock_irq(&dev->idr_lock);
	idr_preload_end();

	if (rc >= 0)
		rc = 0;

	return rc;
}
/*
 * Remove the entry keyed by @id from the per-device QP idr.
 * Mirrors qedr_idr_add(): the idr is only maintained for iWARP.
 */
static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
{
	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		return;

	spin_lock_irq(&dev->idr_lock);
	idr_remove(&dev->qpidr, id);
	spin_unlock_irq(&dev->idr_lock);
}
/*
 * Finish user-QP setup for iWARP: the qed layer returned the PBL
 * buffers it allocated as part of QP creation, so record their
 * addresses in the SQ/RQ page tables and fill them from the
 * user-memory regions.
 */
static inline void
qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_out_params *out_params)
{
	/* SQ PBL comes from the qed create-QP output. */
	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
			   &qp->usq.pbl_info, FW_PAGE_SHIFT);

	/* Likewise for the RQ. */
	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
}
static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
if (qp->usq.umem)
@ -1357,6 +1449,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
struct ib_ucontext *ib_ctx = NULL;
struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
ib_ctx = ibpd->uobject->context;
@ -1371,14 +1464,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* SQ - read access only (0), dma sync not required (0) */
rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
ureq.sq_len, 0, 0);
ureq.sq_len, 0, 0, alloc_and_init);
if (rc)
return rc;
/* RQ - read access only (0), dma sync not required (0) */
rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, 0, 0);
ureq.rq_len, 0, 0, alloc_and_init);
if (rc)
return rc;
@ -1399,6 +1491,9 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
goto err1;
}
if (rdma_protocol_iwarp(&dev->ibdev, 1))
qedr_iwarp_populate_user_qp(dev, qp, &out_params);
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
@ -1419,6 +1514,21 @@ err1:
return rc;
}
/*
 * Set up the kernel-QP doorbell addresses for iWARP.  Unlike RoCE,
 * iWARP shares one icid between SQ and RQ and the RQ takes a second
 * doorbell (TCM flags) that post_recv rings after each posted RQE.
 */
static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	/* SQ producer doorbell. */
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid;

	/* RQ producer doorbell — same icid as the SQ. */
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;

	/* Second RQ doorbell carrying the post-RQ completion-flag cmd. */
	qp->rq.iwarp_db2 = dev->db_addr +
			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	qp->rq.iwarp_db2_data.data.icid = qp->icid;
	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
}
static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
struct qedr_qp *qp,
@ -1465,8 +1575,71 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
qp->icid = out_params.icid;
qedr_set_roce_db_info(dev, qp);
return rc;
}
return 0;
/*
 * Create a kernel QP for iWARP.
 *
 * Ordering differs from the RoCE flavour: for iWARP the qed layer
 * allocates the SQ/RQ PBLs as part of rdma_create_qp(), so the QP is
 * created first and the chains are then allocated on top of those
 * externally provided PBLs (ext_pbl).
 *
 * Returns 0 on success or a negative errno.  On a chain-allocation
 * failure only the qed QP is destroyed here; any chain allocated so
 * far is released by the caller through qedr_cleanup_kernel().
 */
static int
qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_in_params *in_params,
			    u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	struct qed_chain_ext_pbl ext_pbl;
	int rc;

	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
						     QEDR_SQE_ELEMENT_SIZE,
						     QED_CHAIN_MODE_PBL);
	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
						     QEDR_RQE_ELEMENT_SIZE,
						     QED_CHAIN_MODE_PBL);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);
	if (!qp->qed_qp)
		return -EINVAL;

	/* Now we allocate the chain: SQ first, on the qed-provided PBL. */
	ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
	ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl, &ext_pbl);
	if (rc)
		goto err;

	/* Then the RQ chain. */
	ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
	ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_rq_elems,
					   QEDR_RQE_ELEMENT_SIZE,
					   &qp->rq.pbl, &ext_pbl);
	if (rc)
		goto err;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	qedr_set_iwarp_db_info(dev, qp);

	return 0;

err:
	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	return rc;
}
static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
@ -1541,8 +1714,12 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
n_sq_elems, n_rq_elems);
if (rdma_protocol_iwarp(&dev->ibdev, 1))
rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
n_sq_elems, n_rq_elems);
else
rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
n_sq_elems, n_rq_elems);
if (rc)
qedr_cleanup_kernel(dev, qp);
@ -1602,6 +1779,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
rc = qedr_idr_add(dev, qp, qp->qp_id);
if (rc)
goto err;
return &qp->ibqp;
err:
@ -1689,10 +1870,13 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Update doorbell (in case post_recv was
* done before move to RTR)
*/
wmb();
writel(qp->rq.db_data.raw, qp->rq.db);
/* Make sure write takes effect */
mmiowb();
if (rdma_protocol_roce(&dev->ibdev, 1)) {
wmb();
writel(qp->rq.db_data.raw, qp->rq.db);
/* Make sure write takes effect */
mmiowb();
}
break;
case QED_ROCE_QP_STATE_ERR:
break;
@ -1786,16 +1970,18 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
else
new_qp_state = old_qp_state;
if (!ib_modify_qp_is_ok
(old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
DP_ERR(dev,
"modify qp: invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
new_qp_state);
rc = -EINVAL;
goto err;
if (rdma_protocol_roce(&dev->ibdev, 1)) {
if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
ibqp->qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
DP_ERR(dev,
"modify qp: invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
attr_mask, qp->qp_id, ibqp->qp_type,
old_qp_state, new_qp_state);
rc = -EINVAL;
goto err;
}
}
/* Translate the masks... */
@ -2111,15 +2297,34 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
qp, qp->qp_type);
if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
(qp->state != QED_ROCE_QP_STATE_ERR) &&
(qp->state != QED_ROCE_QP_STATE_INIT)) {
if (rdma_protocol_roce(&dev->ibdev, 1)) {
if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
(qp->state != QED_ROCE_QP_STATE_ERR) &&
(qp->state != QED_ROCE_QP_STATE_INIT)) {
attr.qp_state = IB_QPS_ERR;
attr_mask |= IB_QP_STATE;
attr.qp_state = IB_QPS_ERR;
attr_mask |= IB_QP_STATE;
/* Change the QP state to ERROR */
qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
/* Change the QP state to ERROR */
qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
}
} else {
/* Wait for the connect/accept to complete */
if (qp->ep) {
int wait_count = 1;
while (qp->ep->during_connect) {
DP_DEBUG(dev, QEDR_MSG_QP,
"Still in during connect/accept\n");
msleep(100);
if (wait_count++ > 200) {
DP_NOTICE(dev,
"during connect timeout\n");
break;
}
}
}
}
if (qp->qp_type == IB_QPT_GSI)
@ -2127,8 +2332,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
qedr_free_qp_resources(dev, qp);
kfree(qp);
if (atomic_dec_and_test(&qp->refcnt)) {
qedr_idr_remove(dev, qp->qp_id);
kfree(qp);
}
return rc;
}
@ -2740,6 +2947,7 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
case IB_WR_SEND_WITH_INV:
return IB_WC_SEND;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_READ_WITH_INV:
return IB_WC_RDMA_READ;
case IB_WR_ATOMIC_CMP_AND_SWP:
return IB_WC_COMP_SWAP;
@ -2900,11 +3108,8 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
break;
case IB_WR_RDMA_READ_WITH_INV:
DP_ERR(dev,
"RDMA READ WITH INVALIDATE not supported\n");
*bad_wr = wr;
rc = -EINVAL;
break;
SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
/* fallthrough... same is identical to RDMA READ */
case IB_WR_RDMA_READ:
wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
@ -3014,15 +3219,17 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
(qp->state != QED_ROCE_QP_STATE_ERR) &&
(qp->state != QED_ROCE_QP_STATE_SQD)) {
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
DP_DEBUG(dev, QEDR_MSG_CQ,
"QP in wrong state! QP icid=0x%x state %d\n",
qp->icid, qp->state);
return -EINVAL;
if (rdma_protocol_roce(&dev->ibdev, 1)) {
if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
(qp->state != QED_ROCE_QP_STATE_ERR) &&
(qp->state != QED_ROCE_QP_STATE_SQD)) {
spin_unlock_irqrestore(&qp->q_lock, flags);
*bad_wr = wr;
DP_DEBUG(dev, QEDR_MSG_CQ,
"QP in wrong state! QP icid=0x%x state %d\n",
qp->icid, qp->state);
return -EINVAL;
}
}
while (wr) {
@ -3142,6 +3349,11 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
/* Make sure write sticks */
mmiowb();
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
mmiowb(); /* for second doorbell */
}
wr = wr->next;
}
@ -3603,23 +3815,3 @@ int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
return IB_MAD_RESULT_SUCCESS;
}
/*
 * ib_port_immutable callback: report the RoCE port capabilities.
 * NOTE(review): this combined handler is superseded by the
 * protocol-specific qedr_roce_port_immutable()/qedr_iw_port_immutable()
 * pair once iWARP support is split out.
 */
int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
			struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

View File

@ -39,6 +39,8 @@ int qedr_modify_port(struct ib_device *, u8 port, int mask,
struct ib_port_modify *props);
int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid);
int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);