Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/qib: Use pci_dev->revision
  RDMA/iwcm: Get rid of enum iw_cm_event_status
  IB/ipath: Use pci_dev->revision, again
  IB/qib: Prevent driver hang with unprogrammed boards
  RDMA/cxgb4: EEH errors can hang the driver
  RDMA/cxgb4: Reset wait condition atomically
  RDMA/cxgb4: Fix missing parentheses
  RDMA/cxgb4: Initialization errors can cause crash
  RDMA/cxgb4: Don't change QP state outside EP lock
  RDMA/cma: Add an ID_REUSEADDR option
  RDMA/cma: Fix handling of IPv6 addressing in cma_use_port
commit 75f5076b12
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
@@ -148,6 +148,7 @@ struct rdma_id_private {
 	u32 qp_num;
 	u8 srq;
 	u8 tos;
+	u8 reuseaddr;
 };

 struct cma_multicast {
@@ -712,6 +713,21 @@ static inline int cma_any_addr(struct sockaddr *addr)
 	return cma_zero_addr(addr) || cma_loopback_addr(addr);
 }

+static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
+{
+	if (src->sa_family != dst->sa_family)
+		return -1;
+
+	switch (src->sa_family) {
+	case AF_INET:
+		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
+		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+	default:
+		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
+				     &((struct sockaddr_in6 *) dst)->sin6_addr);
+	}
+}
+
 static inline __be16 cma_port(struct sockaddr *addr)
 {
 	if (addr->sa_family == AF_INET)
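[Editor's note] The new helper follows the memcmp()-style convention: zero means the addresses match, nonzero means they differ, so callers test equality with !cma_addr_cmp(...). A minimal userspace sketch of the same convention, using only standard socket headers (addr_cmp is an illustrative analogue, not a kernel API):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Illustrative analogue of cma_addr_cmp(): returns 0 iff the addresses match. */
static int addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((const struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((const struct sockaddr_in *) dst)->sin_addr.s_addr;
	default:	/* AF_INET6 */
		return memcmp(&((const struct sockaddr_in6 *) src)->sin6_addr,
			      &((const struct sockaddr_in6 *) dst)->sin6_addr,
			      sizeof(struct in6_addr));
	}
}

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET };
	struct sockaddr_in b = { .sin_family = AF_INET };

	inet_pton(AF_INET, "10.0.0.1", &a.sin_addr);
	inet_pton(AF_INET, "10.0.0.1", &b.sin_addr);
	/* Equality is tested with !addr_cmp(...), as in the kernel caller. */
	printf("%s\n", !addr_cmp((struct sockaddr *) &a,
				 (struct sockaddr *) &b) ? "match" : "differ");
	return 0;
}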
@@ -1564,50 +1580,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
 	mutex_unlock(&lock);
 }

-int rdma_listen(struct rdma_cm_id *id, int backlog)
-{
-	struct rdma_id_private *id_priv;
-	int ret;
-
-	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
-		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
-		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
-		if (ret)
-			return ret;
-	}
-
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
-		return -EINVAL;
-
-	id_priv->backlog = backlog;
-	if (id->device) {
-		switch (rdma_node_get_transport(id->device->node_type)) {
-		case RDMA_TRANSPORT_IB:
-			ret = cma_ib_listen(id_priv);
-			if (ret)
-				goto err;
-			break;
-		case RDMA_TRANSPORT_IWARP:
-			ret = cma_iw_listen(id_priv, backlog);
-			if (ret)
-				goto err;
-			break;
-		default:
-			ret = -ENOSYS;
-			goto err;
-		}
-	} else
-		cma_listen_on_all(id_priv);
-
-	return 0;
-err:
-	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
-	return ret;
-}
-EXPORT_SYMBOL(rdma_listen);
-
 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
 {
 	struct rdma_id_private *id_priv;
@@ -2090,6 +2062,25 @@ err:
 }
 EXPORT_SYMBOL(rdma_resolve_addr);

+int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
+{
+	struct rdma_id_private *id_priv;
+	unsigned long flags;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	spin_lock_irqsave(&id_priv->lock, flags);
+	if (id_priv->state == CMA_IDLE) {
+		id_priv->reuseaddr = reuse;
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&id_priv->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_set_reuseaddr);
+
 static void cma_bind_port(struct rdma_bind_list *bind_list,
 			  struct rdma_id_private *id_priv)
 {
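[Editor's note] Because rdma_set_reuseaddr() only succeeds while the id is still in CMA_IDLE, a kernel ULP must call it before binding. A hypothetical caller under the 2.6.39-era three-argument rdma_create_id() (my_event_handler, my_ctx, and my_sin are placeholders, error paths trimmed):

/* Hypothetical ULP fragment, not from this commit. */
static int listen_with_reuse(struct sockaddr_in *my_sin)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(my_event_handler, my_ctx, RDMA_PS_TCP);
	if (IS_ERR(id))
		return PTR_ERR(id);

	ret = rdma_set_reuseaddr(id, 1);	/* only legal while CMA_IDLE */
	if (!ret)
		ret = rdma_bind_addr(id, (struct sockaddr *) my_sin);
	if (!ret)
		ret = rdma_listen(id, 0);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}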
@@ -2165,41 +2156,71 @@ retry:
 	return -EADDRNOTAVAIL;
 }

-static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+/*
+ * Check that the requested port is available.  This is called when trying to
+ * bind to a specific port, or when trying to listen on a bound port.  In
+ * the latter case, the provided id_priv may already be on the bind_list, but
+ * we still need to check that it's okay to start listening.
+ */
+static int cma_check_port(struct rdma_bind_list *bind_list,
+			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
 {
 	struct rdma_id_private *cur_id;
-	struct sockaddr_in *sin, *cur_sin;
-	struct rdma_bind_list *bind_list;
+	struct sockaddr *addr, *cur_addr;
 	struct hlist_node *node;
-	unsigned short snum;

-	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
-	snum = ntohs(sin->sin_port);
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+	if (cma_any_addr(addr) && !reuseaddr)
+		return -EADDRNOTAVAIL;
+
+	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+		if (id_priv == cur_id)
+			continue;
+
+		if ((cur_id->state == CMA_LISTEN) ||
+		    !reuseaddr || !cur_id->reuseaddr) {
+			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
+			if (cma_any_addr(cur_addr))
+				return -EADDRNOTAVAIL;
+
+			if (!cma_addr_cmp(addr, cur_addr))
+				return -EADDRINUSE;
+		}
+	}
+	return 0;
+}
+
+static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list;
+	unsigned short snum;
+	int ret;
+
+	snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
 		return -EACCES;

 	bind_list = idr_find(ps, snum);
-	if (!bind_list)
-		return cma_alloc_port(ps, id_priv, snum);
-
-	/*
-	 * We don't support binding to any address if anyone is bound to
-	 * a specific address on the same port.
-	 */
-	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
-		return -EADDRNOTAVAIL;
-
-	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
-			return -EADDRNOTAVAIL;
-
-		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
-		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
-			return -EADDRINUSE;
-	}
-
-	cma_bind_port(bind_list, id_priv);
-	return 0;
+	if (!bind_list) {
+		ret = cma_alloc_port(ps, id_priv, snum);
+	} else {
+		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
+		if (!ret)
+			cma_bind_port(bind_list, id_priv);
+	}
+	return ret;
 }

+static int cma_bind_listen(struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list = id_priv->bind_list;
+	int ret = 0;
+
+	mutex_lock(&lock);
+	if (bind_list->owners.first->next)
+		ret = cma_check_port(bind_list, id_priv, 0);
+	mutex_unlock(&lock);
+	return ret;
+}
+
 static int cma_get_port(struct rdma_id_private *id_priv)
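[Editor's note] The rules cma_check_port() enforces boil down to: a wildcard bind conflicts with everything unless reuse is granted, and two binds to the same address/port conflict unless both sides asked for reuse and the existing one is not already listening. A standalone distillation of that decision table (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative distillation of the cma_check_port() rules. */
struct binding {
	bool any_addr;		/* wildcard bind */
	bool reuseaddr;		/* reuse requested */
	bool listening;		/* already in CMA_LISTEN */
	unsigned int addr;	/* stand-in for the bound address */
};

/* 0 = new binding may coexist with cur; negative = errno-style conflict */
static int check_port(const struct binding *new, const struct binding *cur)
{
	if (new->any_addr && !new->reuseaddr)
		return -1;			/* -EADDRNOTAVAIL */
	if (cur->listening || !new->reuseaddr || !cur->reuseaddr) {
		if (cur->any_addr)
			return -1;		/* -EADDRNOTAVAIL */
		if (new->addr == cur->addr)
			return -2;		/* -EADDRINUSE */
	}
	return 0;
}

int main(void)
{
	struct binding a = { .reuseaddr = true, .addr = 0x0a000001 };
	struct binding b = { .reuseaddr = true, .addr = 0x0a000001 };

	printf("both reuse:  %d\n", check_port(&a, &b));	/* 0: allowed */
	b.listening = true;
	printf("cur listens: %d\n", check_port(&a, &b));	/* -2: in use */
	return 0;
}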
@@ -2253,6 +2274,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
 	return 0;
 }

+int rdma_listen(struct rdma_cm_id *id, int backlog)
+{
+	struct rdma_id_private *id_priv;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (id_priv->state == CMA_IDLE) {
+		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
+		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
+		if (ret)
+			return ret;
+	}
+
+	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+		return -EINVAL;
+
+	if (id_priv->reuseaddr) {
+		ret = cma_bind_listen(id_priv);
+		if (ret)
+			goto err;
+	}
+
+	id_priv->backlog = backlog;
+	if (id->device) {
+		switch (rdma_node_get_transport(id->device->node_type)) {
+		case RDMA_TRANSPORT_IB:
+			ret = cma_ib_listen(id_priv);
+			if (ret)
+				goto err;
+			break;
+		case RDMA_TRANSPORT_IWARP:
+			ret = cma_iw_listen(id_priv, backlog);
+			if (ret)
+				goto err;
+			break;
+		default:
+			ret = -ENOSYS;
+			goto err;
+		}
+	} else
+		cma_listen_on_all(id_priv);
+
+	return 0;
+err:
+	id_priv->backlog = 0;
+	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_listen);
+
 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 {
 	struct rdma_id_private *id_priv;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
@@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 	 */
 	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
-	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
+	if (iw_event->status == 0) {
 		cm_id_priv->id.local_addr = iw_event->local_addr;
 		cm_id_priv->id.remote_addr = iw_event->remote_addr;
 		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
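[Editor's note] After this series, iWARP event status values are plain errno-style ints rather than members of the removed enum iw_cm_event_status; the mappings used throughout the diff are STATUS_OK/STATUS_ACCEPTED -> 0, STATUS_RESET -> -ECONNRESET, and STATUS_TIMEOUT -> -ETIMEDOUT. A hypothetical handler fragment under that convention:

#include <errno.h>
#include <stdio.h>

/* Illustrative fragment: status is now an errno-style int. */
static void handle_conn_reply(int status)
{
	if (status == 0)
		printf("connection accepted\n");
	else if (status == -ECONNRESET)
		printf("peer reset the connection\n");
	else if (status == -ETIMEDOUT)
		printf("connect timed out\n");
	else
		printf("connect failed: %d\n", status);
}

int main(void)
{
	handle_conn_reply(0);
	handle_conn_reply(-ECONNRESET);
	return 0;
}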
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
@@ -883,6 +883,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
 		}
 		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
 		break;
+	case RDMA_OPTION_ID_REUSEADDR:
+		if (optlen != sizeof(int)) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
+		break;
 	default:
 		ret = -ENOSYS;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

 	return 0;
 }
@@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

 	PDBG("%s ep %p\n", __func__, ep);
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	return 0;
 }

@@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int disconnect = 1;
 	int release = 0;
-	int closing = 0;
+	int abort = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);

@@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		 * in rdma connection migration (see c4iw_accept_cr()).
 		 */
 		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
 		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case FPDU_MODE:
 		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
-		closing = 1;
+		attrs.next_state = C4IW_QP_STATE_CLOSING;
+		abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 		peer_close_upcall(ep);
+		disconnect = 1;
 		break;
 	case ABORTING:
 		disconnect = 0;
@@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		BUG_ON(1);
 	}
 	mutex_unlock(&ep->com.mutex);
-	if (closing) {
-		attrs.next_state = C4IW_QP_STATE_CLOSING;
-		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-	}
 	if (disconnect)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
@@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
 	 */
-	ep->com.wr_wait.done = 1;
-	ep->com.wr_wait.ret = -ECONNRESET;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
@@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = lookup_tid(t, tid);
 	BUG_ON(!ep);

-	if (ep->com.qp) {
+	if (ep && ep->com.qp) {
 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
 		       ep->com.qp->wq.sq.qid);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	} else
-		printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
+		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

 	return 0;
 }
@@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
 		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
 		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			if (ret)
-				wr_waitp->ret = -ret;
-			else
-				wr_waitp->ret = 0;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
+		if (wr_waitp)
+			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
 		kfree_skb(skb);
 		break;
 	case 2:
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);

-static LIST_HEAD(dev_list);
+static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);

 static struct dentry *c4iw_debugfs_root;
@@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 	c4iw_destroy_resource(&rdev->resource);
 }

-static void c4iw_remove(struct c4iw_dev *dev)
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
+static void c4iw_remove(struct uld_ctx *ctx)
 {
-	PDBG("%s c4iw_dev %p\n", __func__, dev);
-	list_del(&dev->entry);
-	if (dev->registered)
-		c4iw_unregister_device(dev);
-	c4iw_rdev_close(&dev->rdev);
-	idr_destroy(&dev->cqidr);
-	idr_destroy(&dev->qpidr);
-	idr_destroy(&dev->mmidr);
-	iounmap(dev->rdev.oc_mw_kva);
-	ib_dealloc_device(&dev->ibdev);
+	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+	c4iw_unregister_device(ctx->dev);
+	c4iw_rdev_close(&ctx->dev->rdev);
+	idr_destroy(&ctx->dev->cqidr);
+	idr_destroy(&ctx->dev->qpidr);
+	idr_destroy(&ctx->dev->mmidr);
+	iounmap(ctx->dev->rdev.oc_mw_kva);
+	ib_dealloc_device(&ctx->dev->ibdev);
+	ctx->dev = NULL;
 }

 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	devp->rdev.lldi = *infop;

@@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
 					  devp->rdev.lldi.vr->ocq.size);

-	printk(KERN_INFO MOD "ocq memory: "
+	PDBG(KERN_INFO MOD "ocq memory: "
 	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
 	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
 	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

-	mutex_lock(&dev_mutex);
-
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
-		mutex_unlock(&dev_mutex);
 		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
-		return NULL;
+		return ERR_PTR(ret);
 	}

 	idr_init(&devp->cqidr);
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	spin_lock_init(&devp->lock);
-	list_add_tail(&devp->entry, &dev_list);
-	mutex_unlock(&dev_mutex);

 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
@@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)

 static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 {
-	struct c4iw_dev *dev;
+	struct uld_ctx *ctx;
 	static int vers_printed;
 	int i;

@@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
 		       DRV_VERSION);

-	dev = c4iw_alloc(infop);
-	if (!dev)
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx) {
+		ctx = ERR_PTR(-ENOMEM);
 		goto out;
+	}
+	ctx->lldi = *infop;

 	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-	     __func__, pci_name(dev->rdev.lldi.pdev),
-	     dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
-	     dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+	     __func__, pci_name(ctx->lldi.pdev),
+	     ctx->lldi.nchan, ctx->lldi.nrxq,
+	     ctx->lldi.ntxq, ctx->lldi.nports);

-	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
-		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
+	mutex_lock(&dev_mutex);
+	list_add_tail(&ctx->entry, &uld_ctx_list);
+	mutex_unlock(&dev_mutex);
+
+	for (i = 0; i < ctx->lldi.nrxq; i++)
+		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
 out:
-	return dev;
+	return ctx;
 }

 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 			       const struct pkt_gl *gl)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
+	struct c4iw_dev *dev = ctx->dev;
 	struct sk_buff *skb;
 	const struct cpl_act_establish *rpl;
 	unsigned int opcode;
@@ -503,47 +512,49 @@ nomem:

 static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;

 	PDBG("%s new_state %u\n", __func__, new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
-		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
-		if (!dev->registered) {
-			int ret;
-			ret = c4iw_register_device(dev);
-			if (ret)
+		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+		if (!ctx->dev) {
+			int ret = 0;
+
+			ctx->dev = c4iw_alloc(&ctx->lldi);
+			if (!IS_ERR(ctx->dev))
+				ret = c4iw_register_device(ctx->dev);
+			if (IS_ERR(ctx->dev) || ret)
 				printk(KERN_ERR MOD
 				       "%s: RDMA registration failed: %d\n",
-				       pci_name(dev->rdev.lldi.pdev), ret);
+				       pci_name(ctx->lldi.pdev), ret);
 		}
 		break;
 	case CXGB4_STATE_DOWN:
 		printk(KERN_INFO MOD "%s: Down\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
-			c4iw_unregister_device(dev);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	case CXGB4_STATE_START_RECOVERY:
 		printk(KERN_INFO MOD "%s: Fatal Error\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		dev->rdev.flags |= T4_FATAL_ERROR;
-		if (dev->registered) {
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev) {
 			struct ib_event event;

+			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
 			memset(&event, 0, sizeof event);
 			event.event = IB_EVENT_DEVICE_FATAL;
-			event.device = &dev->ibdev;
+			event.device = &ctx->dev->ibdev;
 			ib_dispatch_event(&event);
-			c4iw_unregister_device(dev);
+			c4iw_remove(ctx);
 		}
 		break;
 	case CXGB4_STATE_DETACH:
 		printk(KERN_INFO MOD "%s: Detach\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		mutex_lock(&dev_mutex);
-		c4iw_remove(dev);
-		mutex_unlock(&dev_mutex);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	}
 	return 0;
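[Editor's note] The EEH fix separates what the lower-level driver told us (the uld_ctx with its copied cxgb4_lld_info) from the RDMA device we may or may not currently have (ctx->dev), so the device can be torn down on DOWN/recovery and re-created on the next STATE_UP. A compact userspace sketch of that ownership split (all names invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative analogue: long-lived context vs. re-creatable device. */
struct lld_info { int nports; };

struct device { struct lld_info lldi; };

struct uld_ctx {
	struct lld_info lldi;	/* always valid, copied at attach time */
	struct device *dev;	/* NULL whenever the device is torn down */
};

static void state_up(struct uld_ctx *ctx)
{
	if (!ctx->dev) {			/* (re)create on UP */
		ctx->dev = malloc(sizeof(*ctx->dev));
		ctx->dev->lldi = ctx->lldi;
		printf("device created, %d ports\n", ctx->dev->lldi.nports);
	}
}

static void state_down(struct uld_ctx *ctx)
{
	if (ctx->dev) {				/* remove on DOWN/fatal error */
		free(ctx->dev);
		ctx->dev = NULL;
		printf("device removed, ctx kept\n");
	}
}

int main(void)
{
	struct uld_ctx ctx = { .lldi = { .nports = 2 }, .dev = NULL };

	state_up(&ctx);
	state_down(&ctx);
	state_up(&ctx);		/* recovery: same ctx, fresh device */
	state_down(&ctx);
	return 0;
}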
@@ -576,11 +587,13 @@ static int __init c4iw_init_module(void)

 static void __exit c4iw_exit_module(void)
 {
-	struct c4iw_dev *dev, *tmp;
+	struct uld_ctx *ctx, *tmp;

 	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
-		c4iw_remove(dev);
+	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
+		if (ctx->dev)
+			c4iw_remove(ctx);
+		kfree(ctx);
 	}
 	mutex_unlock(&dev_mutex);
 	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)

 #define C4IW_WR_TO (10*HZ)

+enum {
+	REPLY_READY = 0,
+};
+
 struct c4iw_wr_wait {
 	wait_queue_head_t wait;
-	int done;
+	unsigned long status;
 	int ret;
 };

 static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
 {
 	wr_waitp->ret = 0;
-	wr_waitp->done = 0;
+	wr_waitp->status = 0;
 	init_waitqueue_head(&wr_waitp->wait);
 }

+static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+	wr_waitp->ret = ret;
+	set_bit(REPLY_READY, &wr_waitp->status);
+	wake_up(&wr_waitp->wait);
+}
+
 static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 				      struct c4iw_wr_wait *wr_waitp,
 				      u32 hwtid, u32 qpid,
 				      const char *func)
 {
 	unsigned to = C4IW_WR_TO;
-	do {
+	int ret;

-		wait_event_timeout(wr_waitp->wait, wr_waitp->done, to);
-		if (!wr_waitp->done) {
+	do {
+		ret = wait_event_timeout(wr_waitp->wait,
+			test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
+		if (!ret) {
 			printk(KERN_ERR MOD "%s - Device %s not responding - "
 			       "tid %u qpid %u\n", func,
 			       pci_name(rdev->lldi.pdev), hwtid, qpid);
+			if (c4iw_fatal_error(rdev)) {
+				wr_waitp->ret = -EIO;
+				break;
+			}
 			to = to << 2;
 		}
-	} while (!wr_waitp->done);
+	} while (!ret);
 	if (wr_waitp->ret)
-		printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n",
-		       pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+		PDBG("%s: FW reply %d tid %u qpid %u\n",
+		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
 	return wr_waitp->ret;
 }


 struct c4iw_dev {
 	struct ib_device ibdev;
 	struct c4iw_rdev rdev;
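[Editor's note] The point of this change: with a plain `done` flag that is never cleared, a wait structure reused for a second firmware operation can see the stale `done` from the first reply and return early; test_and_clear_bit() makes consuming the wakeup atomic with re-arming the waiter. A userspace analogue with C11 atomics (illustrative, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative analogue of the c4iw_wr_wait fix: the waiter consumes the
 * ready bit atomically, so a reused waiter cannot see a stale "done". */
static atomic_uint status;

static void wake_up_reply(void)
{
	atomic_fetch_or(&status, 1u);	/* set_bit(REPLY_READY) + wake_up() */
}

static int consume_reply(void)
{
	/* test_and_clear_bit(): nonzero only for a fresh wakeup, and the
	 * bit is left clear for the next use of the waiter. */
	return atomic_fetch_and(&status, ~1u) & 1u;
}

int main(void)
{
	wake_up_reply();
	printf("first wait:  %d\n", consume_reply());	/* 1: reply seen */
	printf("second wait: %d\n", consume_reply());	/* 0: must block again */
	return 0;
}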
@@ -175,9 +191,7 @@ struct c4iw_dev {
 	struct idr qpidr;
 	struct idr mmidr;
 	spinlock_t lock;
-	struct list_head entry;
 	struct dentry *debugfs_root;
-	u8 registered;
 };

 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
@@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
 		if (ret)
 			goto bail2;
 	}
-	dev->registered = 1;
 	return 0;
 bail2:
 	ib_unregister_device(&dev->ibdev);
@@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 			   c4iw_class_attributes[i]);
 	ib_unregister_device(&dev->ibdev);
 	kfree(dev->ibdev.iwcm);
-	dev->registered = 0;
 	return;
 }
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
@@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
 		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
 		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
-		t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 |
+		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
 		V_FW_RI_RES_WR_IQID(scq->cqid));
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
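[Editor's note] The bug being fixed: the conditional operator binds more loosely than `|`, so the unparenthesized expression parses as cond ? ONCHIP : (0 | IQID), which drops the IQID field whenever the condition is true. A two-value demonstration (macro values invented for illustration):

#include <stdio.h>

#define ONCHIP 0x10
#define IQID   0x03

int main(void)
{
	int cond = 1;
	/* Unparenthesized: parses as cond ? ONCHIP : (0 | IQID) */
	int buggy = cond ? ONCHIP : 0 | IQID;
	/* Intended grouping */
	int fixed = (cond ? ONCHIP : 0) | IQID;

	printf("buggy=0x%x fixed=0x%x\n", buggy, fixed);	/* 0x10 vs 0x13 */
	return 0;
}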
@@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			if (ret) {
 				if (internal)
 					c4iw_get_ep(&qhp->ep->com);
-				disconnect = abort = 1;
 				goto err;
 			}
 			break;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	struct ipath_devdata *dd;
 	unsigned long long addr;
 	u32 bar0 = 0, bar1 = 0;
-	u8 rev;

 	dd = ipath_alloc_devdata(pdev);
 	if (IS_ERR(dd)) {
@@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 		goto bail_regions;
 	}

-	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-	if (ret) {
-		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
-			      "%u: err %d\n", dd->ipath_unit, -ret);
-		goto bail_regions;	/* shouldn't ever happen */
-	}
-	dd->ipath_pcirev = rev;
+	dd->ipath_pcirev = pdev->revision;

 #if defined(__powerpc__)
 	/* There isn't a generic way to specify writethrough mappings */
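[Editor's note] The PCI core caches the revision byte in struct pci_dev at enumeration time, so drivers can read pdev->revision directly instead of issuing a config-space read that can fail. A minimal probe-function sketch of the idiom (hypothetical driver, kernel-style; not from this diff):

#include <linux/pci.h>

/* Hypothetical probe fragment: prefer the cached revision over a
 * pci_read_config_byte(pdev, PCI_REVISION_ID, ...) round trip. */
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	u8 rev = pdev->revision;	/* cached by the PCI core */

	dev_info(&pdev->dev, "silicon revision 0x%02x\n", rev);
	return 0;
}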
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2563,7 +2563,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 	u16 last_ae;
 	u8 original_hw_tcp_state;
 	u8 original_ibqp_state;
-	enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK;
+	int disconn_status = 0;
 	int issue_disconn = 0;
 	int issue_close = 0;
 	int issue_flush = 0;
@@ -2605,7 +2605,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 	    (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
 		issue_disconn = 1;
 		if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
-			disconn_status = IW_CM_EVENT_STATUS_RESET;
+			disconn_status = -ECONNRESET;
 	}

 	if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
@@ -2666,7 +2666,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 			cm_id->provider_data = nesqp;
 			/* Send up the close complete event */
 			cm_event.event = IW_CM_EVENT_CLOSE;
-			cm_event.status = IW_CM_EVENT_STATUS_OK;
+			cm_event.status = 0;
 			cm_event.provider_data = cm_id->provider_data;
 			cm_event.local_addr = cm_id->local_addr;
 			cm_event.remote_addr = cm_id->remote_addr;
@@ -2966,7 +2966,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	nes_add_ref(&nesqp->ibqp);

 	cm_event.event = IW_CM_EVENT_ESTABLISHED;
-	cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+	cm_event.status = 0;
 	cm_event.provider_data = (void *)nesqp;
 	cm_event.local_addr = cm_id->local_addr;
 	cm_event.remote_addr = cm_id->remote_addr;
@@ -3377,7 +3377,7 @@ static void cm_event_connected(struct nes_cm_event *event)

 	/* notify OF layer we successfully created the requested connection */
 	cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-	cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+	cm_event.status = 0;
 	cm_event.provider_data = cm_id->provider_data;
 	cm_event.local_addr.sin_family = AF_INET;
 	cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
@@ -3484,7 +3484,7 @@ static void cm_event_reset(struct nes_cm_event *event)
 	nesqp->cm_id = NULL;
 	/* cm_id->provider_data = NULL; */
 	cm_event.event = IW_CM_EVENT_DISCONNECT;
-	cm_event.status = IW_CM_EVENT_STATUS_RESET;
+	cm_event.status = -ECONNRESET;
 	cm_event.provider_data = cm_id->provider_data;
 	cm_event.local_addr = cm_id->local_addr;
 	cm_event.remote_addr = cm_id->remote_addr;
@@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event)
 	ret = cm_id->event_handler(cm_id, &cm_event);
 	atomic_inc(&cm_closes);
 	cm_event.event = IW_CM_EVENT_CLOSE;
-	cm_event.status = IW_CM_EVENT_STATUS_OK;
+	cm_event.status = 0;
 	cm_event.provider_data = cm_id->provider_data;
 	cm_event.local_addr = cm_id->local_addr;
 	cm_event.remote_addr = cm_id->remote_addr;
@@ -3534,7 +3534,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
 			cm_node, cm_id, jiffies);

 	cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
-	cm_event.status = IW_CM_EVENT_STATUS_OK;
+	cm_event.status = 0;
 	cm_event.provider_data = (void *)cm_node;

 	cm_event.local_addr.sin_family = AF_INET;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1484,7 +1484,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
 	    (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) {
 		cm_id = nesqp->cm_id;
 		cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-		cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT;
+		cm_event.status = -ETIMEDOUT;
 		cm_event.local_addr = cm_id->local_addr;
 		cm_event.remote_addr = cm_id->remote_addr;
 		cm_event.private_data = NULL;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
 	tstart = get_jiffies_64();
 	while (chan_done &&
-	       !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+	       !time_after64(get_jiffies_64(),
+			tstart + msecs_to_jiffies(500))) {
 		msleep(20);
 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
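[Editor's note] The old condition asked whether tstart is after tstart + 500 ms, which is never true, so on an unprogrammed board where chan_done never clears the loop spins forever; the timeout must be measured against the current jiffies. A userspace sketch of the correct bounded-poll shape (illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	bool chan_done = true;		/* never clears on a bad board */
	long long tstart = now_ms();

	/* Compare the *current* time against the deadline, not tstart
	 * against tstart + 500 (which is never "after"). */
	while (chan_done && now_ms() < tstart + 500)
		usleep(20 * 1000);	/* msleep(20) analogue */

	printf("gave up after ~%lld ms\n", now_ms() - tstart);
	return 0;
}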
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
 	 */
 	devid = parent->device;
 	if (devid >= 0x25e2 && devid <= 0x25fa) {
-		u8 rev;
-
 		/* 5000 P/V/X/Z */
-		pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
-		if (rev <= 0xb2)
+		if (parent->revision <= 0xb2)
 			bits = 1U << 10;
 		else
 			bits = 7U << 10;
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
@@ -46,18 +46,9 @@ enum iw_cm_event_type {
 	IW_CM_EVENT_CLOSE		 /* close complete */
 };

-enum iw_cm_event_status {
-	IW_CM_EVENT_STATUS_OK = 0,	 /* request successful */
-	IW_CM_EVENT_STATUS_ACCEPTED = 0, /* connect request accepted */
-	IW_CM_EVENT_STATUS_REJECTED,	 /* connect request rejected */
-	IW_CM_EVENT_STATUS_TIMEOUT,	 /* the operation timed out */
-	IW_CM_EVENT_STATUS_RESET,	 /* reset from remote peer */
-	IW_CM_EVENT_STATUS_EINVAL,	 /* asynchronous failure for bad parm */
-};
-
 struct iw_cm_event {
 	enum iw_cm_event_type event;
-	enum iw_cm_event_status	 status;
+	int			 status;
 	struct sockaddr_in local_addr;
 	struct sockaddr_in remote_addr;
 	void *private_data;
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
@@ -329,4 +329,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr);
  */
 void rdma_set_service_type(struct rdma_cm_id *id, int tos);

+/**
+ * rdma_set_reuseaddr - Allow the reuse of local addresses when binding
+ *   the rdma_cm_id.
+ * @id: Communication identifier to configure.
+ * @reuse: Value indicating if the bound address is reusable.
+ *
+ * Reuse must be set before an address is bound to the id.
+ */
+int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
+
 #endif /* RDMA_CM_H */
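[Editor's note] From userspace the option is reached through rdma_set_option() on the RDMA_OPTION_ID level; assuming the librdmacm of the same era mirrors the kernel ABI below as RDMA_OPTION_ID_REUSEADDR (an assumption, not confirmed by this diff), a listener wanting a reusable port would do roughly:

#include <stdio.h>
#include <rdma/rdma_cma.h>

int main(void)
{
	struct rdma_event_channel *ch = rdma_create_event_channel();
	struct rdma_cm_id *id;
	int reuse = 1;

	if (!ch || rdma_create_id(ch, &id, NULL, RDMA_PS_TCP))
		return 1;

	/* Must precede rdma_bind_addr(), mirroring the kernel-side rule. */
	if (rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
			    &reuse, sizeof(reuse)))
		perror("rdma_set_option");

	/* ... rdma_bind_addr() and rdma_listen() would follow ... */
	rdma_destroy_id(id);
	rdma_destroy_event_channel(ch);
	return 0;
}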
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
@@ -221,8 +221,9 @@ enum {

 /* Option details */
 enum {
-	RDMA_OPTION_ID_TOS	 = 0,
-	RDMA_OPTION_IB_PATH	 = 1
+	RDMA_OPTION_ID_TOS	 = 0,
+	RDMA_OPTION_ID_REUSEADDR = 1,
+	RDMA_OPTION_IB_PATH	 = 1
 };

 struct rdma_ucm_set_option {