RDMA/iw_cxgb4: Low resource fixes for connection manager
Pre-allocate buffers for sending the various control messages (close connection, abort connection, etc.) so that we can gracefully handle connections when the system is running out of memory.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 4a740838bf (parent 4c72efefd9)
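The pattern this patch applies, in brief: at connection setup time, pre-allocate one skb for every control message the connection could ever need to send (each sized for the largest control work request, via the union cpl_wr_size added below), park them on a per-endpoint queue (ep_skb_list), and have the send paths dequeue from that queue instead of calling the allocator at the moment memory is scarce. Below is a minimal userspace C sketch of the idea; the message types and sizes are hypothetical stand-ins for the driver's CPL work requests, not its actual definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical control-message kinds, mirroring enum conn_pre_alloc_buffers. */
enum pre_alloc_buffers { BUF_ABORT_REQ, BUF_CLOSE_REQ, BUF_FLOWC, MAX_CON_BUF };

/* Size every buffer for the largest message, as union cpl_wr_size does. */
union wr_size { char abort_req[32]; char close_req[48]; char flowc[80]; };

struct buf_list { void *bufs[MAX_CON_BUF]; int count; };

/* Fill the pool up-front, while an allocation failure can still be
 * reported cleanly to the caller. */
static int alloc_buf_list(struct buf_list *l, int size)
{
	for (int i = 0; i < size; i++) {
		void *b = malloc(sizeof(union wr_size));
		if (!b) {
			while (l->count)
				free(l->bufs[--l->count]);
			return -1;	/* -ENOMEM in the kernel version */
		}
		l->bufs[l->count++] = b;
	}
	return 0;
}

/* Send paths dequeue instead of allocating; an empty pool here is a bug,
 * the analogue of the patch's WARN_ON(!skb). */
static void *dequeue_buf(struct buf_list *l)
{
	if (l->count == 0) {
		fprintf(stderr, "bug: control-buffer pool empty\n");
		return NULL;
	}
	return l->bufs[--l->count];
}

int main(void)
{
	struct buf_list pool = { .count = 0 };

	if (alloc_buf_list(&pool, MAX_CON_BUF))
		return 1;
	/* Later, even under memory pressure, staging the close message
	 * cannot fail: */
	void *close_msg = dequeue_buf(&pool);
	if (!close_msg)
		return 1;
	memset(close_msg, 0, sizeof(union wr_size));
	printf("close message staged from pre-allocated pool\n");
	free(close_msg);
	while (pool.count)
		free(pool.bufs[--pool.count]);
	return 0;
}

The kernel version differs mainly in plumbing: alloc_ep_skb_list() fills a struct sk_buff_head with alloc_skb(..., GFP_KERNEL), and each send path pairs skb_dequeue() with a WARN_ON(), since an empty pool at send time indicates a driver bug rather than ordinary memory pressure.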
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -294,6 +294,25 @@ static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 	return;
 }
 
+static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
+{
+	struct sk_buff *skb;
+	unsigned int i;
+	size_t len;
+
+	len = roundup(sizeof(union cpl_wr_size), 16);
+	for (i = 0; i < size; i++) {
+		skb = alloc_skb(len, GFP_KERNEL);
+		if (!skb)
+			goto fail;
+		skb_queue_tail(ep_skb_list, skb);
+	}
+	return 0;
+fail:
+	skb_queue_purge(ep_skb_list);
+	return -ENOMEM;
+}
+
 static void *alloc_ep(int size, gfp_t gfp)
 {
 	struct c4iw_ep_common *epc;
@@ -384,6 +403,8 @@ void _c4iw_free_ep(struct kref *kref)
 		if (ep->mpa_skb)
 			kfree_skb(ep->mpa_skb);
 	}
+	if (!skb_queue_empty(&ep->com.ep_skb_list))
+		skb_queue_purge(&ep->com.ep_skb_list);
 	kfree(ep);
 }
 
@@ -620,25 +641,27 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 	}
 }
 
-static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep)
 {
-	unsigned int flowclen = 80;
 	struct fw_flowc_wr *flowc;
+	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
 	int i;
 	u16 vlan = ep->l2t->vlan;
 	int nparams;
 
+	if (WARN_ON(!skb))
+		return -ENOMEM;
+
 	if (vlan == CPL_L2T_VLAN_NONE)
 		nparams = 8;
 	else
 		nparams = 9;
 
-	skb = get_skb(skb, flowclen, GFP_KERNEL);
-	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+	flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
 
 	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
 					   FW_FLOWC_WR_NPARAMS_V(nparams));
-	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
 					  16)) | FW_WR_FLOWID_V(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
@@ -679,18 +702,16 @@ static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
 }
 
-static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+static int send_halfclose(struct c4iw_ep *ep)
 {
 	struct cpl_close_con_req *req;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
 	int wrlen = roundup(sizeof *req, 16);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	skb = get_skb(NULL, wrlen, gfp);
-	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+	if (WARN_ON(!skb))
 		return -ENOMEM;
-	}
+
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
@@ -701,26 +722,24 @@ static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+static int send_abort(struct c4iw_ep *ep)
 {
 	struct cpl_abort_req *req;
 	int wrlen = roundup(sizeof *req, 16);
+	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	skb = get_skb(skb, wrlen, gfp);
-	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __func__);
+	if (WARN_ON(!req_skb))
 		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, ep, abort_arp_failure);
-	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+
+	set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
+	t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
+	req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
 	memset(req, 0, wrlen);
 	INIT_TP_WR(req, ep->hwtid);
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
 	req->cmd = CPL_ABORT_SEND_RST;
-	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
 }
 
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
@@ -1261,7 +1280,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	set_bit(ACT_ESTAB, &ep->com.history);
 
 	/* start MPA negotiation */
-	ret = send_flowc(ep, NULL);
+	ret = send_flowc(ep);
 	if (ret)
 		goto err;
 	if (ep->retry_with_mpa_v1)
@@ -2147,6 +2166,7 @@ out:
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
 	int err = 0;
+	int size = 0;
 	struct sockaddr_in *laddr = (struct sockaddr_in *)
 				    &ep->com.cm_id->m_local_addr;
 	struct sockaddr_in *raddr = (struct sockaddr_in *)
@@ -2162,6 +2182,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 	init_timer(&ep->timer);
 	c4iw_init_wr_wait(&ep->com.wr_wait);
 
+	/* When MPA revision is different on nodes, the node with MPA_rev=2
+	 * tries to reconnect with MPA_rev 1 for the same EP through
+	 * c4iw_reconnect(), where the same EP is assigned with new tid for
+	 * further connection establishment. As we are using the same EP pointer
+	 * for reconnect, few skbs are used during the previous c4iw_connect(),
+	 * which leaves the EP with inadequate skbs for further
+	 * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
+	 * skb_list() during peer_abort(). Allocate skbs which is already used.
+	 */
+	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
+	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
 	/*
 	 * Allocate an active TID to initiate a TCP connection.
 	 */
@@ -2227,6 +2262,7 @@ fail2:
 	 * response of 1st connect request.
 	 */
 	connect_reply_upcall(ep, -ECONNRESET);
+fail1:
 	c4iw_put_ep(&ep->com);
 out:
 	return err;
@@ -2593,6 +2629,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
 		child_ep->mtu = peer_mss + hdrs;
 
+	skb_queue_head_init(&child_ep->com.ep_skb_list);
+	if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
+		goto fail;
+
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
@@ -2657,6 +2697,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
 	}
 	goto out;
+fail:
+	c4iw_put_ep(&child_ep->com);
 reject:
 	reject_cr(dev, hwtid, skb);
 	if (parent_ep)
@@ -2687,7 +2729,7 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep->com.state = MPA_REQ_WAIT;
 	start_ep_timer(ep);
 	set_bit(PASS_ESTAB, &ep->com.history);
-	ret = send_flowc(ep, skb);
+	ret = send_flowc(ep);
 	mutex_unlock(&ep->com.mutex);
 	if (ret)
 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
@@ -2888,10 +2930,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	mutex_unlock(&ep->com.mutex);
 
-	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
-	if (!rpl_skb) {
-		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
-		       __func__);
+	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
+	if (WARN_ON(!rpl_skb)) {
 		release = 1;
 		goto out;
 	}
@@ -3262,6 +3302,13 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		err = -ENOMEM;
 		goto out;
 	}
+
+	skb_queue_head_init(&ep->com.ep_skb_list);
+	if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
 	init_timer(&ep->timer);
 	ep->plen = conn_param->private_data_len;
 	if (ep->plen)
@@ -3280,7 +3327,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (!ep->com.qp) {
 		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
 		err = -EINVAL;
-		goto fail1;
+		goto fail2;
 	}
 	ref_qp(ep);
 	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
@@ -3293,7 +3340,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (ep->atid == -1) {
 		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
 		err = -ENOMEM;
-		goto fail1;
+		goto fail2;
 	}
 	insert_handle(dev, &dev->atid_idr, ep, ep->atid);
 
@@ -3317,7 +3364,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
 		err = pick_local_ipaddrs(dev, cm_id);
 		if (err)
-			goto fail1;
+			goto fail2;
 	}
 
 	/* find a route */
@@ -3337,7 +3384,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
 		err = pick_local_ip6addrs(dev, cm_id);
 		if (err)
-			goto fail1;
+			goto fail2;
 	}
 
 	/* find a route */
@@ -3353,14 +3400,14 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (!ep->dst) {
 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
 		err = -EHOSTUNREACH;
-		goto fail2;
+		goto fail3;
 	}
 
 	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
 			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
 	if (err) {
 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-		goto fail3;
+		goto fail4;
 	}
 
 	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
@@ -3376,13 +3423,15 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	goto out;
 
 	cxgb4_l2t_release(ep->l2t);
-fail3:
+fail4:
 	dst_release(ep->dst);
-fail2:
+fail3:
 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail1:
+fail2:
+	skb_queue_purge(&ep->com.ep_skb_list);
 	deref_cm_id(&ep->com);
+fail1:
 	c4iw_put_ep(&ep->com);
 out:
 	return err;
@@ -3475,6 +3524,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		err = -ENOMEM;
 		goto fail1;
 	}
+	skb_queue_head_init(&ep->com.ep_skb_list);
 	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.cm_id = cm_id;
 	ref_cm_id(&ep->com);
@@ -3591,6 +3641,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	case MPA_REQ_RCVD:
 	case MPA_REP_SENT:
 	case FPDU_MODE:
+	case CONNECTING:
 		close = 1;
 		if (abrupt)
 			ep->com.state = ABORTING;
@@ -3625,10 +3676,10 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 		if (abrupt) {
 			set_bit(EP_DISC_ABORT, &ep->com.history);
 			close_complete_upcall(ep, -ECONNRESET);
-			ret = send_abort(ep, NULL, gfp);
+			ret = send_abort(ep);
 		} else {
 			set_bit(EP_DISC_CLOSE, &ep->com.history);
-			ret = send_halfclose(ep, gfp);
+			ret = send_halfclose(ep);
 		}
 		if (ret) {
 			set_bit(EP_DISC_FAIL, &ep->com.history);
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -789,10 +789,29 @@ enum c4iw_ep_history {
 	CM_ID_DEREFED = 28,
 };
 
+enum conn_pre_alloc_buffers {
+	CN_ABORT_REQ_BUF,
+	CN_ABORT_RPL_BUF,
+	CN_CLOSE_CON_REQ_BUF,
+	CN_DESTROY_BUF,
+	CN_FLOWC_BUF,
+	CN_MAX_CON_BUF
+};
+
+#define FLOWC_LEN 80
+union cpl_wr_size {
+	struct cpl_abort_req abrt_req;
+	struct cpl_abort_rpl abrt_rpl;
+	struct fw_ri_wr ri_req;
+	struct cpl_close_con_req close_req;
+	char flowc_buf[FLOWC_LEN];
+};
+
 struct c4iw_ep_common {
 	struct iw_cm_id *cm_id;
 	struct c4iw_qp *qp;
 	struct c4iw_dev *dev;
+	struct sk_buff_head ep_skb_list;
 	enum c4iw_ep_state state;
 	struct kref kref;
 	struct mutex mutex;
|
|||||||
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
|
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
|
||||||
qhp->ep->hwtid);
|
qhp->ep->hwtid);
|
||||||
|
|
||||||
skb = alloc_skb(sizeof *wqe, gfp);
|
skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
|
||||||
if (!skb)
|
if (WARN_ON(!skb))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
|
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
|
||||||
|
|
||||||
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
|
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
|
||||||
@ -1202,9 +1203,10 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
|||||||
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
|
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
|
||||||
ep->hwtid);
|
ep->hwtid);
|
||||||
|
|
||||||
skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
|
skb = skb_dequeue(&ep->com.ep_skb_list);
|
||||||
if (!skb)
|
if (WARN_ON(!skb))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
|
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
|
||||||
|
|
||||||
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
|
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
|
||||||
|
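A closing note on the c4iw_reconnect() hunk above: because the endpoint object is reused when an MPA_rev=2 node falls back to MPA revision 1, part of its pre-allocated pool was already consumed by the first connect attempt, so the patch only tops the queue back up to CN_MAX_CON_BUF rather than re-allocating it from scratch. A tiny self-contained sketch of that replenishment arithmetic (MAX_CON_BUF here is a stand-in for CN_MAX_CON_BUF, which the enum above makes 5):

#include <assert.h>

#define MAX_CON_BUF 5	/* stand-in for CN_MAX_CON_BUF (5 buffer kinds) */

/* Mirrors: size = CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list);
 * i.e. allocate only as many fresh buffers as were consumed. */
static int buffers_to_replenish(int still_queued)
{
	return MAX_CON_BUF - still_queued;
}

int main(void)
{
	assert(buffers_to_replenish(5) == 0);	/* untouched pool: nothing to do */
	assert(buffers_to_replenish(2) == 3);	/* three messages sent before reconnect */
	return 0;
}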