xprtrdma: Move send_wr to struct rpcrdma_req
Clean up: Most of the fields in each send_wr do not vary. There is
no need to initialize them before each ib_post_send(). This removes
a large-ish data structure from the stack.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 90aab60296
parent b157380af1
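
The saving is easiest to see as a before/after pair. Below is a minimal
sketch of the pattern this patch applies, condensed and simplified from
the hunks that follow (not literal kernel code):

/* Before: a struct ib_send_wr was rebuilt on the stack for every
 * send, even though most of its fields never change. */
static int post_before_sketch(struct rpcrdma_ia *ia,
                              struct rpcrdma_req *req)
{
        struct ib_send_wr send_wr, *bad_wr;

        send_wr.next = NULL;                    /* invariant */
        send_wr.wr_cqe = &req->rl_cqe;          /* invariant */
        send_wr.sg_list = req->rl_send_iov;     /* invariant */
        send_wr.opcode = IB_WR_SEND;            /* invariant */
        send_wr.num_sge = req->rl_niovs;        /* varies per call */
        send_wr.send_flags = 0;                 /* varies per call */
        return ib_post_send(ia->ri_id->qp, &send_wr, &bad_wr);
}

/* After: the WR is embedded in struct rpcrdma_req; its invariant
 * fields are initialized once at create time, so the hot path only
 * touches what actually varies. */
static int post_after_sketch(struct rpcrdma_ia *ia,
                             struct rpcrdma_req *req)
{
        struct ib_send_wr *bad_wr;

        return ib_post_send(ia->ri_id->qp, &req->rl_send_wr, &bad_wr);
}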
@@ -241,7 +241,8 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
 	req->rl_send_iov[1].length = rpclen;
 	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
-	req->rl_niovs = 2;
+	req->rl_send_wr.num_sge = 2;
+
 	return 0;
 
 out_map:
@@ -687,7 +687,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	req->rl_send_iov[0].length = hdrlen;
 	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 
-	req->rl_niovs = 1;
+	req->rl_send_wr.num_sge = 1;
 	if (rtype == rpcrdma_areadch)
 		return 0;
 
@@ -697,7 +697,8 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	req->rl_send_iov[1].length = rpclen;
 	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
-	req->rl_niovs = 2;
+	req->rl_send_wr.num_sge = 2;
+
 	return 0;
 
 out_overflow:
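
In the marshaling paths above, the only WR field that still varies is
the SGE count. A hedged condensation of the two-SGE case (hypothetical
helper, addr fields elided for brevity; not part of the patch):

static void marshal_two_sge_sketch(struct rpcrdma_req *req,
                                   u32 hdrlen, u32 rpclen)
{
        /* sge[0]: transport header; sge[1]: RPC message */
        req->rl_send_iov[0].length = hdrlen;
        req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
        req->rl_send_iov[1].length = rpclen;
        req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

        /* the count is now recorded directly in the embedded WR */
        req->rl_send_wr.num_sge = 2;
}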
@@ -849,6 +849,10 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
 	req->rl_cqe.done = rpcrdma_wc_send;
 	req->rl_buffer = &r_xprt->rx_buf;
 	INIT_LIST_HEAD(&req->rl_registered);
+	req->rl_send_wr.next = NULL;
+	req->rl_send_wr.wr_cqe = &req->rl_cqe;
+	req->rl_send_wr.sg_list = req->rl_send_iov;
+	req->rl_send_wr.opcode = IB_WR_SEND;
 	return req;
 }
 
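
One subtlety in this hunk: wr_cqe is pointed once at the request's
embedded ib_cqe, which is what lets a send completion handler recover
its context without any lookup. An illustrative sketch (simplified;
not the patch's actual handler):

static void wc_send_sketch(struct ib_cq *cq, struct ib_wc *wc)
{
        /* wc->wr_cqe is &req->rl_cqe, set once in rpcrdma_create_req() */
        struct rpcrdma_req *req =
                container_of(wc->wr_cqe, struct rpcrdma_req, rl_cqe);

        if (wc->status != IB_WC_SUCCESS)
                pr_err("rpcrdma: send for req %p failed: %s\n",
                       req, ib_wc_status_msg(wc->status));
}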
@@ -1128,7 +1132,7 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_rep *rep = req->rl_reply;
 
-	req->rl_niovs = 0;
+	req->rl_send_wr.num_sge = 0;
 	req->rl_reply = NULL;
 
 	spin_lock(&buffers->rb_lock);
@@ -1259,38 +1263,32 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
 		 struct rpcrdma_req *req)
 {
 	struct ib_device *device = ia->ri_device;
-	struct ib_send_wr send_wr, *send_wr_fail;
-	struct rpcrdma_rep *rep = req->rl_reply;
-	struct ib_sge *iov = req->rl_send_iov;
+	struct ib_send_wr *send_wr = &req->rl_send_wr;
+	struct ib_send_wr *send_wr_fail;
+	struct ib_sge *sge = req->rl_send_iov;
 	int i, rc;
 
-	if (rep) {
-		rc = rpcrdma_ep_post_recv(ia, rep);
+	if (req->rl_reply) {
+		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
 		if (rc)
 			return rc;
 		req->rl_reply = NULL;
 	}
 
-	send_wr.next = NULL;
-	send_wr.wr_cqe = &req->rl_cqe;
-	send_wr.sg_list = iov;
-	send_wr.num_sge = req->rl_niovs;
-	send_wr.opcode = IB_WR_SEND;
-
-	for (i = 0; i < send_wr.num_sge; i++)
-		ib_dma_sync_single_for_device(device, iov[i].addr,
-					      iov[i].length, DMA_TO_DEVICE);
+	for (i = 0; i < send_wr->num_sge; i++)
+		ib_dma_sync_single_for_device(device, sge[i].addr,
+					      sge[i].length, DMA_TO_DEVICE);
 	dprintk("RPC:       %s: posting %d s/g entries\n",
-		__func__, send_wr.num_sge);
+		__func__, send_wr->num_sge);
 
 	if (DECR_CQCOUNT(ep) > 0)
-		send_wr.send_flags = 0;
+		send_wr->send_flags = 0;
 	else { /* Provider must take a send completion every now and then */
 		INIT_CQCOUNT(ep);
-		send_wr.send_flags = IB_SEND_SIGNALED;
+		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
 
-	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
+	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
 	if (rc)
 		goto out_postsend_err;
 	return 0;
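
The send_flags branch above implements periodic completion signaling:
most sends go unsignaled, but every so often one must be signaled so
the provider can reclaim send-queue resources. A simplified stand-in
for the CQCOUNT macros (illustrative only; the real macros live in
xprt_rdma.h):

static bool need_send_signal_sketch(atomic_t *cqcount, int cqinit)
{
        if (atomic_dec_return(cqcount) > 0)
                return false;           /* post unsignaled */
        atomic_set(cqcount, cqinit);    /* reset the budget... */
        return true;                    /* ...and request a completion */
}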
@@ -284,10 +284,10 @@ struct rpcrdma_mr_seg { /* chunk descriptors */
 struct rpcrdma_buffer;
 struct rpcrdma_req {
 	struct list_head	rl_free;
-	unsigned int		rl_niovs;
 	unsigned int		rl_connect_cookie;
 	struct rpcrdma_buffer	*rl_buffer;
-	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
+	struct rpcrdma_rep	*rl_reply;
+	struct ib_send_wr	rl_send_wr;
 	struct ib_sge		rl_send_iov[RPCRDMA_MAX_IOVS];
 	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
 	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
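
Embedding rl_send_wr is safe because it shares the request's
allocation and lifetime, so wiring sg_list to the adjacent array once
stays valid for as long as the request exists. A hedged sketch of that
property (hypothetical function name, not from the patch):

static struct rpcrdma_req *req_alloc_sketch(void)
{
        struct rpcrdma_req *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return NULL;

        /* the WR and its SGE array live and die together */
        req->rl_send_wr.sg_list = req->rl_send_iov;
        return req;
}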