xprtrdma: "Unoptimize" rpcrdma_prepare_hdr_sge()

Commit 655fec6987 ("xprtrdma: Use gathered Send for large inline
messages") assumed that, since the zeroeth element of the Send SGE
array always pointed to req->rl_rdmabuf, it needed to be initialized
just once. This was a valid assumption because the Send SGE array
and rl_rdmabuf both live in the same rpcrdma_req.

In a subsequent patch, the Send SGE array will be separated from the
rpcrdma_req, so the zeroeth element of the SGE array needs to be
initialized every time.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Author:    Chuck Lever
Date:      2017-10-20 10:48:03 -04:00
Committer: Anna Schumaker
parent 857f9acab9
commit a062a2a3ef


@@ -533,7 +533,7 @@ rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 			sge->addr, sge->length, DMA_TO_DEVICE);
 }
 
-/* Prepare the RPC-over-RDMA header SGE.
+/* Prepare an SGE for the RPC-over-RDMA transport header.
  */
 static bool
 rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
@@ -542,13 +542,11 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
 	struct ib_sge *sge = &req->rl_send_sge[0];
 
-	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
-		if (!__rpcrdma_dma_map_regbuf(ia, rb))
-			goto out_regbuf;
-		sge->addr = rdmab_addr(rb);
-		sge->lkey = rdmab_lkey(rb);
-	}
+	if (!rpcrdma_dma_map_regbuf(ia, rb))
+		goto out_regbuf;
+	sge->addr = rdmab_addr(rb);
 	sge->length = len;
+	sge->lkey = rdmab_lkey(rb);
 
 	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
 			sge->length, DMA_TO_DEVICE);
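
Dropping the open-coded rpcrdma_regbuf_is_mapped() test at the call site is safe because rpcrdma_dma_map_regbuf() performs that check internally; the difference after this patch is only that sge->addr and sge->lkey are now written on every call. For context, the helper is an inline wrapper along the lines of the sketch below (reproduced from memory of the xprt_rdma.h of this era, not part of this patch):

	/* Sketch of the rpcrdma_dma_map_regbuf() wrapper assumed by the
	 * new call site above; reproduced for illustration only.
	 */
	static inline bool
	rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
	{
		/* Fast path: the regbuf was already DMA-mapped earlier. */
		if (likely(rpcrdma_regbuf_is_mapped(rb)))
			return true;
		/* Slow path: map it now; returns false on failure. */
		return __rpcrdma_dma_map_regbuf(ia, rb);
	}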