2007-12-12 22:13:23 +00:00
|
|
|
/*
|
2017-06-23 21:17:35 +00:00
|
|
|
* Copyright (c) 2016, 2017 Oracle. All rights reserved.
|
2014-05-28 20:12:01 +00:00
|
|
|
* Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
|
2007-12-12 22:13:23 +00:00
|
|
|
* Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* This software is available to you under a choice of one of two
|
|
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
|
|
* General Public License (GPL) Version 2, available from the file
|
|
|
|
* COPYING in the main directory of this source tree, or the BSD-type
|
|
|
|
* license below:
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
*
|
|
|
|
* Redistributions in binary form must reproduce the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer in the documentation and/or other materials provided
|
|
|
|
* with the distribution.
|
|
|
|
*
|
|
|
|
* Neither the name of the Network Appliance, Inc. nor the names of
|
|
|
|
* its contributors may be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written
|
|
|
|
* permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* Author: Tom Tucker <tom@opengridcomputing.com>
|
|
|
|
*/
|
|
|
|
|
2017-06-23 21:17:35 +00:00
|
|
|
#include <linux/sunrpc/xdr.h>
|
2007-12-12 22:13:23 +00:00
|
|
|
#include <linux/sunrpc/debug.h>
|
|
|
|
#include <linux/sunrpc/rpc_rdma.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <asm/unaligned.h>
|
|
|
|
#include <rdma/ib_verbs.h>
|
|
|
|
#include <rdma/rdma_cm.h>
|
|
|
|
#include <linux/sunrpc/svc_rdma.h>
|
|
|
|
|
|
|
|
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
|
|
|
|
|
|
|
|
/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 *
 * The received transport header and any inline RPC message land in the
 * head iovec (first page); remaining payload bytes are described by the
 * page list. Pages are transferred by pointer swap, not copy: each page
 * from the receive context replaces the corresponding rqstp page, whose
 * reference is dropped with put_page().
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct rpcrdma_msg *rmsgp;
	struct page *page;
	u32 bc;
	int sge_no;

	/* Swap the page in the SGE with the page in argpages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head: at most one SGE's worth of data */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len =
		min_t(size_t, byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;

	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data.
	 * For rdma_nomsg the page list starts at rq_pages[0] so READ
	 * payload directly follows the received bytes; otherwise it
	 * starts at rq_pages[1], past the head page.
	 */
	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
	if (rmsgp->rm_type == rdma_nomsg)
		rqstp->rq_arg.pages = &rqstp->rq_pages[0];
	else
		rqstp->rq_arg.pages = &rqstp->rq_pages[1];

	/* Swap in the remaining received pages that carry payload */
	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	/* ctxt now owns only the pages actually consumed */
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}
|
|
|
|
|
2017-06-23 21:17:35 +00:00
|
|
|
/* Walk the Read list of an incoming RPC-over-RDMA transport header.
 *
 * @p: first word of the Read list (a chunk discriminator)
 * @end: one past the last valid word of the received header
 *
 * Returns a pointer to the word following the list's terminating
 * xdr_zero discriminator, or NULL if the list is malformed or is
 * not fully contained in [p, end).
 *
 * Fix: the previous form (while (*p++ != xdr_zero)) dereferenced the
 * discriminator before verifying p < end, so a chunk list that ran
 * exactly to the end of the received buffer caused a one-word read
 * past @end. Each discriminator is now bounds-checked before use.
 */
static __be32 *xdr_check_read_list(__be32 *p, __be32 *end)
{
	__be32 *next;

	while (p < end) {
		if (*p++ == xdr_zero)
			return p;
		/* Skip one read segment: position + handle/length/offset */
		next = p + rpcrdma_readchunk_maxsz - 1;
		if (next > end)
			return NULL;
		p = next;
	}
	/* List not terminated inside the received header */
	return NULL;
}
|
|
|
|
|
|
|
|
/* Walk the Write list of an incoming RPC-over-RDMA transport header.
 *
 * @p: first word of the Write list (a chunk discriminator)
 * @end: one past the last valid word of the received header
 *
 * Returns a pointer to the word following the list's terminating
 * xdr_zero discriminator, or NULL if the list is malformed or is
 * not fully contained in [p, end).
 *
 * Fixes: (1) the discriminator and segment-count words are now
 * bounds-checked before being dereferenced; (2) the wire-supplied
 * segment count is validated with an overflow-safe comparison before
 * it is used in pointer arithmetic, so a huge count can no longer
 * wrap the computed end-of-chunk pointer.
 */
static __be32 *xdr_check_write_list(__be32 *p, __be32 *end)
{
	u32 segcount;

	while (p < end) {
		if (*p++ == xdr_zero)
			return p;
		/* The segment count word itself must be present */
		if (p == end)
			return NULL;
		segcount = be32_to_cpup(p++);
		/* Overflow-safe: compare count against remaining space */
		if (segcount > (unsigned long)(end - p) / rpcrdma_segment_maxsz)
			return NULL;
		p += segcount * rpcrdma_segment_maxsz;
	}
	/* List not terminated inside the received header */
	return NULL;
}
|
|
|
|
|
|
|
|
/* Check the Reply chunk of an incoming RPC-over-RDMA transport header.
 *
 * @p: the Reply chunk discriminator word
 * @end: one past the last valid word of the received header
 *
 * Returns a pointer to the word following the Reply chunk, or NULL
 * if the chunk is malformed or not fully contained in [p, end).
 *
 * Fixes: the discriminator and segment-count words are bounds-checked
 * before being dereferenced, and the wire-supplied segment count is
 * validated with an overflow-safe comparison before it is used in
 * pointer arithmetic.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, __be32 *end)
{
	u32 segcount;

	/* The discriminator itself must lie inside the header */
	if (p == end)
		return NULL;
	if (*p++ != xdr_zero) {
		if (p == end)
			return NULL;
		segcount = be32_to_cpup(p++);
		/* Overflow-safe: compare count against remaining space */
		if (segcount > (unsigned long)(end - p) / rpcrdma_segment_maxsz)
			return NULL;
		p += segcount * rpcrdma_segment_maxsz;
	}
	return p;
}
|
|
|
|
|
|
|
|
/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Return values:
 *   > 0: header length in bytes (success)
 *     0: RDMA_DONE/RDMA_ERROR message; caller should drop it
 *   -EINVAL: short or malformed header
 *   -EPROTONOSUPPORT: unsupported RPC-over-RDMA protocol version
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;
	char *proc;

	/* Verify that there's enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	/* Word 1 of the fixed header is the protocol version */
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	/* Word 3 is the message (procedure) type */
	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		proc = "RDMA_MSG";
		break;
	case rdma_nomsg:
		proc = "RDMA_NOMSG";
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	/* Sanity-walk the three chunk lists that follow the fixed
	 * header (4 words). Each helper returns a pointer past the
	 * list it checked, or NULL on a malformed list.
	 */
	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;
	if (p > end)
		goto out_inval;

	/* Advance head past the transport header so head now covers
	 * only the RPC message (if any).
	 */
	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	dprintk("svcrdma: received %s request for XID 0x%08x, hdr_len=%u\n",
		proc, be32_to_cpup(rdma_argp), hdr_len);
	return hdr_len;

out_short:
	dprintk("svcrdma: header too short = %d\n", rq_arg->len);
	return -EINVAL;

out_version:
	dprintk("svcrdma: bad xprt version: %u\n",
		be32_to_cpup(rdma_argp + 1));
	return -EPROTONOSUPPORT;

out_drop:
	dprintk("svcrdma: dropping RDMA_DONE/ERROR message\n");
	return 0;

out_proc:
	dprintk("svcrdma: bad rdma procedure (%u)\n",
		be32_to_cpup(rdma_argp + 3));
	return -EINVAL;

out_inval:
	dprintk("svcrdma: failed to parse transport header\n");
	return -EINVAL;
}
|
|
|
|
|
2014-05-28 20:12:01 +00:00
|
|
|
/* Issue an RDMA_READ using the local lkey to map the data sink.
 *
 * Pulls up to xprt->sc_max_sge_rd pages' worth of one read chunk
 * from the client into rqstp's page array, DMA-mapping each page
 * into a fresh op context, then posts a single RDMA READ work
 * request for the mapped range.
 *
 * @page_no/@page_offset are in/out cursors into the page array;
 * on success they are advanced past the bytes this call covered.
 *
 * Returns the number of bytes the posted READ will transfer, or a
 * negative errno. The READ completes asynchronously; completion is
 * reported through svc_rdma_wc_read via ctxt->cqe.
 */
int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
			struct svc_rqst *rqstp,
			struct svc_rdma_op_ctxt *head,
			int *page_no,
			u32 *page_offset,
			u32 rs_handle,
			u32 rs_length,
			u64 rs_offset,
			bool last)
{
	struct ib_rdma_wr read_wr;
	int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	int ret, read, pno;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->read_hdr = head;
	/* The device limits how many SGEs one READ may carry; the
	 * caller loops until the whole chunk has been issued.
	 */
	pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
	read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
		     rs_length);

	for (pno = 0; pno < pages_needed; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		/* Hand the rqstp page to the head context, which owns
		 * all pages of the request until the READs complete.
		 */
		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;

		head->arg.len += len;
		/* Count each page only once (when starting at offset 0) */
		if (!pg_off)
			head->count++;
		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;
		ctxt->sge[pno].addr =
			ib_dma_map_page(xprt->sc_cm_id->device,
					head->arg.pages[pg_no], pg_off,
					PAGE_SIZE - pg_off,
					DMA_FROM_DEVICE);
		ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
					   ctxt->sge[pno].addr);
		if (ret)
			goto err;
		svc_rdma_count_mappings(xprt, ctxt);

		ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->sge[pno].length = len;
		ctxt->count++;

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	/* Mark the context completing the final READ of the final
	 * chunk so the completion handler can finish the request.
	 */
	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

	memset(&read_wr, 0, sizeof(read_wr));
	ctxt->cqe.done = svc_rdma_wc_read;
	read_wr.wr.wr_cqe = &ctxt->cqe;
	read_wr.wr.opcode = IB_WR_RDMA_READ;
	read_wr.wr.send_flags = IB_SEND_SIGNALED;
	read_wr.rkey = rs_handle;
	read_wr.remote_addr = rs_offset;
	read_wr.wr.sg_list = ctxt->sge;
	read_wr.wr.num_sge = pages_needed;

	ret = svc_rdma_send(xprt, &read_wr.wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	/* Unmap whatever was mapped and release the context; the
	 * negative errno from the failing call is returned as-is.
	 */
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}
|
|
|
|
|
2014-05-28 20:12:01 +00:00
|
|
|
/* Issue an RDMA_READ using an FRMR to map the data sink.
 *
 * Registers up to sc_frmr_pg_list_len pages of the data sink with a
 * fast-registration MR, then posts a REG_MR work request chained to
 * a single-SGE RDMA READ. If the device cannot invalidate via
 * READ_WITH_INV, a LOCAL_INV work request is chained after the READ.
 *
 * @page_no/@page_offset are in/out cursors into the page array;
 * on success they are advanced past the bytes this call covered.
 *
 * Returns the number of bytes the posted READ will transfer, or a
 * negative errno.
 */
int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *head,
			 int *page_no,
			 u32 *page_offset,
			 u32 rs_handle,
			 u32 rs_length,
			 u64 rs_offset,
			 bool last)
{
	struct ib_rdma_wr read_wr;
	struct ib_send_wr inv_wr;
	struct ib_reg_wr reg_wr;
	u8 key;
	int nents = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
	int ret, read, pno, dma_nents, n;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	/* NOTE(review): ctxt was already allocated above; this early
	 * return appears to leak it — confirm against later upstream
	 * cleanups of this path.
	 */
	if (IS_ERR(frmr))
		return -ENOMEM;

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->frmr = frmr;
	nents = min_t(unsigned int, nents, xprt->sc_frmr_pg_list_len);
	read = min_t(int, (nents << PAGE_SHIFT) - *page_offset, rs_length);

	frmr->direction = DMA_FROM_DEVICE;
	frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
	frmr->sg_nents = nents;

	for (pno = 0; pno < nents; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		/* Transfer page ownership to the head context, which
		 * holds the request's pages until the READs complete.
		 */
		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;
		head->arg.len += len;
		/* Count each page only once (when starting at offset 0) */
		if (!pg_off)
			head->count++;

		sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no],
			    len, pg_off);

		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	/* Mark the context completing the final READ of the final
	 * chunk so the completion handler can finish the request.
	 */
	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

	dma_nents = ib_dma_map_sg(xprt->sc_cm_id->device,
				  frmr->sg, frmr->sg_nents,
				  frmr->direction);
	/* NOTE(review): this and the ib_map_mr_sg failure below return
	 * without releasing ctxt or frmr — looks like a leak; verify
	 * whether later fixes route these through cleanup.
	 */
	if (!dma_nents) {
		pr_err("svcrdma: failed to dma map sg %p\n",
		       frmr->sg);
		return -ENOMEM;
	}

	n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, NULL, PAGE_SIZE);
	if (unlikely(n != frmr->sg_nents)) {
		pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
		       frmr->mr, n, frmr->sg_nents);
		return n < 0 ? n : -EINVAL;
	}

	/* Bump the key so stale remote accesses with the old rkey fail */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	/* The READ uses a single SGE covering the whole registration */
	ctxt->sge[0].addr = frmr->mr->iova;
	ctxt->sge[0].lkey = frmr->mr->lkey;
	ctxt->sge[0].length = frmr->mr->length;
	ctxt->count = 1;
	ctxt->read_hdr = head;

	/* Prepare REG WR */
	ctxt->reg_cqe.done = svc_rdma_wc_reg;
	reg_wr.wr.wr_cqe = &ctxt->reg_cqe;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->lkey;
	reg_wr.access = frmr->access_flags;
	reg_wr.wr.next = &read_wr.wr;

	/* Prepare RDMA_READ */
	memset(&read_wr, 0, sizeof(read_wr));
	ctxt->cqe.done = svc_rdma_wc_read;
	read_wr.wr.wr_cqe = &ctxt->cqe;
	read_wr.wr.send_flags = IB_SEND_SIGNALED;
	read_wr.rkey = rs_handle;
	read_wr.remote_addr = rs_offset;
	read_wr.wr.sg_list = ctxt->sge;
	read_wr.wr.num_sge = 1;
	if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
		/* Device invalidates the MR as part of the READ */
		read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
		read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
	} else {
		read_wr.wr.opcode = IB_WR_RDMA_READ;
		read_wr.wr.next = &inv_wr;
		/* Prepare invalidate */
		memset(&inv_wr, 0, sizeof(inv_wr));
		ctxt->inv_cqe.done = svc_rdma_wc_inv;
		inv_wr.wr_cqe = &ctxt->inv_cqe;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		/* FENCE: don't invalidate until the READ has finished */
		inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
		inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
	}

	/* Post the chain */
	ret = svc_rdma_send(xprt, &reg_wr.wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	svc_rdma_put_context(ctxt, 0);
	svc_rdma_put_frmr(xprt, frmr);
	return ret;
}
|
|
|
|
|
2015-01-13 16:02:54 +00:00
|
|
|
static unsigned int
|
|
|
|
rdma_rcl_chunk_count(struct rpcrdma_read_chunk *ch)
|
|
|
|
{
|
|
|
|
unsigned int count;
|
|
|
|
|
|
|
|
for (count = 0; ch->rc_discrim != xdr_zero; ch++)
|
|
|
|
count++;
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
/* If there was additional inline content, append it to the end of arg.pages.
 * Tail copy has to be done after the reader function has determined how many
 * pages are needed for RDMA READ.
 *
 * Copies the inline bytes that follow @position in the head iovec onto
 * the page(s) after the RDMA READ payload, so the XDR stream reads as
 * head, READ data, then trailing inline content (e.g. the GETATTR after
 * an NFSv4 WRITE). At most PAGE_SIZE bytes of tail are supported.
 *
 * Returns 1 on success (including the at-most-one-page copy), 0 if the
 * tail is too large to handle.
 */
static int
rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
	       u32 position, u32 byte_count, u32 page_offset, int page_no)
{
	char *srcp, *destp;

	/* The @byte_count parameter is ignored; the real tail length is
	 * recomputed here from the head iovec and @position.
	 */
	srcp = head->arg.head[0].iov_base + position;
	byte_count = head->arg.head[0].iov_len - position;
	if (byte_count > PAGE_SIZE) {
		dprintk("svcrdma: large tail unsupported\n");
		return 0;
	}

	/* Fit as much of the tail on the current page as possible.
	 * When @page_offset is already PAGE_SIZE, the current page is
	 * full and control falls straight through to the "more:" path.
	 */
	if (page_offset != PAGE_SIZE) {
		destp = page_address(rqstp->rq_arg.pages[page_no]);
		destp += page_offset;
		while (byte_count--) {
			*destp++ = *srcp++;
			page_offset++;
			if (page_offset == PAGE_SIZE && byte_count)
				goto more;
		}
		goto done;
	}

more:
	/* Fit the rest on the next page */
	page_no++;
	destp = page_address(rqstp->rq_arg.pages[page_no]);
	while (byte_count--)
		*destp++ = *srcp++;

	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

done:
	/* Recompute the full tail length (byte_count was consumed by
	 * the copy loops above) and account for it in the arg buffer.
	 */
	byte_count = head->arg.head[0].iov_len - position;
	head->arg.page_len += byte_count;
	head->arg.len += byte_count;
	head->arg.buflen += byte_count;
	return 1;
}
|
|
|
|
|
2016-11-29 16:04:42 +00:00
|
|
|
/* Returns the address of the first read chunk or <nul> if no read chunk
|
|
|
|
* is present
|
|
|
|
*/
|
|
|
|
static struct rpcrdma_read_chunk *
|
|
|
|
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
|
|
|
|
{
|
|
|
|
struct rpcrdma_read_chunk *ch =
|
|
|
|
(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
|
|
|
|
|
|
|
|
if (ch->rc_discrim == xdr_zero)
|
|
|
|
return NULL;
|
|
|
|
return ch;
|
|
|
|
}
|
|
|
|
|
2014-05-28 20:12:01 +00:00
|
|
|
static int rdma_read_chunks(struct svcxprt_rdma *xprt,
|
|
|
|
struct rpcrdma_msg *rmsgp,
|
|
|
|
struct svc_rqst *rqstp,
|
|
|
|
struct svc_rdma_op_ctxt *head)
|
2007-12-12 22:13:23 +00:00
|
|
|
{
|
2015-01-13 16:02:54 +00:00
|
|
|
int page_no, ret;
|
2007-12-12 22:13:23 +00:00
|
|
|
struct rpcrdma_read_chunk *ch;
|
2015-01-13 16:03:20 +00:00
|
|
|
u32 handle, page_offset, byte_count;
|
2015-01-13 16:03:28 +00:00
|
|
|
u32 position;
|
2014-05-28 20:12:01 +00:00
|
|
|
u64 rs_offset;
|
2015-01-13 16:03:20 +00:00
|
|
|
bool last;
|
2007-12-12 22:13:23 +00:00
|
|
|
|
|
|
|
/* If no read list is present, return 0 */
|
|
|
|
ch = svc_rdma_get_read_chunk(rmsgp);
|
|
|
|
if (!ch)
|
|
|
|
return 0;
|
|
|
|
|
2015-01-13 16:02:54 +00:00
|
|
|
if (rdma_rcl_chunk_count(ch) > RPCSVC_MAXPAGES)
|
2008-05-13 14:16:05 +00:00
|
|
|
return -EINVAL;
|
2008-08-12 20:12:10 +00:00
|
|
|
|
2014-05-28 20:12:01 +00:00
|
|
|
/* The request is completed when the RDMA_READs complete. The
|
|
|
|
* head context keeps all the pages that comprise the
|
|
|
|
* request.
|
|
|
|
*/
|
|
|
|
head->arg.head[0] = rqstp->rq_arg.head[0];
|
|
|
|
head->arg.tail[0] = rqstp->rq_arg.tail[0];
|
|
|
|
head->hdr_count = head->count;
|
|
|
|
head->arg.page_base = 0;
|
|
|
|
head->arg.page_len = 0;
|
|
|
|
head->arg.len = rqstp->rq_arg.len;
|
|
|
|
head->arg.buflen = rqstp->rq_arg.buflen;
|
2009-06-13 22:05:26 +00:00
|
|
|
|
2015-01-13 16:03:37 +00:00
|
|
|
/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
|
2016-05-04 14:53:14 +00:00
|
|
|
position = be32_to_cpu(ch->rc_position);
|
2015-01-13 16:03:37 +00:00
|
|
|
if (position == 0) {
|
|
|
|
head->arg.pages = &head->pages[0];
|
|
|
|
page_offset = head->byte_len;
|
|
|
|
} else {
|
|
|
|
head->arg.pages = &head->pages[head->count];
|
|
|
|
page_offset = 0;
|
|
|
|
}
|
|
|
|
|
2015-01-13 16:03:28 +00:00
|
|
|
ret = 0;
|
|
|
|
page_no = 0;
|
|
|
|
for (; ch->rc_discrim != xdr_zero; ch++) {
|
|
|
|
if (be32_to_cpu(ch->rc_position) != position)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
handle = be32_to_cpu(ch->rc_target.rs_handle),
|
2015-01-13 16:03:20 +00:00
|
|
|
byte_count = be32_to_cpu(ch->rc_target.rs_length);
|
2012-02-15 17:30:00 +00:00
|
|
|
xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
|
|
|
|
&rs_offset);
|
2014-05-28 20:12:01 +00:00
|
|
|
|
|
|
|
while (byte_count > 0) {
|
2015-01-13 16:03:20 +00:00
|
|
|
last = (ch + 1)->rc_discrim == xdr_zero;
|
|
|
|
ret = xprt->sc_reader(xprt, rqstp, head,
|
|
|
|
&page_no, &page_offset,
|
|
|
|
handle, byte_count,
|
|
|
|
rs_offset, last);
|
2014-05-28 20:12:01 +00:00
|
|
|
if (ret < 0)
|
|
|
|
goto err;
|
|
|
|
byte_count -= ret;
|
|
|
|
rs_offset += ret;
|
|
|
|
head->arg.buflen += ret;
|
2007-12-12 22:13:23 +00:00
|
|
|
}
|
|
|
|
}
|
2015-01-13 16:03:37 +00:00
|
|
|
|
2015-01-13 16:03:45 +00:00
|
|
|
/* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
|
|
|
|
if (page_offset & 3) {
|
|
|
|
u32 pad = 4 - (page_offset & 3);
|
|
|
|
|
2016-05-04 14:52:55 +00:00
|
|
|
head->arg.tail[0].iov_len += pad;
|
2015-01-13 16:03:45 +00:00
|
|
|
head->arg.len += pad;
|
|
|
|
head->arg.buflen += pad;
|
svcrdma: Handle additional inline content
Most NFS RPCs place their large payload argument at the end of the
RPC header (eg, NFSv3 WRITE). For NFSv3 WRITE and SYMLINK, RPC/RDMA
sends the complete RPC header inline, and the payload argument in
the read list. Data in the read list is the last part of the XDR
stream.
One important case is not like this, however. NFSv4 COMPOUND is a
counted array of operations. A WRITE operation, with its large data
payload, can appear in the middle of the compound's operations
array. Thus NFSv4 WRITE compounds can have header content after the
WRITE payload.
The Linux client, for example, performs an NFSv4 WRITE like this:
{ PUTFH, WRITE, GETATTR }
Though RFC 5667 is not precise about this, the proper way to convey
this compound is to place the GETATTR inline, _after_ the front of
the RPC header. The receiver inserts the read list payload into the
XDR stream after the initial WRITE arguments, and before the GETATTR
operation, thanks to the value of the read list "position" field.
The Linux client currently sends the GETATTR at the end of the
RPC/RDMA read list, which is incorrect. It will be corrected in the
future.
The Linux server currently rejects NFSv4 compounds with inline
content after the read list. For the above NFSv4 WRITE compound, the
NFS compound header indicates there are three operations, but the
server finds nonsense when it looks in the XDR stream for the third
operation, and the compound fails with OP_ILLEGAL.
Move trailing inline content to the end of the XDR buffer's page
list. This presents incoming NFSv4 WRITE compounds to NFSD in the
same way the socket transport does.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
2015-01-13 16:03:53 +00:00
|
|
|
page_offset += pad;
|
2015-01-13 16:03:45 +00:00
|
|
|
}
|
|
|
|
|
2014-05-28 20:12:01 +00:00
|
|
|
ret = 1;
|
svcrdma: Handle additional inline content
Most NFS RPCs place their large payload argument at the end of the
RPC header (eg, NFSv3 WRITE). For NFSv3 WRITE and SYMLINK, RPC/RDMA
sends the complete RPC header inline, and the payload argument in
the read list. Data in the read list is the last part of the XDR
stream.
One important case is not like this, however. NFSv4 COMPOUND is a
counted array of operations. A WRITE operation, with its large data
payload, can appear in the middle of the compound's operations
array. Thus NFSv4 WRITE compounds can have header content after the
WRITE payload.
The Linux client, for example, performs an NFSv4 WRITE like this:
{ PUTFH, WRITE, GETATTR }
Though RFC 5667 is not precise about this, the proper way to convey
this compound is to place the GETATTR inline, _after_ the front of
the RPC header. The receiver inserts the read list payload into the
XDR stream after the initial WRITE arguments, and before the GETATTR
operation, thanks to the value of the read list "position" field.
The Linux client currently sends the GETATTR at the end of the
RPC/RDMA read list, which is incorrect. It will be corrected in the
future.
The Linux server currently rejects NFSv4 compounds with inline
content after the read list. For the above NFSv4 WRITE compound, the
NFS compound header indicates there are three operations, but the
server finds nonsense when it looks in the XDR stream for the third
operation, and the compound fails with OP_ILLEGAL.
Move trailing inline content to the end of the XDR buffer's page
list. This presents incoming NFSv4 WRITE compounds to NFSD in the
same way the socket transport does.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
2015-01-13 16:03:53 +00:00
|
|
|
if (position && position < head->arg.head[0].iov_len)
|
|
|
|
ret = rdma_copy_tail(rqstp, head, position,
|
|
|
|
byte_count, page_offset, page_no);
|
|
|
|
head->arg.head[0].iov_len = position;
|
2015-01-13 16:03:37 +00:00
|
|
|
head->position = position;
|
|
|
|
|
2014-05-28 20:12:01 +00:00
|
|
|
err:
|
2007-12-12 22:13:23 +00:00
|
|
|
/* Detach arg pages. svc_recv will replenish them */
|
2014-05-28 20:12:01 +00:00
|
|
|
for (page_no = 0;
|
|
|
|
&rqstp->rq_pages[page_no] < rqstp->rq_respages; page_no++)
|
|
|
|
rqstp->rq_pages[page_no] = NULL;
|
2007-12-12 22:13:23 +00:00
|
|
|
|
2014-05-28 20:12:01 +00:00
|
|
|
return ret;
|
2007-12-12 22:13:23 +00:00
|
|
|
}
|
|
|
|
|
2016-05-04 14:53:39 +00:00
|
|
|
/* Finish assembling rqstp->rq_arg after the RDMA READs issued by
 * rdma_read_chunks() have completed: move the pages held by the head
 * context back into the rqstp page array and rebuild the xdr_buf so
 * the RPC layer sees a complete request.
 */
static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *head)
{
	int page_no;

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}

	/* Adjustments made for RDMA_NOMSG type requests: the READ data
	 * landed directly after the RECV data (position == 0), so the
	 * head/page-list split must be recomputed.
	 */
	if (head->position == 0) {
		if (head->arg.len <= head->sge[0].length) {
			/* Entire message fits in the first SGE */
			head->arg.head[0].iov_len = head->arg.len -
						    head->byte_len;
			head->arg.page_len = 0;
		} else {
			head->arg.head[0].iov_len = head->sge[0].length -
						    head->byte_len;
			head->arg.page_len = head->arg.len -
					     head->sge[0].length;
		}
	}

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;
}
|
|
|
|
|
2017-04-09 17:06:33 +00:00
|
|
|
static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
|
|
|
|
__be32 *rdma_argp, int status)
|
|
|
|
{
|
|
|
|
struct svc_rdma_op_ctxt *ctxt;
|
|
|
|
__be32 *p, *err_msgp;
|
|
|
|
unsigned int length;
|
|
|
|
struct page *page;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
|
|
|
|
if (ret)
|
|
|
|
return;
|
|
|
|
|
|
|
|
page = alloc_page(GFP_KERNEL);
|
|
|
|
if (!page)
|
|
|
|
return;
|
|
|
|
err_msgp = page_address(page);
|
|
|
|
|
|
|
|
p = err_msgp;
|
|
|
|
*p++ = *rdma_argp;
|
|
|
|
*p++ = *(rdma_argp + 1);
|
|
|
|
*p++ = xprt->sc_fc_credits;
|
|
|
|
*p++ = rdma_error;
|
|
|
|
if (status == -EPROTONOSUPPORT) {
|
|
|
|
*p++ = err_vers;
|
|
|
|
*p++ = rpcrdma_version;
|
|
|
|
*p++ = rpcrdma_version;
|
|
|
|
} else {
|
|
|
|
*p++ = err_chunk;
|
|
|
|
}
|
|
|
|
length = (unsigned long)p - (unsigned long)err_msgp;
|
|
|
|
|
|
|
|
/* Map transport header; no RPC message payload */
|
|
|
|
ctxt = svc_rdma_get_context(xprt);
|
|
|
|
ret = svc_rdma_map_reply_hdr(xprt, ctxt, err_msgp, length);
|
|
|
|
if (ret) {
|
|
|
|
dprintk("svcrdma: Error %d mapping send for protocol error\n",
|
|
|
|
ret);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0);
|
|
|
|
if (ret) {
|
|
|
|
dprintk("svcrdma: Error %d posting send for protocol error\n",
|
|
|
|
ret);
|
|
|
|
svc_rdma_unmap_dma(ctxt);
|
|
|
|
svc_rdma_put_context(ctxt, 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-07 19:50:10 +00:00
|
|
|
/* By convention, backchannel calls arrive via rdma_msg type
|
|
|
|
* messages, and never populate the chunk lists. This makes
|
|
|
|
* the RPC/RDMA header small and fixed in size, so it is
|
|
|
|
* straightforward to check the RPC header's direction field.
|
|
|
|
*/
|
2017-04-09 17:06:49 +00:00
|
|
|
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
|
|
|
|
__be32 *rdma_resp)
|
2016-01-07 19:50:10 +00:00
|
|
|
{
|
2017-04-09 17:06:49 +00:00
|
|
|
__be32 *p;
|
2016-01-07 19:50:10 +00:00
|
|
|
|
|
|
|
if (!xprt->xpt_bc_xprt)
|
|
|
|
return false;
|
|
|
|
|
2017-04-09 17:06:49 +00:00
|
|
|
p = rdma_resp + 3;
|
|
|
|
if (*p++ != rdma_msg)
|
2016-01-07 19:50:10 +00:00
|
|
|
return false;
|
2017-04-09 17:06:49 +00:00
|
|
|
|
|
|
|
if (*p++ != xdr_zero)
|
2016-01-07 19:50:10 +00:00
|
|
|
return false;
|
2017-04-09 17:06:49 +00:00
|
|
|
if (*p++ != xdr_zero)
|
2016-01-07 19:50:10 +00:00
|
|
|
return false;
|
2017-04-09 17:06:49 +00:00
|
|
|
if (*p++ != xdr_zero)
|
2016-01-07 19:50:10 +00:00
|
|
|
return false;
|
|
|
|
|
2017-04-09 17:06:49 +00:00
|
|
|
/* XID sanity */
|
|
|
|
if (*p++ != *rdma_resp)
|
2016-01-07 19:50:10 +00:00
|
|
|
return false;
|
|
|
|
/* call direction */
|
2017-04-09 17:06:49 +00:00
|
|
|
if (*p == cpu_to_be32(RPC_CALL))
|
2016-01-07 19:50:10 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2007-12-12 22:13:23 +00:00
|
|
|
/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 *
 * Returns the total length of rq_arg (head + pages + tail) when a
 * complete RPC message is ready for dispatch, or 0 when the call
 * must be deferred (no work queued, a Read was posted, or the
 * message was dropped/errored).
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;

	dprintk("svcrdma: rqstp=%p\n", rqstp);

	/* Prefer contexts whose RDMA Reads have completed; otherwise
	 * take a freshly received message off the dto queue. Both
	 * queues are protected by sc_rq_dto_lock.
	 */
	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_first_entry(&rdma_xprt->sc_read_complete_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_first_entry(&rdma_xprt->sc_rq_dto_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
	} else {
		/* Nothing queued: record the starve and clear XPT_DATA
		 * so svc_recv does not spin on this transport.
		 */
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will go to call into
		 * svc_recv again and we shouldn't be on the active
		 * transport list
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto defer;
		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p\n",
		ctxt, rdma_xprt, rqstp);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;	/* malformed header: send an error reply */
	if (ret == 0)
		goto out_drop;	/* silently drop this message */
	rqstp->rq_xprt_hlen = ret;

	/* A reply arriving on the forward channel may belong to the
	 * backchannel; hand it to the backchannel code instead of
	 * dispatching it as a new call.
	 */
	if (svc_rdma_is_backchannel_reply(xprt, &rmsgp->rm_xid)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt,
					       &rmsgp->rm_xid,
					       &rqstp->rq_arg);
		svc_rdma_put_context(ctxt, 0);
		if (ret)
			goto repost;
		return ret;
	}

	/* Read read-list data. */
	ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		goto defer;
	} else if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

complete:
	/* rq_arg is fully assembled; report its total length */
	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return ret;

out_err:
	svc_rdma_send_error(rdma_xprt, &rmsgp->rm_xid, ret);
	svc_rdma_put_context(ctxt, 0);
	return 0;

defer:
	return 0;

out_drop:
	svc_rdma_put_context(ctxt, 1);
repost:
	return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL);
}
|