xprtrdma: Allocate req's regbufs at xprt create time
Allocating an rpcrdma_req's regbufs at xprt create time enables a pair of micro-optimizations:

First, if these regbufs are always there, we can eliminate two conditional branches from the hot xprt_rdma_allocate path.

Second, by allocating a 1KB buffer, it places a lower bound on the size of these buffers, without adding yet another conditional branch. The lower bound reduces the number of hardway re-allocations. In fact, for some workloads it completely eliminates hardway allocations.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit bb93a1ae2b
parent 8cec3dba76
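To make the rationale concrete, the following is a minimal user-space C sketch of the pattern this patch adopts (not kernel code; the demo_* structs and helpers are invented for illustration). Once the send buffer is guaranteed to exist from the moment the req is created, the per-RPC helper only has to check the buffer's length, and a 1KB floor on that length keeps typical small requests off the slow re-allocation ("hardway") path.

/*
 * Minimal user-space sketch, not kernel code: the demo_* names are
 * invented to model the idea behind the patch.  When a request's send
 * buffer is allocated together with the request, the per-call helper
 * can drop the "does the buffer exist?" test and keep only the size
 * check; a 1KB lower bound (RPCRDMA_V1_DEF_INLINE_SIZE in the patch)
 * keeps small calls off the re-allocation ("hardway") path.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_regbuf {
	size_t rg_length;
	char rg_data[];
};

struct demo_req {
	struct demo_regbuf *rl_sendbuf;
};

static struct demo_regbuf *demo_alloc_regbuf(size_t size)
{
	struct demo_regbuf *rb = malloc(sizeof(*rb) + size);

	if (rb)
		rb->rg_length = size;
	return rb;
}

/* Before: two tests on the hot path, one for existence, one for size. */
static bool demo_get_sendbuf_old(struct demo_req *req, size_t size)
{
	if (req->rl_sendbuf && req->rl_sendbuf->rg_length >= size)
		return true;
	free(req->rl_sendbuf);		/* "hardway": replace on demand */
	req->rl_sendbuf = demo_alloc_regbuf(size);
	return req->rl_sendbuf != NULL;
}

/* After: the buffer always exists, so only the size test remains. */
static bool demo_get_sendbuf_new(struct demo_req *req, size_t size)
{
	if (req->rl_sendbuf->rg_length >= size)
		return true;
	free(req->rl_sendbuf);
	req->rl_sendbuf = demo_alloc_regbuf(size);
	return req->rl_sendbuf != NULL;
}

int main(void)
{
	/* Old model: buffer is allocated lazily on first use. */
	struct demo_req lazy = { .rl_sendbuf = NULL };

	printf("old path, first 512B call: %d\n",
	       demo_get_sendbuf_old(&lazy, 512));

	/* New model: buffer created with the req, 1KB floor. */
	struct demo_req eager = { .rl_sendbuf = demo_alloc_regbuf(1024) };

	if (!eager.rl_sendbuf)
		return 1;
	printf("new path, 512B call (no hardway): %d\n",
	       demo_get_sendbuf_new(&eager, 512));
	printf("new path, 4KB call (hardway): %d\n",
	       demo_get_sendbuf_new(&eager, 4096));

	free(lazy.rl_sendbuf);
	free(eager.rl_sendbuf);
	return 0;
}

In the kernel patch below, the same guarantee is established by having rpcrdma_req_create() allocate rl_sendbuf and rl_recvbuf up front, so rpcrdma_get_sendbuf() and rpcrdma_get_recvbuf() keep only a likely() size check.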
@@ -28,10 +28,10 @@ static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
 	unsigned int i;
 
 	for (i = 0; i < (count << 1); i++) {
-		struct rpcrdma_regbuf *rb;
 		size_t size;
 
-		req = rpcrdma_req_create(r_xprt, GFP_KERNEL);
+		size = min_t(size_t, r_xprt->rx_data.inline_rsize, PAGE_SIZE);
+		req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
 		if (!req)
 			return -ENOMEM;
 		rqst = &req->rl_slot;
@@ -42,20 +42,10 @@ static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
 		spin_lock(&xprt->bc_pa_lock);
 		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
 		spin_unlock(&xprt->bc_pa_lock);
 
-		size = r_xprt->rx_data.inline_rsize;
-		rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
-		if (!rb)
-			goto out_fail;
-		req->rl_sendbuf = rb;
-		xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(rb),
-			     min_t(size_t, size, PAGE_SIZE));
+		xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf),
+			     size);
 	}
 	return 0;
-
-out_fail:
-	rpcrdma_req_destroy(req);
-	return -ENOMEM;
 }
 
 /**
@@ -591,7 +591,7 @@ rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 {
 	struct rpcrdma_regbuf *rb;
 
-	if (req->rl_sendbuf && rdmab_length(req->rl_sendbuf) >= size)
+	if (likely(rdmab_length(req->rl_sendbuf) >= size))
 		return true;
 
 	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
@@ -621,7 +621,7 @@ rpcrdma_get_recvbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 {
 	struct rpcrdma_regbuf *rb;
 
-	if (req->rl_recvbuf && rdmab_length(req->rl_recvbuf) >= size)
+	if (likely(rdmab_length(req->rl_recvbuf) >= size))
 		return true;
 
 	rb = rpcrdma_alloc_regbuf(size, DMA_NONE, flags);
@@ -998,11 +998,13 @@ rpcrdma_mr_refresh_worker(struct work_struct *work)
 /**
  * rpcrdma_req_create - Allocate an rpcrdma_req object
  * @r_xprt: controlling r_xprt
+ * @size: initial size, in bytes, of send and receive buffers
  * @flags: GFP flags passed to memory allocators
  *
  * Returns an allocated and fully initialized rpcrdma_req or NULL.
  */
-struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, gfp_t flags)
+struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
+				       gfp_t flags)
 {
 	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
 	struct rpcrdma_regbuf *rb;
@@ -1010,22 +1012,37 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, gfp_t flags)
 
 	req = kzalloc(sizeof(*req), flags);
 	if (req == NULL)
-		return NULL;
+		goto out1;
 
 	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE, DMA_TO_DEVICE, flags);
-	if (!rb) {
-		kfree(req);
-		return NULL;
-	}
+	if (!rb)
+		goto out2;
 	req->rl_rdmabuf = rb;
 	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
+
+	req->rl_sendbuf = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
+	if (!req->rl_sendbuf)
+		goto out3;
+
+	req->rl_recvbuf = rpcrdma_alloc_regbuf(size, DMA_NONE, flags);
+	if (!req->rl_recvbuf)
+		goto out4;
+
 	req->rl_buffer = buffer;
 	INIT_LIST_HEAD(&req->rl_registered);
-
 	spin_lock(&buffer->rb_lock);
 	list_add(&req->rl_all, &buffer->rb_allreqs);
 	spin_unlock(&buffer->rb_lock);
 	return req;
+
+out4:
+	kfree(req->rl_sendbuf);
+out3:
+	kfree(req->rl_rdmabuf);
+out2:
+	kfree(req);
+out1:
+	return NULL;
 }
 
 static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
@@ -1090,7 +1107,8 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	for (i = 0; i < buf->rb_max_requests; i++) {
 		struct rpcrdma_req *req;
 
-		req = rpcrdma_req_create(r_xprt, GFP_KERNEL);
+		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE,
+					 GFP_KERNEL);
 		if (!req)
 			goto out;
 		list_add(&req->rl_list, &buf->rb_send_bufs);
@@ -529,7 +529,7 @@ int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
 /*
  * Buffer calls - xprtrdma/verbs.c
  */
-struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
+struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
 				       gfp_t flags);
 void rpcrdma_req_destroy(struct rpcrdma_req *req);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);