SUNRPC: Fix memory allocation in rpc_malloc()

When in a low memory situation, we do want rpciod to kick off direct
reclaim in the case where that helps; however, we don't want it looping
forever in mempool_alloc().
So first try allocating from the slab using GFP_KERNEL | __GFP_NORETRY,
and then fall back to a GFP_NOWAIT allocation from the mempool.

Ditto for rpc_alloc_task()

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Author: Trond Myklebust <trond.myklebust@hammerspace.com>
Date:   2022-03-14 22:02:22 -04:00
Parent: d0afde5fc6
Commit: 33e5c765bc

2 changed files with 15 additions and 7 deletions
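
The rpc_alloc_task() change referenced in the commit message is not part of the
hunks shown below. As a rough sketch only, the same slab-first, mempool-fallback
pattern applied to that helper would look something like the following (this
assumes the existing rpc_task_slabp cache and rpc_task_mempool in
net/sunrpc/sched.c; the actual patch may differ in detail):

    static struct rpc_task *rpc_alloc_task(void)
    {
            struct rpc_task *task;

            /* Try an ordinary slab allocation first; rpc_task_gfp_mask()
             * keeps rpciod from looping forever in direct reclaim. */
            task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
            if (task)
                    return task;
            /* Fall back to the emergency mempool without sleeping */
            return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
    }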


@@ -262,6 +262,7 @@ void rpc_destroy_mempool(void);
 extern struct workqueue_struct *rpciod_workqueue;
 extern struct workqueue_struct *xprtiod_workqueue;
 void rpc_prepare_task(struct rpc_task *task);
+gfp_t rpc_task_gfp_mask(void);
 static inline int rpc_wait_for_completion_task(struct rpc_task *task)
 {


@@ -57,6 +57,13 @@ struct workqueue_struct *rpciod_workqueue __read_mostly;
 struct workqueue_struct *xprtiod_workqueue __read_mostly;
 EXPORT_SYMBOL_GPL(xprtiod_workqueue);
 
+gfp_t rpc_task_gfp_mask(void)
+{
+        if (current->flags & PF_WQ_WORKER)
+                return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+        return GFP_KERNEL;
+}
+
 unsigned long
 rpc_task_timeout(const struct rpc_task *task)
 {
@@ -1030,15 +1037,15 @@ int rpc_malloc(struct rpc_task *task)
         struct rpc_rqst *rqst = task->tk_rqstp;
         size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
         struct rpc_buffer *buf;
-        gfp_t gfp = GFP_KERNEL;
-
-        if (RPC_IS_ASYNC(task))
-                gfp = GFP_NOWAIT | __GFP_NOWARN;
+        gfp_t gfp = rpc_task_gfp_mask();
 
         size += sizeof(struct rpc_buffer);
-        if (size <= RPC_BUFFER_MAXSIZE)
-                buf = mempool_alloc(rpc_buffer_mempool, gfp);
-        else
+        if (size <= RPC_BUFFER_MAXSIZE) {
+                buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
+                /* Reach for the mempool if dynamic allocation fails */
+                if (!buf && RPC_IS_ASYNC(task))
+                        buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
+        } else
                 buf = kmalloc(size, gfp);
 
         if (!buf)
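
For reference, after this patch the allocation path in rpc_malloc() reads roughly
as follows. The tail of the function beyond the visible hunk is reconstructed from
the surrounding mainline code and is an approximation, not part of the diff above:

    int rpc_malloc(struct rpc_task *task)
    {
            struct rpc_rqst *rqst = task->tk_rqstp;
            size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
            struct rpc_buffer *buf;
            gfp_t gfp = rpc_task_gfp_mask();

            size += sizeof(struct rpc_buffer);
            if (size <= RPC_BUFFER_MAXSIZE) {
                    /* Slab allocation with a mask that will not loop in reclaim */
                    buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
                    /* Reach for the mempool if dynamic allocation fails */
                    if (!buf && RPC_IS_ASYNC(task))
                            buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
            } else
                    buf = kmalloc(size, gfp);

            if (!buf)
                    return -ENOMEM;

            buf->len = size;
            rqst->rq_buffer = buf->data;
            rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
            return 0;
    }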