NFSv4/pnfs: Ensure pNFS allocation modes are consistent with nfsiod

Ensure that pNFS allocations that can be called from rpciod/nfsiod
callbacks can fail in low memory mode, so that the threads don't block
and loop forever.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Author: Trond Myklebust
Date: 2022-03-21 15:32:09 -04:00
Parent: 0bae835b63
Commit: 63d8a41b1d
2 changed files with 18 additions and 23 deletions
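Every hunk below makes the same substitution: allocation sites that can be
reached from an rpciod/nfsiod work item stop hard-coding GFP_KERNEL or
GFP_NOFS and instead ask nfs_io_gfp_mask() for the allocation mode. The
helper itself is not part of this diff; a minimal sketch of the idea,
assuming the usual definition in include/linux/nfs_fs.h (the exact flags
there may differ), looks like this:

#include <linux/gfp.h>    /* gfp_t, GFP_KERNEL, __GFP_* flags */
#include <linux/sched.h>  /* current, PF_WQ_WORKER */

/* Sketch only: pick an allocation mode that is allowed to fail when the
 * caller is a workqueue worker (rpciod/nfsiod), so the worker returns an
 * error instead of blocking in memory reclaim indefinitely.
 */
static inline gfp_t nfs_io_gfp_mask(void)
{
	if (current->flags & PF_WQ_WORKER)
		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	return GFP_KERNEL;
}

Every converted call site already handles a NULL or -ENOMEM result, so a
failed allocation on one of these worker threads now unwinds cleanly rather
than looping forever, which is what the commit message asks for.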

fs/nfs/nfs42proc.c

@@ -1017,7 +1017,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
 	return -EOPNOTSUPP;
 	if (n > NFS42_LAYOUTERROR_MAX)
 		return -EINVAL;
-	data = nfs42_alloc_layouterror_data(lseg, GFP_KERNEL);
+	data = nfs42_alloc_layouterror_data(lseg, nfs_io_gfp_mask());
 	if (!data)
 		return -ENOMEM;
 	for (i = 0; i < n; i++) {

fs/nfs/pnfs.c

@@ -1233,7 +1233,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
 	int status = 0;
 
 	*pcred = NULL;
-	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
+	lrp = kzalloc(sizeof(*lrp), nfs_io_gfp_mask());
 	if (unlikely(lrp == NULL)) {
 		status = -ENOMEM;
 		spin_lock(&ino->i_lock);
@@ -2206,7 +2206,7 @@ _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
 	struct pnfs_layout_hdr *lo;
 
 	spin_lock(&ino->i_lock);
-	lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
+	lo = pnfs_find_alloc_layout(ino, ctx, nfs_io_gfp_mask());
 	if (!lo)
 		goto out_unlock;
 	if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
@@ -2249,8 +2249,8 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
 	lo = _pnfs_grab_empty_layout(ino, ctx);
 	if (!lo)
 		return;
-	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
-					     &rng, GFP_KERNEL);
+	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid, &rng,
+					     nfs_io_gfp_mask());
 	if (!lgp) {
 		pnfs_clear_first_layoutget(lo);
 		nfs_layoutget_end(lo);
@@ -2275,8 +2275,8 @@ static void _lgopen_prepare_floating(struct nfs4_opendata *data,
 	};
 	struct nfs4_layoutget *lgp;
 
-	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
-					     &rng, GFP_KERNEL);
+	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid, &rng,
+					     nfs_io_gfp_mask());
 	if (!lgp)
 		return;
 	data->lgp = lgp;
@@ -2691,13 +2691,11 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
 		else
 			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
 
-		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
-						   nfs_req_openctx(req),
-						   req_offset(req),
-						   rd_size,
-						   IOMODE_READ,
-						   false,
-						   GFP_KERNEL);
+		pgio->pg_lseg =
+			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+					   req_offset(req), rd_size,
+					   IOMODE_READ, false,
+					   nfs_io_gfp_mask());
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 			pgio->pg_lseg = NULL;
@@ -2718,13 +2716,10 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
 	pnfs_generic_pg_check_layout(pgio);
 	pnfs_generic_pg_check_range(pgio, req);
 	if (pgio->pg_lseg == NULL) {
-		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
-						   nfs_req_openctx(req),
-						   req_offset(req),
-						   wb_size,
-						   IOMODE_RW,
-						   false,
-						   GFP_KERNEL);
+		pgio->pg_lseg =
+			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
+					   req_offset(req), wb_size, IOMODE_RW,
+					   false, nfs_io_gfp_mask());
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 			pgio->pg_lseg = NULL;
@@ -3183,7 +3178,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
 	status = -ENOMEM;
 	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
-	data = kzalloc(sizeof(*data), GFP_NOFS);
+	data = kzalloc(sizeof(*data), nfs_io_gfp_mask());
 	if (!data)
 		goto clear_layoutcommitting;
@@ -3250,7 +3245,7 @@ struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
 {
 	struct nfs4_threshold *thp;
 
-	thp = kzalloc(sizeof(*thp), GFP_KERNEL);
+	thp = kzalloc(sizeof(*thp), nfs_io_gfp_mask());
 	if (!thp) {
 		dprintk("%s mdsthreshold allocation failed\n", __func__);
 		return NULL;