Mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 12:11:40 +00:00)
nfs: simplify nfs_folio_find_and_lock_request
nfs_folio_find_and_lock_request and the nfs_page_group_lock_head helper
called by it spend quite some effort dealing with head vs. subrequests.
But given that only the head request can be stashed in the folio private
data, none of that is required. Fold the locking logic from
nfs_page_group_lock_head into nfs_folio_find_and_lock_request and
simplify the result based on the invariant that we always find the head
request in the folio private data.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 9eb7c484db
parent 02e61ec1e2
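The whole simplification rests on one invariant: a request reachable
through the folio private data is always the head of its page group.
The following is a minimal, compilable sketch of that invariant, using
simplified stand-in structs rather than the kernel's real layout:

#include <assert.h>
#include <stddef.h>

/* Simplified stand-in for struct nfs_page: every request points at
 * the head of its page group, and the head points at itself. */
struct nfs_page_model {
	struct nfs_page_model *wb_head;
};

/* Simplified stand-in for a folio: private only ever holds a head. */
struct folio_model {
	struct nfs_page_model *private;
};

/* Model of nfs_folio_find_head_request.  Because only head requests
 * are ever attached to a folio, the wb_head indirection and the
 * conditional extra reference that nfs_page_group_lock_head took are
 * both no-ops on this path -- the redundancy the patch removes. */
static struct nfs_page_model *folio_find_head(struct folio_model *folio)
{
	struct nfs_page_model *req = folio->private;

	assert(req == NULL || req->wb_head == req);
	return req;
}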
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -187,25 +187,6 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 }
 EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 
-/*
- * nfs_page_lock_head_request - page lock the head of the page group
- * @req: any member of the page group
- */
-struct nfs_page *
-nfs_page_group_lock_head(struct nfs_page *req)
-{
-	struct nfs_page *head = req->wb_head;
-
-	while (!nfs_lock_request(head)) {
-		int ret = nfs_wait_on_request(head);
-		if (ret < 0)
-			return ERR_PTR(ret);
-	}
-	if (head != req)
-		kref_get(&head->wb_kref);
-	return head;
-}
-
 /*
  * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
  * @head: head request of page group, must be holding head lock
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -197,28 +197,32 @@ static struct nfs_page *nfs_folio_find_head_request(struct folio *folio)
 static struct nfs_page *nfs_folio_find_and_lock_request(struct folio *folio)
 {
 	struct inode *inode = folio->mapping->host;
-	struct nfs_page *req, *head;
+	struct nfs_page *head;
 	int ret;
 
-	for (;;) {
-		req = nfs_folio_find_head_request(folio);
-		if (!req)
-			return req;
-		head = nfs_page_group_lock_head(req);
-		if (head != req)
-			nfs_release_request(req);
-		if (IS_ERR(head))
-			return head;
-		ret = nfs_cancel_remove_inode(head, inode);
-		if (ret < 0) {
-			nfs_unlock_and_release_request(head);
-			return ERR_PTR(ret);
-		}
-		/* Ensure that nobody removed the request before we locked it */
-		if (head == folio->private)
-			break;
-		nfs_unlock_and_release_request(head);
-	}
+retry:
+	head = nfs_folio_find_head_request(folio);
+	if (!head)
+		return NULL;
+
+	while (!nfs_lock_request(head)) {
+		ret = nfs_wait_on_request(head);
+		if (ret < 0)
+			return ERR_PTR(ret);
+	}
+
+	/* Ensure that nobody removed the request before we locked it */
+	if (head != folio->private) {
+		nfs_unlock_and_release_request(head);
+		goto retry;
+	}
+
+	ret = nfs_cancel_remove_inode(head, inode);
+	if (ret < 0) {
+		nfs_unlock_and_release_request(head);
+		return ERR_PTR(ret);
+	}
 
 	return head;
 }
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -155,7 +155,6 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 extern int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
-extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
 extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
 extern void nfs_join_page_group(struct nfs_page *head,
 		struct nfs_commit_info *cinfo,