NFS: Add functionality to allow waiting on all outstanding reads to complete
This will later allow NFS locking code to wait for readahead to complete before releasing byte range locks.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
commit 577b42327d (parent bc7a05ca51)
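The mechanism in outline: each nfs_lock_context gains an nfs_io_counter. nfs_create_request() increments it, nfs_clear_request() decrements it, and nfs_iocounter_wait() sleeps until it reaches zero. A rough userspace sketch of the same counter-and-wake shape, using a pthread mutex/condvar pair in place of the kernel's atomics and bit-waitqueues (the io_counter names are illustrative, not part of the patch):

#include <pthread.h>

struct io_counter {                     /* illustrative stand-in for nfs_io_counter */
	pthread_mutex_t lock;
	pthread_cond_t  zero;           /* signalled when count drops to zero */
	unsigned int    count;          /* requests currently in flight */
};

#define IO_COUNTER_INIT { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

static void io_counter_inc(struct io_counter *c)
{
	pthread_mutex_lock(&c->lock);
	c->count++;
	pthread_mutex_unlock(&c->lock);
}

static void io_counter_dec(struct io_counter *c)
{
	pthread_mutex_lock(&c->lock);
	if (--c->count == 0)
		pthread_cond_broadcast(&c->zero);   /* wake every waiter */
	pthread_mutex_unlock(&c->lock);
}

/* Block until all outstanding I/O has called io_counter_dec(). */
static void io_counter_wait(struct io_counter *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->count != 0)
		pthread_cond_wait(&c->zero, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

The kernel version in the patch keeps the fast path lock-free: increment and decrement are plain atomics, and the condvar's role is played by a bit waitqueue keyed on NFS_IO_INPROGRESS.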
fs/nfs/inode.c
@@ -561,6 +561,7 @@ static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
 	l_ctx->lockowner.l_owner = current->files;
 	l_ctx->lockowner.l_pid = current->tgid;
 	INIT_LIST_HEAD(&l_ctx->list);
+	nfs_iocounter_init(&l_ctx->io_count);
 }
 
 static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
fs/nfs/internal.h
@@ -229,6 +229,13 @@ extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
 			      struct nfs_pgio_header *hdr,
 			      void (*release)(struct nfs_pgio_header *hdr));
 void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
+int nfs_iocounter_wait(struct nfs_io_counter *c);
+
+static inline void nfs_iocounter_init(struct nfs_io_counter *c)
+{
+	c->flags = 0;
+	atomic_set(&c->io_count, 0);
+}
 
 /* nfs2xdr.c */
 extern struct rpc_procinfo nfs_procedures[];
fs/nfs/pagelist.c
@@ -84,6 +84,55 @@ nfs_page_free(struct nfs_page *p)
 	kmem_cache_free(nfs_page_cachep, p);
 }
 
+static void
+nfs_iocounter_inc(struct nfs_io_counter *c)
+{
+	atomic_inc(&c->io_count);
+}
+
+static void
+nfs_iocounter_dec(struct nfs_io_counter *c)
+{
+	if (atomic_dec_and_test(&c->io_count)) {
+		clear_bit(NFS_IO_INPROGRESS, &c->flags);
+		smp_mb__after_clear_bit();
+		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
+	}
+}
+
+static int
+__nfs_iocounter_wait(struct nfs_io_counter *c)
+{
+	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
+	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
+	int ret = 0;
+
+	do {
+		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
+		set_bit(NFS_IO_INPROGRESS, &c->flags);
+		if (atomic_read(&c->io_count) == 0)
+			break;
+		ret = nfs_wait_bit_killable(&c->flags);
+	} while (atomic_read(&c->io_count) != 0);
+	finish_wait(wq, &q.wait);
+	return ret;
+}
+
+/**
+ * nfs_iocounter_wait - wait for i/o to complete
+ * @c: nfs_io_counter to use
+ *
+ * returns -ERESTARTSYS if interrupted by a fatal signal.
+ * Otherwise returns 0 once the io_count hits 0.
+ */
+int
+nfs_iocounter_wait(struct nfs_io_counter *c)
+{
+	if (atomic_read(&c->io_count) == 0)
+		return 0;
+	return __nfs_iocounter_wait(c);
+}
+
 /**
  * nfs_create_request - Create an NFS read/write request.
  * @ctx: open context to use
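Note the handshake in the hunk above: the waiter sets NFS_IO_INPROGRESS before re-checking io_count, while nfs_iocounter_dec() clears the bit and then calls wake_up_bit() once the count hits zero, with smp_mb__after_clear_bit() ordering the clear ahead of the wakeup. A final decrement that races with the waiter is therefore seen either by the atomic_read() (the waiter breaks out immediately) or by the bit wakeup (the waiter is woken and re-checks the count), so the wakeup cannot be lost.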
@@ -118,6 +167,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 		return ERR_CAST(l_ctx);
 	}
 	req->wb_lock_context = l_ctx;
+	nfs_iocounter_inc(&l_ctx->io_count);
 
 	/* Initialize the request struct. Initially, we assume a
 	 * long write-back delay. This will be adjusted in
@@ -177,6 +227,7 @@ static void nfs_clear_request(struct nfs_page *req)
 		req->wb_page = NULL;
 	}
 	if (l_ctx != NULL) {
+		nfs_iocounter_dec(&l_ctx->io_count);
 		nfs_put_lock_context(l_ctx);
 		req->wb_lock_context = NULL;
 	}
include/linux/nfs_fs.h
@@ -59,11 +59,18 @@ struct nfs_lockowner {
 	pid_t l_pid;
 };
 
+#define NFS_IO_INPROGRESS 0
+struct nfs_io_counter {
+	unsigned long flags;
+	atomic_t io_count;
+};
+
 struct nfs_lock_context {
 	atomic_t count;
 	struct list_head list;
 	struct nfs_open_context *open_context;
 	struct nfs_lockowner lockowner;
+	struct nfs_io_counter io_count;
 };
 
 struct nfs4_state;
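Per the commit message, the intended consumer is the byte-range unlock path, which this patch does not yet wire up. A sketch of what such a caller could look like (the function name nfs_unlock_drain is hypothetical; nfs_get_lock_context() and nfs_put_lock_context() are the existing lock-context API):

/* Hypothetical caller: drain reads still in flight under this open
 * context's lock context before a byte-range lock is released. */
static int nfs_unlock_drain(struct nfs_open_context *ctx)
{
	struct nfs_lock_context *l_ctx;
	int status = 0;

	l_ctx = nfs_get_lock_context(ctx);
	if (!IS_ERR(l_ctx)) {
		status = nfs_iocounter_wait(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
	}
	return status;
}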