fuse: cleanup request queuing towards virtiofs

Virtiofs has its own queuing mechanism, but requests are still first
queued on fiq->pending, only to be immediately dequeued and queued onto
the virtio queue.

The queuing on fiq->pending is unnecessary and may even hurt
performance, since the pending list and its lock are a contention point.

Forget requests are handled similarly.

Move the queuing of requests and forgets into the fiq->ops->*
callbacks.  The fuse_iqueue_ops callbacks are renamed to reflect the
new semantics.
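
Roughly, the change collapses the request path as in the following
condensed sketch (not part of the patch, just distilled from the diff
below, with locking and error handling trimmed):

  /* Before: every request took a detour through fiq->pending, even for
   * virtiofs, whose callback dequeued it again right away: */
  static void queue_request_and_unlock(struct fuse_iqueue *fiq,
                                       struct fuse_req *req)
          __releases(fiq->lock)
  {
          list_add_tail(&req->list, &fiq->pending);
          fiq->ops->wake_pending_and_unlock(fiq);
  }

  /* After: the transport-specific callback queues the request itself,
   * e.g. fuse_dev_queue_req() for /dev/fuse or virtio_fs_send_req()
   * for virtiofs: */
  static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
  {
          req->in.h.len = sizeof(struct fuse_in_header) +
                          fuse_len_args(req->args->in_numargs,
                                        (struct fuse_arg *) req->args->in_args);
          fiq->ops->send_req(fiq, req);
  }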

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Fixed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Tested-by: Peter-Jan Gootzen <pgootzen@nvidia.com>
Reviewed-by: Peter-Jan Gootzen <pgootzen@nvidia.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
Miklos Szeredi 2024-05-29 17:09:07 +02:00
parent 3ab394b363
commit 5de8acb41c
3 changed files with 106 additions and 113 deletions

fs/fuse/dev.c

@@ -194,11 +194,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
}
EXPORT_SYMBOL_GPL(fuse_len_args);
u64 fuse_get_unique(struct fuse_iqueue *fiq)
static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
u64 ret;
spin_lock(&fiq->lock);
ret = fuse_get_unique_locked(fiq);
spin_unlock(&fiq->lock);
return ret;
}
EXPORT_SYMBOL_GPL(fuse_get_unique);
static unsigned int fuse_req_hash(u64 unique)
@@ -217,22 +228,68 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
}
static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
{
spin_lock(&fiq->lock);
if (fiq->connected) {
fiq->forget_list_tail->next = forget;
fiq->forget_list_tail = forget;
fuse_dev_wake_and_unlock(fiq);
} else {
kfree(forget);
spin_unlock(&fiq->lock);
}
}
static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->lock);
if (list_empty(&req->intr_entry)) {
list_add_tail(&req->intr_entry, &fiq->interrupts);
/*
* Pairs with smp_mb() implied by test_and_set_bit()
* from fuse_request_end().
*/
smp_mb();
if (test_bit(FR_FINISHED, &req->flags)) {
list_del_init(&req->intr_entry);
spin_unlock(&fiq->lock);
} else {
fuse_dev_wake_and_unlock(fiq);
}
} else {
spin_unlock(&fiq->lock);
}
}
static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->lock);
if (fiq->connected) {
if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
req->in.h.unique = fuse_get_unique_locked(fiq);
list_add_tail(&req->list, &fiq->pending);
fuse_dev_wake_and_unlock(fiq);
} else {
spin_unlock(&fiq->lock);
req->out.h.error = -ENOTCONN;
fuse_request_end(req);
}
}
const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
.wake_forget_and_unlock = fuse_dev_wake_and_unlock,
.wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
.wake_pending_and_unlock = fuse_dev_wake_and_unlock,
.send_forget = fuse_dev_queue_forget,
.send_interrupt = fuse_dev_queue_interrupt,
.send_req = fuse_dev_queue_req,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
static void queue_request_and_unlock(struct fuse_iqueue *fiq,
struct fuse_req *req)
__releases(fiq->lock)
static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
{
req->in.h.len = sizeof(struct fuse_in_header) +
fuse_len_args(req->args->in_numargs,
(struct fuse_arg *) req->args->in_args);
list_add_tail(&req->list, &fiq->pending);
fiq->ops->wake_pending_and_unlock(fiq);
fiq->ops->send_req(fiq, req);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -243,15 +300,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
spin_lock(&fiq->lock);
if (fiq->connected) {
fiq->forget_list_tail->next = forget;
fiq->forget_list_tail = forget;
fiq->ops->wake_forget_and_unlock(fiq);
} else {
kfree(forget);
spin_unlock(&fiq->lock);
}
fiq->ops->send_forget(fiq, forget);
}
static void flush_bg_queue(struct fuse_conn *fc)
@@ -265,9 +314,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
spin_lock(&fiq->lock);
req->in.h.unique = fuse_get_unique(fiq);
queue_request_and_unlock(fiq, req);
fuse_send_one(fiq, req);
}
}
@@ -337,29 +384,12 @@ static int queue_interrupt(struct fuse_req *req)
{
struct fuse_iqueue *fiq = &req->fm->fc->iq;
spin_lock(&fiq->lock);
/* Check for we've sent request to interrupt this req */
if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
spin_unlock(&fiq->lock);
if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
return -EINVAL;
}
if (list_empty(&req->intr_entry)) {
list_add_tail(&req->intr_entry, &fiq->interrupts);
/*
* Pairs with smp_mb() implied by test_and_set_bit()
* from fuse_request_end().
*/
smp_mb();
if (test_bit(FR_FINISHED, &req->flags)) {
list_del_init(&req->intr_entry);
spin_unlock(&fiq->lock);
return 0;
}
fiq->ops->wake_interrupt_and_unlock(fiq);
} else {
spin_unlock(&fiq->lock);
}
fiq->ops->send_interrupt(fiq, req);
return 0;
}
@@ -414,21 +444,15 @@ static void __fuse_request_send(struct fuse_req *req)
struct fuse_iqueue *fiq = &req->fm->fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
spin_lock(&fiq->lock);
if (!fiq->connected) {
spin_unlock(&fiq->lock);
req->out.h.error = -ENOTCONN;
} else {
req->in.h.unique = fuse_get_unique(fiq);
/* acquire extra reference, since request is still needed
after fuse_request_end() */
__fuse_get_request(req);
queue_request_and_unlock(fiq, req);
request_wait_answer(req);
/* Pairs with smp_wmb() in fuse_request_end() */
smp_rmb();
}
/* acquire extra reference, since request is still needed after
fuse_request_end() */
__fuse_get_request(req);
fuse_send_one(fiq, req);
request_wait_answer(req);
/* Pairs with smp_wmb() in fuse_request_end() */
smp_rmb();
}
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
@@ -583,7 +607,6 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
{
struct fuse_req *req;
struct fuse_iqueue *fiq = &fm->fc->iq;
int err = 0;
req = fuse_get_req(fm, false);
if (IS_ERR(req))
@@ -594,16 +617,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
fuse_args_to_req(req, args);
spin_lock(&fiq->lock);
if (fiq->connected) {
queue_request_and_unlock(fiq, req);
} else {
err = -ENODEV;
spin_unlock(&fiq->lock);
fuse_put_request(req);
}
fuse_send_one(fiq, req);
return err;
return 0;
}
/*
@@ -1075,9 +1091,9 @@ __releases(fiq->lock)
return err ? err : reqsize;
}
struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
unsigned int max,
unsigned int *countp)
static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
unsigned int max,
unsigned int *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
@@ -1096,7 +1112,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
return head;
}
EXPORT_SYMBOL(fuse_dequeue_forget);
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
@@ -1111,7 +1126,7 @@ __releases(fiq->lock)
struct fuse_in_header ih = {
.opcode = FUSE_FORGET,
.nodeid = forget->forget_one.nodeid,
.unique = fuse_get_unique(fiq),
.unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1142,7 +1157,7 @@ __releases(fiq->lock)
struct fuse_batch_forget_in arg = { .count = 0 };
struct fuse_in_header ih = {
.opcode = FUSE_BATCH_FORGET,
.unique = fuse_get_unique(fiq),
.unique = fuse_get_unique_locked(fiq),
.len = sizeof(ih) + sizeof(arg),
};
@@ -1828,7 +1843,7 @@ static void fuse_resend(struct fuse_conn *fc)
}
/* iq and pq requests are both oldest to newest */
list_splice(&to_queue, &fiq->pending);
fiq->ops->wake_pending_and_unlock(fiq);
fuse_dev_wake_and_unlock(fiq);
}
static int fuse_notify_resend(struct fuse_conn *fc)

fs/fuse/fuse_i.h

@@ -449,22 +449,19 @@ struct fuse_iqueue;
*/
struct fuse_iqueue_ops {
/**
* Signal that a forget has been queued
* Send one forget
*/
void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
__releases(fiq->lock);
void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link);
/**
* Signal that an INTERRUPT request has been queued
* Send interrupt for request
*/
void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
__releases(fiq->lock);
void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
* Signal that a request has been queued
* Send one request
*/
void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
__releases(fiq->lock);
void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req);
/**
* Clean up when fuse_iqueue is destroyed
@@ -1053,10 +1050,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
struct fuse_forget_link *fuse_alloc_forget(void);
struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
unsigned int max,
unsigned int *countp);
/*
* Initialize READ or READDIR request
*/
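
Note that dropping the __releases(fiq->lock) annotations also changes the
locking contract: the old wake_*_and_unlock callbacks were invoked with
fiq->lock held and had to release it, while the new send_* callbacks are
invoked without the lock and take it themselves only when they need fiq
state (fuse_dev_queue_req() above does; virtio_fs_send_req() below relies
on fuse_get_unique() locking internally).  A minimal sketch of a send_req
implementation under the new contract, modelled on the virtiofs code
below (the example_* names are hypothetical, for illustration only):

  static void example_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
  {
          /* Hypothetical transport-private context; not a real kernel API. */
          struct example_transport *xt = fiq->priv;

          /* Called without fiq->lock; fuse_get_unique() locks internally. */
          if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
                  req->in.h.unique = fuse_get_unique(fiq);

          clear_bit(FR_PENDING, &req->flags);
          example_transport_enqueue(xt, req);     /* hypothetical helper */
  }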

fs/fuse/virtio_fs.c

@@ -1091,22 +1091,13 @@ static struct virtio_driver virtio_fs_driver = {
#endif
};
static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
{
struct fuse_forget_link *link;
struct virtio_fs_forget *forget;
struct virtio_fs_forget_req *req;
struct virtio_fs *fs;
struct virtio_fs_vq *fsvq;
u64 unique;
link = fuse_dequeue_forget(fiq, 1, NULL);
unique = fuse_get_unique(fiq);
fs = fiq->priv;
fsvq = &fs->vqs[VQ_HIPRIO];
spin_unlock(&fiq->lock);
struct virtio_fs *fs = fiq->priv;
struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
u64 unique = fuse_get_unique(fiq);
/* Allocate a buffer for the request */
forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
@@ -1126,8 +1117,7 @@ __releases(fiq->lock)
kfree(link);
}
static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
/*
* TODO interrupts.
@@ -1136,7 +1126,6 @@ __releases(fiq->lock)
* Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
* with shared lock between host and guest.
*/
spin_unlock(&fiq->lock);
}
/* Count number of scatter-gather elements required */
@@ -1341,21 +1330,17 @@ out:
return ret;
}
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
unsigned int queue_id;
struct virtio_fs *fs;
struct fuse_req *req;
struct virtio_fs_vq *fsvq;
int ret;
WARN_ON(list_empty(&fiq->pending));
req = list_last_entry(&fiq->pending, struct fuse_req, list);
if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
req->in.h.unique = fuse_get_unique(fiq);
clear_bit(FR_PENDING, &req->flags);
list_del_init(&req->list);
WARN_ON(!list_empty(&fiq->pending));
spin_unlock(&fiq->lock);
fs = fiq->priv;
queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
@@ -1393,10 +1378,10 @@ __releases(fiq->lock)
}
static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
.wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
.wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
.wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
.release = virtio_fs_fiq_release,
.send_forget = virtio_fs_send_forget,
.send_interrupt = virtio_fs_send_interrupt,
.send_req = virtio_fs_send_req,
.release = virtio_fs_fiq_release,
};
static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)