io_uring/eventfd: move ctx->evfd_last_cq_tail into io_ev_fd
Everything else about the io_uring eventfd support is nicely kept
private to that code, except the cached_cq_tail tracking. With
everything else in place, move io_eventfd_flush_signal() to using the
ev_fd grab+release helpers, which then enables the direct use of
io_ev_fd for this tracking too.

Link: https://lore.kernel.org/r/20240921080307.185186-7-axboe@kernel.dk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit f4bb2f65bb
parent 83a4f865e2
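The conversion builds on the ev_fd grab+release helpers added by the
parent commit (83a4f865e2). For orientation, the pattern looks roughly
like the sketch below; the names io_eventfd_grab(), io_eventfd_release()
and the refs/rcu fields are taken from the diff, but the bodies here are
a reconstruction, not the verbatim tree code.

/* Sketch of the ev_fd grab + release pattern; reconstructed, not verbatim. */
static struct io_ev_fd *io_eventfd_grab(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;

	rcu_read_lock();
	ev_fd = rcu_dereference(ctx->io_ev_fd);
	/* pin the struct with a reference so it outlives the RCU section */
	if (ev_fd && refcount_inc_not_zero(&ev_fd->refs)) {
		rcu_read_unlock();
		return ev_fd;
	}
	rcu_read_unlock();
	return NULL;
}

static void io_eventfd_release(struct io_ev_fd *ev_fd, bool put_ref)
{
	/*
	 * put_ref is false when __io_eventfd_signal() handed the caller's
	 * reference off to an RCU callback; the drop then happens there.
	 */
	if (put_ref && refcount_dec_and_test(&ev_fd->refs))
		kfree_rcu(ev_fd, rcu);
}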
io_uring/eventfd.c
@@ -13,10 +13,12 @@
 
 struct io_ev_fd {
 	struct eventfd_ctx	*cq_ev_fd;
-	unsigned int		eventfd_async: 1;
-	struct rcu_head		rcu;
+	unsigned int		eventfd_async;
+	/* protected by ->completion_lock */
+	unsigned		last_cq_tail;
 	refcount_t		refs;
 	atomic_t		ops;
+	struct rcu_head		rcu;
 };
 
 enum {
@@ -123,25 +125,31 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
 
 void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
 {
-	bool skip;
+	struct io_ev_fd *ev_fd;
 
-	spin_lock(&ctx->completion_lock);
+	ev_fd = io_eventfd_grab(ctx);
+	if (ev_fd) {
+		bool skip, put_ref = true;
 
-	/*
-	 * Eventfd should only get triggered when at least one event has been
-	 * posted. Some applications rely on the eventfd notification count
-	 * only changing IFF a new CQE has been added to the CQ ring. There's
-	 * no depedency on 1:1 relationship between how many times this
-	 * function is called (and hence the eventfd count) and number of CQEs
-	 * posted to the CQ ring.
-	 */
-	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
-	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
-	spin_unlock(&ctx->completion_lock);
-	if (skip)
-		return;
+		/*
+		 * Eventfd should only get triggered when at least one event
+		 * has been posted. Some applications rely on the eventfd
+		 * notification count only changing IFF a new CQE has been
+		 * added to the CQ ring. There's no dependency on 1:1
+		 * relationship between how many times this function is called
+		 * (and hence the eventfd count) and number of CQEs posted to
+		 * the CQ ring.
+		 */
+		spin_lock(&ctx->completion_lock);
+		skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
+		ev_fd->last_cq_tail = ctx->cached_cq_tail;
+		spin_unlock(&ctx->completion_lock);
 
-	io_eventfd_signal(ctx);
+		if (!skip)
+			put_ref = __io_eventfd_signal(ev_fd);
+
+		io_eventfd_release(ev_fd, put_ref);
+	}
 }
 
 int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
@@ -172,7 +180,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
 	}
 
 	spin_lock(&ctx->completion_lock);
-	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+	ev_fd->last_cq_tail = ctx->cached_cq_tail;
 	spin_unlock(&ctx->completion_lock);
 
 	ev_fd->eventfd_async = eventfd_async;
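The IFF-a-new-CQE semantics that the comment in io_eventfd_flush_signal()
preserves can be observed from userspace. A minimal sketch using
liburing's io_uring_register_eventfd(), assuming liburing is available;
error handling mostly elided:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	uint64_t count;
	int efd;

	efd = eventfd(0, 0);
	if (efd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* IORING_REGISTER_EVENTFD: signal efd when CQEs are posted */
	io_uring_register_eventfd(&ring, efd);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	/* blocks until at least one CQE has been posted */
	read(efd, &count, sizeof(count));
	printf("eventfd fired, count %llu\n", (unsigned long long)count);

	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	close(efd);
	return 0;
}

Repeated flushes without new completions leave the eventfd count
untouched, which is exactly what the skip/last_cq_tail check above
guarantees.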