io_uring/napi: protect concurrent io_napi_entry timeout accesses

The io_napi_entry timeout value can be updated while it is being accessed
from the poll functions.

Its concurrent accesses are wrapped with READ_ONCE()/WRITE_ONCE() macros
to avoid incorrect compiler optimizations.

Signed-off-by: Olivier Langlois <olivier@trillion01.com>
Link: https://lore.kernel.org/r/3de3087563cf98f75266fd9f85fdba063a8720db.1728828877.git.olivier@trillion01.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Olivier Langlois 2024-10-13 14:28:24 -04:00 committed by Jens Axboe
parent 483242714f
commit 2f3cc8e441

View File

@@ -60,7 +60,7 @@ void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock)
 	rcu_read_lock();
 	e = io_napi_hash_find(hash_list, napi_id);
 	if (e) {
-		e->timeout = jiffies + NAPI_TIMEOUT;
+		WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
 		rcu_read_unlock();
 		return;
 	}
@@ -92,7 +92,7 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 	spin_lock(&ctx->napi_lock);
 	hash_for_each(ctx->napi_ht, i, e, node) {
-		if (time_after(jiffies, e->timeout)) {
+		if (time_after(jiffies, READ_ONCE(e->timeout))) {
 			list_del(&e->list);
 			hash_del_rcu(&e->node);
 			kfree_rcu(e, rcu);
@@ -150,7 +150,7 @@ static bool __io_napi_do_busy_loop(struct io_ring_ctx *ctx,
 		napi_busy_loop_rcu(e->napi_id, loop_end, loop_end_arg,
 				   ctx->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
-		if (time_after(jiffies, e->timeout))
+		if (time_after(jiffies, READ_ONCE(e->timeout)))
 			is_stale = true;
 	}