io_uring/napi: pass ktime to io_napi_adjust_timeout

Pass the waiting time for __io_napi_adjust_timeout as ktime and get rid
of all timespec64 conversions. It's especially simpler since the caller
already has a ktime.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4f5b8e8eed4f53a1879e031a6712b25381adc23d.1722003776.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2024-07-26 15:24:31 +01:00 committed by Jens Axboe
parent 342b2e395d
commit 3581696176
3 changed files with 11 additions and 17 deletions

View File

@ -2416,12 +2416,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	if (uts) {
 		struct timespec64 ts;
+		ktime_t dt;
 
 		if (get_timespec64(&ts, uts))
 			return -EFAULT;
-		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
-		io_napi_adjust_timeout(ctx, &iowq, &ts);
+
+		dt = timespec64_to_ktime(ts);
+		iowq.timeout = ktime_add(dt, ktime_get());
+		io_napi_adjust_timeout(ctx, &iowq, dt);
 	}
 
 	if (sig) {

View File

@ -282,20 +282,12 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
  * the NAPI timeout accordingly.
  */
 void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
-			      struct timespec64 *ts)
+			      ktime_t to_wait)
 {
 	ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
 
-	if (ts) {
-		struct timespec64 poll_to_ts;
-
-		poll_to_ts = ns_to_timespec64(ktime_to_ns(poll_dt));
-		if (timespec64_compare(ts, &poll_to_ts) < 0) {
-			s64 poll_to_ns = timespec64_to_ns(ts);
-			if (poll_to_ns > 0)
-				poll_dt = ns_to_ktime(poll_to_ns);
-		}
-	}
+	if (to_wait)
+		poll_dt = min(poll_dt, to_wait);
 
 	iowq->napi_busy_poll_dt = poll_dt;
 }

View File

@ -18,7 +18,7 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
 void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
 
 void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
-		struct io_wait_queue *iowq, struct timespec64 *ts);
+		struct io_wait_queue *iowq, ktime_t to_wait);
 void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
 int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
@ -29,11 +29,11 @@ static inline bool io_napi(struct io_ring_ctx *ctx)
 static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq,
-					  struct timespec64 *ts)
+					  ktime_t to_wait)
 {
 	if (!io_napi(ctx))
 		return;
-	__io_napi_adjust_timeout(ctx, iowq, ts);
+	__io_napi_adjust_timeout(ctx, iowq, to_wait);
 }
 
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
@ -88,7 +88,7 @@ static inline void io_napi_add(struct io_kiocb *req)
 }
 
 static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq,
-					  struct timespec64 *ts)
+					  ktime_t to_wait)
 {
 }
 
 static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,