io_uring: fix stopping iopoll'ing too early
Nobody adjusts *nr_events (the number of completed requests) before calling io_iopoll_getevents(), so the passed @min shouldn't be adjusted either. Otherwise it can return fewer than the initially requested @min events without hitting need_resched().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit eba0a4dd2a
parent 3aadc23e60
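To make the failure mode concrete, here is a minimal user-space sketch, not kernel code: getevents() below is a hypothetical stand-in for io_iopoll_getevents(), each loop pass pretends io_do_iopoll() completes exactly one request, and need_resched() is omitted entirely. With the old tmin adjustment, the cumulative *nr_events is compared against the *remaining* budget, so polling stops well short of the caller's original @min:

#include <stdio.h>

/* Toy model of the old stop condition. Each pass "completes" one
 * request; the loop exits once the cumulative count reaches @min. */
static int getevents(unsigned *nr_events, long min)
{
	while (1) {
		(*nr_events)++;                  /* one more completion */
		if (!min || (long)*nr_events >= min)
			return 0;                /* "enough" events */
	}
}

int main(void)
{
	unsigned nr_events = 6; /* completed in earlier loop iterations */
	long min = 10;          /* what the caller actually asked for */

	/* Old behaviour: pass the adjusted remainder. Since 7 >= (10 - 6),
	 * getevents() returns after a single pass with only 7 events. */
	long tmin = min - nr_events;
	getevents(&nr_events, tmin);
	printf("adjusted @min: stopped at %u of %ld\n", nr_events, min);

	/* Fixed behaviour: pass @min unchanged; polling continues until
	 * all 10 requested events have completed. */
	nr_events = 6;
	getevents(&nr_events, min);
	printf("unadjusted @min: stopped at %u of %ld\n", nr_events, min);
	return 0;
}

Under these assumptions the sketch prints "stopped at 7 of 10" for the adjusted @min and "stopped at 10 of 10" for the unadjusted one: exactly the premature stop the patch removes.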
@@ -2044,7 +2044,7 @@ static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		ret = io_do_iopoll(ctx, nr_events, min);
 		if (ret < 0)
 			return ret;
-		if (!min || *nr_events >= min)
+		if (*nr_events >= min)
 			return 0;
 	}
 
@@ -2087,8 +2087,6 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 	 */
 	mutex_lock(&ctx->uring_lock);
 	do {
-		int tmin = 0;
-
 		/*
 		 * Don't enter poll loop if we already have events pending.
 		 * If we do, we can potentially be spinning for commands that
@@ -2113,10 +2111,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 			mutex_lock(&ctx->uring_lock);
 		}
 
-		if (*nr_events < min)
-			tmin = min - *nr_events;
-
-		ret = io_iopoll_getevents(ctx, nr_events, tmin);
+		ret = io_iopoll_getevents(ctx, nr_events, min);
 		if (ret <= 0)
 			break;
 		ret = 0;