io-uring: move io_wait_queue definition to header file
This moves the definition of the io_wait_queue structure to the header
file so it can also be used from other files.

Signed-off-by: Stefan Roesch <shr@devkernel.io>
Link: https://lore.kernel.org/r/20230608163839.2891748-4-shr@devkernel.io
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 405b4dc14b
parent adaad27980
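For context, a minimal sketch (not part of this commit; the helper name is hypothetical) of what the move enables: any file that includes io_uring/io_uring.h can now work with struct io_wait_queue and test its wake condition directly, rather than the definition being private to io_uring.c:

#include "io_uring.h"

/* Hypothetical helper in another io_uring/ source file; assumes only
 * what this patch exposes via the header. */
static bool example_done_waiting(struct io_wait_queue *iowq)
{
	/* io_should_wake() is a static inline from io_uring.h, so this
	 * file gets its own copy without any new exported symbol. */
	return io_should_wake(iowq);
}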
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2477,33 +2477,12 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	return ret;
 }
 
-struct io_wait_queue {
-	struct wait_queue_entry wq;
-	struct io_ring_ctx *ctx;
-	unsigned cq_tail;
-	unsigned nr_timeouts;
-	ktime_t timeout;
-};
-
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
 	       !llist_empty(&ctx->work_llist);
 }
 
-static inline bool io_should_wake(struct io_wait_queue *iowq)
-{
-	struct io_ring_ctx *ctx = iowq->ctx;
-	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
-
-	/*
-	 * Wake up if we have enough events, or if a timeout occurred since we
-	 * started waiting. For timeouts, we always want to return to userspace,
-	 * regardless of event count.
-	 */
-	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
-}
-
 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 			    int wake_flags, void *key)
 {
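The dist computation above is a wraparound-safe comparison: the waiter primes iowq->cq_tail with the CQ tail value at which enough completions will have been posted, and subtracting the two unsigned 32-bit counters and reading the result as a signed int stays correct even after the ring tail wraps past UINT_MAX, as long as the two values are within INT_MAX of each other. A standalone userspace sketch of the same trick, with hypothetical names:

#include <stdio.h>

/* Same idiom as io_should_wake(): unsigned 32-bit subtraction read
 * back as signed. demo_should_wake() and target_tail are
 * illustrative names, not kernel APIs. */
static int demo_should_wake(unsigned int cur_tail, unsigned int target_tail)
{
	int dist = (int)(cur_tail - target_tail);

	return dist >= 0;
}

int main(void)
{
	unsigned int target = 0xfffffffeu;	/* wake once tail reaches this */

	printf("%d\n", demo_should_wake(0xfffffffdu, target));	/* 0: one event short */
	printf("%d\n", demo_should_wake(0xfffffffeu, target));	/* 1: exactly enough */
	printf("%d\n", demo_should_wake(0x00000002u, target));	/* 1: tail has wrapped */
	return 0;
}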
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -35,6 +35,28 @@ enum {
 	IOU_STOP_MULTISHOT = -ECANCELED,
 };
 
+struct io_wait_queue {
+	struct wait_queue_entry wq;
+	struct io_ring_ctx *ctx;
+	unsigned cq_tail;
+	unsigned nr_timeouts;
+	ktime_t timeout;
+
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+	struct io_ring_ctx *ctx = iowq->ctx;
+	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+
+	/*
+	 * Wake up if we have enough events, or if a timeout occurred since we
+	 * started waiting. For timeouts, we always want to return to userspace,
+	 * regardless of event count.
+	 */
+	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
+
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
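A note on why the function body can move into the header verbatim: io_should_wake() is static inline, so every translation unit that includes io_uring.h compiles its own internal-linkage copy; nothing is exported and no multiple-definition link error is possible. A trivial standalone illustration (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a header-defined static inline: each .c file including
 * the header would get its own private copy of this function. */
static inline bool demo_ready(int have, int want)
{
	return have >= want;
}

int main(void)
{
	printf("%d\n", demo_ready(3, 2));	/* 1: enough events */
	printf("%d\n", demo_ready(1, 2));	/* 0: keep waiting */
	return 0;
}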