io-wq: split hashing and enqueueing

This is a preparation patch removing io_wq_enqueue_hashed(); its job is now
done by calling io_wq_hash_work() followed by io_wq_enqueue().

Also, set the hash value for dependent works, and do it as late as possible,
because req->file can be unavailable before that point. This hash will be
ignored by io-wq.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov, 2020-03-14 00:31:04 +03:00 (committed by Jens Axboe)
parent d78298e73a
commit 8766dd516c
3 changed files with 21 additions and 24 deletions
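
Before the per-file hunks, a minimal userspace sketch of the calling
convention this patch establishes. The function and type names come from the
patch; the flag value, hash derivation, and queue internals below are
invented stand-ins, not the kernel implementation:

#include <stdio.h>

/* Illustrative stand-ins for the io-wq types; the real definitions
 * live in fs/io-wq.h and fs/io-wq.c. */
struct io_wq_work { unsigned flags; };
struct io_wq { const char *name; };

#define IO_WQ_WORK_HASHED (1u << 4)     /* assumed bit; the kernel value may differ */

/* After this patch, hashing only tags the work item... */
static void io_wq_hash_work(struct io_wq_work *work, void *val)
{
        (void)val;      /* the real function derives a hash bucket from val */
        work->flags |= IO_WQ_WORK_HASHED;
}

/* ...and a single enqueue path serves hashed and plain work alike. */
static void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
        printf("%s: queued %s work\n", wq->name,
               (work->flags & IO_WQ_WORK_HASHED) ? "hashed" : "plain");
}

int main(void)
{
        struct io_wq wq = { "io-wq" };
        struct io_wq_work write_work = { 0 }, read_work = { 0 };
        int inode_stub;         /* stands in for file_inode(req->file) */

        /* old API: io_wq_enqueue_hashed(&wq, &write_work, &inode_stub);
         * new API: two calls, so callers that never hash simply skip the first */
        io_wq_hash_work(&write_work, &inode_stub);
        io_wq_enqueue(&wq, &write_work);
        io_wq_enqueue(&wq, &read_work);
        return 0;
}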

fs/io-wq.c

@@ -385,7 +385,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
                 work = container_of(node, struct io_wq_work, list);
 
                 /* not hashed, can run anytime */
-                if (!(work->flags & IO_WQ_WORK_HASHED)) {
+                if (!io_wq_is_hashed(work)) {
                         wq_node_del(&wqe->work_list, node, prev);
                         return work;
                 }
@@ -795,19 +795,15 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 }
 
 /*
- * Enqueue work, hashed by some key. Work items that hash to the same value
- * will not be done in parallel. Used to limit concurrent writes, generally
- * hashed by inode.
+ * Work items that hash to the same value will not be done in parallel.
+ * Used to limit concurrent writes, generally hashed by inode.
  */
-void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
+void io_wq_hash_work(struct io_wq_work *work, void *val)
 {
-        struct io_wqe *wqe = wq->wqes[numa_node_id()];
-        unsigned bit;
+        unsigned int bit;
 
         bit = hash_ptr(val, IO_WQ_HASH_ORDER);
         work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
-        io_wqe_enqueue(wqe, work);
 }
 
 static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
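
A compact sketch of the flag packing io_wq_hash_work() now performs in
isolation. hash_ptr() is imitated here, and the constants are assumptions
rather than the kernel's actual values:

#include <stdio.h>
#include <stdint.h>

#define IO_WQ_WORK_HASHED (1u << 4)     /* assumed flag bit */
#define IO_WQ_HASH_SHIFT  24            /* assumed: bucket lives in the top bits */
#define IO_WQ_HASH_ORDER  5             /* assumed: 2^5 = 32 buckets */

struct io_wq_work { unsigned flags; };

/* toy stand-in for the kernel's hash_ptr(val, order) */
static unsigned hash_ptr(const void *val, unsigned order)
{
        return (unsigned)(((uintptr_t)val >> 4) & ((1u << order) - 1));
}

/* same shape as the patched io_wq_hash_work(): tag the work, stash the bucket */
static void io_wq_hash_work(struct io_wq_work *work, const void *val)
{
        unsigned int bit = hash_ptr(val, IO_WQ_HASH_ORDER);

        work->flags |= IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT);
}

int main(void)
{
        struct io_wq_work a = { 0 }, b = { 0 };
        int inode_stub;                 /* two writes against the same "inode" */

        io_wq_hash_work(&a, &inode_stub);
        io_wq_hash_work(&b, &inode_stub);

        /* identical buckets -> io-wq will not run them in parallel */
        printf("bucket(a)=%u bucket(b)=%u\n",
               a.flags >> IO_WQ_HASH_SHIFT, b.flags >> IO_WQ_HASH_SHIFT);
        return 0;
}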

fs/io-wq.h

@@ -94,7 +94,12 @@ bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
 void io_wq_destroy(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
-void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
+void io_wq_hash_work(struct io_wq_work *work, void *val);
+
+static inline bool io_wq_is_hashed(struct io_wq_work *work)
+{
+        return work->flags & IO_WQ_WORK_HASHED;
+}
 
 void io_wq_cancel_all(struct io_wq *wq);
 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
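
The new io_wq_is_hashed() helper gives consumers a single predicate for the
flag test. A toy dispatcher loop, with an invented queue and busy-bucket
bookkeeping, might use it the way the io_get_next_work() hunk above does:

#include <stdio.h>

#define IO_WQ_WORK_HASHED (1u << 4)     /* assumed flag bit */
#define IO_WQ_HASH_SHIFT  24            /* assumed bucket shift */

struct io_wq_work { unsigned flags; };

static int io_wq_is_hashed(struct io_wq_work *work)
{
        return work->flags & IO_WQ_WORK_HASHED;
}

int main(void)
{
        struct io_wq_work plain = { 0 };
        struct io_wq_work serialized = { IO_WQ_WORK_HASHED | (3u << IO_WQ_HASH_SHIFT) };
        struct io_wq_work *queue[] = { &serialized, &plain };
        unsigned busy_bucket = 3;       /* pretend bucket 3 already has a runner */

        for (unsigned i = 0; i < 2; i++) {
                struct io_wq_work *w = queue[i];

                /* not hashed, can run anytime */
                if (!io_wq_is_hashed(w)) {
                        printf("work %u: run now\n", i);
                        continue;
                }
                unsigned bucket = w->flags >> IO_WQ_HASH_SHIFT;
                printf("work %u: bucket %u %s\n", i, bucket,
                       bucket == busy_bucket ? "deferred" : "run now");
        }
        return 0;
}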

fs/io_uring.c

@@ -1040,15 +1040,14 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
         }
 }
 
-static inline bool io_prep_async_work(struct io_kiocb *req,
+static inline void io_prep_async_work(struct io_kiocb *req,
                                       struct io_kiocb **link)
 {
         const struct io_op_def *def = &io_op_defs[req->opcode];
-        bool do_hashed = false;
 
         if (req->flags & REQ_F_ISREG) {
                 if (def->hash_reg_file)
-                        do_hashed = true;
+                        io_wq_hash_work(&req->work, file_inode(req->file));
         } else {
                 if (def->unbound_nonreg_file)
                         req->work.flags |= IO_WQ_WORK_UNBOUND;
@@ -1057,25 +1056,18 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
 
         io_req_work_grab_env(req, def);
 
         *link = io_prep_linked_timeout(req);
-        return do_hashed;
 }
 
 static inline void io_queue_async_work(struct io_kiocb *req)
 {
         struct io_ring_ctx *ctx = req->ctx;
         struct io_kiocb *link;
-        bool do_hashed;
 
-        do_hashed = io_prep_async_work(req, &link);
-        trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
-                                        req->flags);
-        if (!do_hashed) {
-                io_wq_enqueue(ctx->io_wq, &req->work);
-        } else {
-                io_wq_enqueue_hashed(ctx->io_wq, &req->work,
-                                        file_inode(req->file));
-        }
+        io_prep_async_work(req, &link);
+        trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
+                                        &req->work, req->flags);
+        io_wq_enqueue(ctx->io_wq, &req->work);
 
         if (link)
                 io_queue_linked_timeout(link);
@@ -1582,6 +1574,10 @@ static void io_link_work_cb(struct io_wq_work **workptr)
 static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
 {
         struct io_kiocb *link;
+        const struct io_op_def *def = &io_op_defs[nxt->opcode];
+
+        if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
+                io_wq_hash_work(&nxt->work, file_inode(nxt->file));
 
         *workptr = &nxt->work;
         link = io_prep_linked_timeout(nxt);
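
To illustrate why the dependent-work hash is set this late, here is a sketch
mirroring the io_wq_assign_next() hunk. io_op_defs, io_kiocb, and the flag
values are simplified stand-ins; the point is that the next request's file is
only valid at hand-off time:

#include <stdio.h>

#define REQ_F_ISREG       (1u << 0)     /* assumed flag value */
#define IO_WQ_WORK_HASHED (1u << 4)     /* assumed flag value */
#define IO_WQ_HASH_SHIFT  24            /* assumed shift */

struct file;                            /* opaque here, as in the kernel headers */
struct io_wq_work { unsigned flags; };

struct io_kiocb {
        unsigned flags;
        struct file *file;      /* may still be NULL when the req is first prepared */
        struct io_wq_work work;
        int opcode;
};

struct io_op_def { int hash_reg_file; };

static const struct io_op_def io_op_defs[] = {
        { .hash_reg_file = 1 },         /* opcode 0: e.g. a buffered write */
        { .hash_reg_file = 0 },         /* opcode 1: e.g. an unhashed op */
};

static void io_wq_hash_work(struct io_wq_work *work, void *val)
{
        unsigned bit = (unsigned)((unsigned long)val >> 4) & 0x1f;  /* toy hash */

        work->flags |= IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT);
}

/* mirrors the io_wq_assign_next() addition: hash only once nxt->file is known */
static void assign_next(struct io_kiocb *nxt)
{
        const struct io_op_def *def = &io_op_defs[nxt->opcode];

        if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
                io_wq_hash_work(&nxt->work, nxt->file); /* stand-in for file_inode() */
}

int main(void)
{
        int inode_stub;
        struct io_kiocb nxt = { .flags = REQ_F_ISREG, .opcode = 0 };

        /* the dependent req's file becomes valid only at hand-off time */
        nxt.file = (struct file *)(void *)&inode_stub;

        assign_next(&nxt);
        printf("dependent work hashed: %s\n",
               (nxt.work.flags & IO_WQ_WORK_HASHED) ? "yes" : "no");
        return 0;
}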