mirror of https://github.com/torvalds/linux.git
io-wq: ensure all pending work is canceled on exit
If we race on shutting down the io-wq, then we should ensure that any work that was queued after the workers shut down is canceled. Harden the add-work check a bit too: check for IO_WQ_BIT_EXIT and cancel if it's set.

Add a WARN_ON() for having any work before we kill the io-wq context.

Reported-by: syzbot+91b4b56ead187d35c9d3@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent e4b4a13f49
commit f01272541d
fs/io-wq.c | 42 changed lines
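The shape of the fix is easy to model outside the kernel. Below is a minimal userspace sketch of the pattern the patch applies; toy_wq, work_item, wq_enqueue() and the other names are illustrative stand-ins, not the io-wq API. It shows the three pieces the commit message describes: a match-all cancel callback, a final flush of the pending list at shutdown, and an enqueue path that cancels inline once the exit flag is set.

/*
 * Toy model of the cancellation pattern in this commit -- NOT the
 * kernel's io-wq API; all names here are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define WORK_CANCEL (1u << 0)           /* analog of IO_WQ_WORK_CANCEL */

struct work_item {
        unsigned int flags;
        int id;
        struct work_item *next;
};

struct toy_wq {
        bool exiting;                   /* analog of IO_WQ_BIT_EXIT */
        struct work_item *pending;      /* singly linked pending list */
};

typedef bool work_match_fn(struct work_item *work, void *data);

/* Matches everything, like io_wq_work_match_all() in the patch. */
static bool match_all(struct work_item *work, void *data)
{
        return true;
}

/* A canceled item must still be run, so its owner can clean up. */
static void do_work(struct work_item *work)
{
        if (work->flags & WORK_CANCEL)
                printf("work %d canceled\n", work->id);
        else
                printf("work %d executed\n", work->id);
}

/* Unlink and cancel every pending item the match callback accepts. */
static void wq_cancel_pending(struct toy_wq *wq, work_match_fn *match,
                              void *data)
{
        struct work_item **pp = &wq->pending;

        while (*pp) {
                struct work_item *work = *pp;

                if (!match(work, data)) {
                        pp = &work->next;
                        continue;
                }
                *pp = work->next;               /* unlink */
                work->flags |= WORK_CANCEL;
                do_work(work);                  /* run in canceled mode */
                free(work);
        }
}

/* Enqueue, but cancel inline if the queue is already shutting down. */
static void wq_enqueue(struct toy_wq *wq, struct work_item *work)
{
        if (wq->exiting) {
                work->flags |= WORK_CANCEL;
                do_work(work);
                free(work);
                return;
        }
        /* LIFO push for brevity; io-wq's real list is FIFO. */
        work->next = wq->pending;
        wq->pending = work;
}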
@@ -129,6 +129,17 @@ struct io_wq {
 
 static enum cpuhp_state io_wq_online;
 
+struct io_cb_cancel_data {
+        work_cancel_fn *fn;
+        void *data;
+        int nr_running;
+        int nr_pending;
+        bool cancel_all;
+};
+
+static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+                                       struct io_cb_cancel_data *match);
+
 static bool io_worker_get(struct io_worker *worker)
 {
         return refcount_inc_not_zero(&worker->ref);
@@ -713,6 +724,23 @@ static void io_wq_check_workers(struct io_wq *wq)
         }
 }
 
+static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
+{
+        return true;
+}
+
+static void io_wq_cancel_pending(struct io_wq *wq)
+{
+        struct io_cb_cancel_data match = {
+                .fn             = io_wq_work_match_all,
+                .cancel_all     = true,
+        };
+        int node;
+
+        for_each_node(node)
+                io_wqe_cancel_pending_work(wq->wqes[node], &match);
+}
+
 /*
  * Manager thread. Tasked with creating new workers, if we need them.
  */
@@ -748,6 +776,8 @@ static int io_wq_manager(void *data)
         /* we might not ever have created any workers */
         if (atomic_read(&wq->worker_refs))
                 wait_for_completion(&wq->worker_done);
+
+        io_wq_cancel_pending(wq);
         complete(&wq->exited);
         do_exit(0);
 }
@@ -809,7 +839,8 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
         unsigned long flags;
 
         /* Can only happen if manager creation fails after exec */
-        if (unlikely(io_wq_fork_manager(wqe->wq))) {
+        if (io_wq_fork_manager(wqe->wq) ||
+            test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
                 work->flags |= IO_WQ_WORK_CANCEL;
                 wqe->wq->do_work(work);
                 return;
@@ -845,14 +876,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
         work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
 
-struct io_cb_cancel_data {
-        work_cancel_fn *fn;
-        void *data;
-        int nr_running;
-        int nr_pending;
-        bool cancel_all;
-};
-
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
         struct io_cb_cancel_data *match = data;
@@ -1086,6 +1109,7 @@ static void io_wq_destroy(struct io_wq *wq)
                 struct io_wqe *wqe = wq->wqes[node];
 
                 list_del_init(&wqe->wait.entry);
+                WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
                 kfree(wqe);
         }
         spin_unlock_irq(&wq->hash->wait.lock);
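Continuing the toy model above (same hypothetical names, not the kernel API), the shutdown race and the ordering the patch enforces play out like this:

int main(void)
{
        struct toy_wq wq = { .exiting = false, .pending = NULL };
        struct work_item *a = calloc(1, sizeof(*a));
        struct work_item *b = calloc(1, sizeof(*b));

        if (!a || !b)
                return 1;
        a->id = 1;
        b->id = 2;

        wq_enqueue(&wq, a);     /* queued normally */
        wq.exiting = true;      /* shutdown begins */
        wq_enqueue(&wq, b);     /* lost the race: canceled inline */

        /* the manager's final flush, as io_wq_cancel_pending() does */
        wq_cancel_pending(&wq, match_all, NULL);

        /* analog of the new WARN_ON_ONCE(): nothing may survive to destroy */
        if (wq.pending)
                fprintf(stderr, "BUG: pending work at destroy time\n");
        return 0;
}

Here item 2 is canceled inline at enqueue time and item 1 is canceled by the final flush, so the pending list is empty before teardown. The WARN_ON_ONCE() added to io_wq_destroy() backs this ordering up: by the time the wqes are freed, the final cancel pass must have emptied every work list, so anything still queued there indicates a lost request.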