mirror of https://github.com/torvalds/linux.git
io-wq: optimise locking in io_worker_handle_work()
There are 2 optimisations:

- Currently, io_worker_handle_work() calls io_assign_current_work() twice per request, and each call adds a lock/unlock pair on worker->lock. The first call resets worker->cur_work to NULL, and the second sets the real work shortly after. If there is a dependent work, set it immediately instead, which effectively removes the extra NULL'ing.

- There is also no point in taking wqe->lock for linked works, as they are not hashed now. Optimise it out.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
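For context, the worker->lock cost the message refers to comes from io_assign_current_work() publishing worker->cur_work under that lock. A minimal sketch of the pattern (simplified for illustration, not verbatim fs/io-wq.c):

        static void io_assign_current_work(struct io_worker *worker,
                                           struct io_wq_work *work)
        {
                /* each call pays one irq-safe lock/unlock of worker->lock */
                spin_lock_irq(&worker->lock);
                worker->cur_work = work;        /* NULL means "no work in progress" */
                spin_unlock_irq(&worker->lock);
        }

Before the patch the inner loop first cleared cur_work to NULL and then set the next work right away, paying that lock/unlock pair twice per item; assigning the dependent work directly pays it once.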
commit 58e3931987
parent dc026a73c7
fs/io-wq.c | 15
@@ -476,7 +476,7 @@ static void io_worker_handle_work(struct io_worker *worker)
         struct io_wq *wq = wqe->wq;
 
         do {
-                struct io_wq_work *work, *old_work;
+                struct io_wq_work *work;
                 unsigned hash = -1U;
 
                 /*
@@ -495,12 +495,13 @@ static void io_worker_handle_work(struct io_worker *worker)
                 spin_unlock_irq(&wqe->lock);
                 if (!work)
                         break;
+                io_assign_current_work(worker, work);
 
                 /* handle a whole dependent link */
                 do {
-                        io_assign_current_work(worker, work);
-                        io_impersonate_work(worker, work);
+                        struct io_wq_work *old_work;
 
+                        io_impersonate_work(worker, work);
                         /*
                          * OK to set IO_WQ_WORK_CANCEL even for uncancellable
                          * work, the worker function will do the right thing.
@@ -513,10 +514,8 @@ static void io_worker_handle_work(struct io_worker *worker)
 
                         old_work = work;
                         work->func(&work);
-
-                        spin_lock_irq(&worker->lock);
-                        worker->cur_work = NULL;
-                        spin_unlock_irq(&worker->lock);
+                        work = (old_work == work) ? NULL : work;
+                        io_assign_current_work(worker, work);
 
                         if (wq->put_work)
                                 wq->put_work(old_work);
@@ -529,7 +528,7 @@ static void io_worker_handle_work(struct io_worker *worker)
                                 /* dependent work is not hashed */
                                 hash = -1U;
                         }
-                } while (work && work != old_work);
+                } while (work);
 
                 spin_lock_irq(&wqe->lock);
         } while (1);
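The net effect of the hunks above, condensed into the resulting inner loop (a readability sketch, not a verbatim copy of the patched function; impersonation and cancel handling omitted):

        io_assign_current_work(worker, work);   /* once, before the link loop */

        /* handle a whole dependent link */
        do {
                struct io_wq_work *old_work = work;

                work->func(&work);              /* may hand back the next linked work */
                work = (old_work == work) ? NULL : work;
                io_assign_current_work(worker, work);   /* next link, or NULL at the end */

                if (wq->put_work)
                        wq->put_work(old_work);
        } while (work);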