io_uring-5.11-2021-01-16
Merge tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "We still have a pending fix for a cancelation issue, but it's still
  being investigated. In the meantime:

   - Dead mm handling fix (Pavel)

   - SQPOLL setup error handling (Pavel)

   - Flush timeout sequence fix (Marcelo)

   - Missing finish_wait() for one exit case"

* tag 'io_uring-5.11-2021-01-16' of git://git.kernel.dk/linux-block:
  io_uring: ensure finish_wait() is always called in __io_uring_task_cancel()
  io_uring: flush timeouts that should already have expired
  io_uring: do sqo disable on install_fd error
  io_uring: fix null-deref in io_disable_sqo_submit
  io_uring: don't take files/mm for a dead task
  io_uring: drop mm and files after task_work_run
commit 11c0239ae2
@@ -354,6 +354,7 @@ struct io_ring_ctx {
 		unsigned		cq_entries;
 		unsigned		cq_mask;
 		atomic_t		cq_timeouts;
+		unsigned		cq_last_tm_flush;
 		unsigned long		cq_check_overflow;
 		struct wait_queue_head	cq_wait;
 		struct fasync_struct	*cq_fasync;
@@ -1106,6 +1107,9 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
+	if (current->flags & PF_EXITING)
+		return -EFAULT;
+
 	if (!current->files) {
 		struct files_struct *files;
 		struct nsproxy *nsproxy;
@@ -1133,6 +1137,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
 	struct mm_struct *mm;
 
+	if (current->flags & PF_EXITING)
+		return -EFAULT;
 	if (current->mm)
 		return 0;
 
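These two hunks are the "don't take files/mm for a dead task" fix: once a task has entered exit handling, PF_EXITING is set in current->flags, so the SQPOLL helpers must bail out instead of racing with the teardown of that task's mm and files. A minimal kernel-style sketch of the guard pattern, using a hypothetical helper name (the real call sites are the two __io_sq_thread_acquire_*() functions above):

#include <linux/sched.h>
#include <linux/errno.h>

/* Sketch only: refuse to borrow task resources once the owning task is
 * exiting; returning -EFAULT mirrors the hunks above. */
static int example_acquire_task_resources(void)
{
	if (current->flags & PF_EXITING)
		return -EFAULT;

	/* ... otherwise it is still safe to take references on
	 * current->mm / current->files here ... */
	return 0;
}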
@@ -1634,19 +1640,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-	while (!list_empty(&ctx->timeout_list)) {
+	u32 seq;
+
+	if (list_empty(&ctx->timeout_list))
+		return;
+
+	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+	do {
+		u32 events_needed, events_got;
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
 						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
-		if (req->timeout.target_seq != ctx->cached_cq_tail
-					- atomic_read(&ctx->cq_timeouts))
+
+		/*
+		 * Since seq can easily wrap around over time, subtract
+		 * the last seq at which timeouts were flushed before comparing.
+		 * Assuming not more than 2^31-1 events have happened since,
+		 * these subtractions won't have wrapped, so we can check if
+		 * target is in [last_seq, current_seq] by comparing the two.
+		 */
+		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+		events_got = seq - ctx->cq_last_tm_flush;
+		if (events_got < events_needed)
 			break;
 
 		list_del_init(&req->timeout.list);
 		io_kill_timeout(req);
-	}
+	} while (!list_empty(&ctx->timeout_list));
+
+	ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
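The comment block added to io_flush_timeouts() carries the core reasoning: the sequence counters are unsigned 32-bit values that wrap, so "has the target sequence been reached" is evaluated relative to the last flush point, where the subtractions cannot themselves have wrapped as long as fewer than 2^31-1 events elapsed in between. A standalone userspace sketch of the same arithmetic, with illustrative names that are not taken from the kernel:

/* Demo (not kernel code) of the wrap-safe window check used above: with
 * unsigned 32-bit arithmetic, "target lies within (last_flush, current]"
 * holds iff (current - last_flush) >= (target - last_flush), even when
 * the raw counters have wrapped past UINT32_MAX. */
#include <assert.h>
#include <stdint.h>

static int timeout_expired(uint32_t target, uint32_t last_flush, uint32_t current_seq)
{
	uint32_t events_needed = target - last_flush;
	uint32_t events_got    = current_seq - last_flush;

	return events_got >= events_needed;
}

int main(void)
{
	/* Counters that wrapped around zero: last flush at 0xfffffff0,
	 * current tail at 0x10, target at 0x08 -- already reached. */
	assert(timeout_expired(0x08, 0xfffffff0, 0x10));
	/* Target 0x20 still lies ahead of the current sequence. */
	assert(!timeout_expired(0x20, 0xfffffff0, 0x10));
	return 0;
}

Both assertions hold even though the raw counters wrapped between the last flush and the current tail, which is exactly the case the previous direct comparison against cached_cq_tail could miss.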
@@ -5832,6 +5857,12 @@ static int io_timeout(struct io_kiocb *req)
 	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 	req->timeout.target_seq = tail + off;
 
+	/* Update the last seq here in case io_flush_timeouts() hasn't.
+	 * This is safe because ->completion_lock is held, and submissions
+	 * and completions are never mixed in the same ->completion_lock section.
+	 */
+	ctx->cq_last_tm_flush = tail;
+
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
@@ -7056,6 +7087,7 @@ static int io_sq_thread(void *data)
 
 		if (sqt_spin || !time_after(jiffies, timeout)) {
 			io_run_task_work();
+			io_sq_thread_drop_mm_files();
 			cond_resched();
 			if (sqt_spin)
 				timeout = jiffies + sqd->sq_thread_idle;
@@ -7093,6 +7125,7 @@ static int io_sq_thread(void *data)
 	}
 
 	io_run_task_work();
+	io_sq_thread_drop_mm_files();
 
 	if (cur_css)
 		io_sq_thread_unassociate_blkcg();
@@ -8888,7 +8921,8 @@ static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 
 	/* make sure callers enter the ring to get error */
-	io_ring_set_wakeup_flag(ctx);
+	if (ctx->rings)
+		io_ring_set_wakeup_flag(ctx);
 }
 
 /*
@@ -9067,6 +9101,7 @@ void __io_uring_task_cancel(void)
 		finish_wait(&tctx->wait, &wait);
 	} while (1);
 
+	finish_wait(&tctx->wait, &wait);
 	atomic_dec(&tctx->in_idle);
 
 	io_uring_remove_task_files(tctx);
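The extra finish_wait() covers the path where the cancelation loop takes its continue branch after prepare_to_wait() and then exits via break on the next pass, so the in-loop finish_wait() is never reached and the task is left queued in TASK_UNINTERRUPTIBLE. A rough sketch of that loop shape, assuming hypothetical predicates nothing_in_flight() and saw_progress() in place of the real tctx_inflight() checks:

#include <linux/wait.h>
#include <linux/sched.h>

static bool nothing_in_flight(void);	/* hypothetical: all requests done */
static bool saw_progress(void);		/* hypothetical: completions arrived meanwhile */

/* Sketch: a wait loop that can leave between prepare_to_wait() and the
 * in-loop finish_wait(); the trailing finish_wait() makes the unwind
 * unconditional. */
static void example_cancel_wait(struct wait_queue_head *wq)
{
	DEFINE_WAIT(wait);

	do {
		if (nothing_in_flight())
			break;			/* may exit with the wait entry still armed */

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (saw_progress())
			continue;		/* skips the finish_wait() below */
		schedule();
		finish_wait(wq, &wait);
	} while (1);

	finish_wait(wq, &wait);			/* the fix: always restore task state */
}

finish_wait() is safe to call even when the entry was never queued or has already been removed by a wakeup, which is why one unconditional call after the loop is enough.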
@@ -9700,6 +9735,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	 */
 	ret = io_uring_install_fd(ctx, file);
 	if (ret < 0) {
+		io_disable_sqo_submit(ctx);
 		/* fput will clean it up */
 		fput(file);
 		return ret;