io_uring-5.9-2020-09-22
Merge tag 'io_uring-5.9-2020-09-22' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "A few fixes - most of them regression fixes from this cycle, but also
  a few stable heading fixes, and a build fix for the included demo tool
  since some systems now actually have gettid() available"

* tag 'io_uring-5.9-2020-09-22' of git://git.kernel.dk/linux-block:
  io_uring: fix openat/openat2 unified prep handling
  io_uring: mark statx/files_update/epoll_ctl as non-SQPOLL
  tools/io_uring: fix compile breakage
  io_uring: don't use retry based buffered reads for non-async bdev
  io_uring: don't re-setup vecs/iter in io_resumit_prep() is already there
  io_uring: don't run task work on an exiting task
  io_uring: drop 'ctx' ref on task work cancelation
  io_uring: grab any needed state during defer prep
commit 0baca07006
diff --git a/fs/io_uring.c b/fs/io_uring.c
@@ -1753,6 +1753,9 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret, notify;
 
+	if (tsk->flags & PF_EXITING)
+		return -ESRCH;
+
 	/*
 	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
 	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
@@ -1787,8 +1790,10 @@ static void __io_req_task_cancel(struct io_kiocb *req, int error)
 static void io_req_task_cancel(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 
 	__io_req_task_cancel(req, -ECANCELED);
+	percpu_ref_put(&ctx->refs);
 }
 
 static void __io_req_task_submit(struct io_kiocb *req)
@@ -2010,6 +2015,12 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
 
 static inline bool io_run_task_work(void)
 {
+	/*
+	 * Not safe to run on exiting task, and the task_work handling will
+	 * not add work to such a task.
+	 */
+	if (unlikely(current->flags & PF_EXITING))
+		return false;
 	if (current->task_works) {
 		__set_current_state(TASK_RUNNING);
 		task_work_run();
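The first and third hunks above implement "io_uring: don't run task work on an exiting task": queueing work to an exiting task now fails with -ESRCH, and io_run_task_work() refuses to run on one. The middle hunk is the companion "drop 'ctx' ref on task work cancelation" fix, releasing the percpu reference held for the queued work. Below is a minimal user-space sketch of the exit-guard pattern; PF_EXITING and -ESRCH mirror the kernel names, but the task struct and helpers are hypothetical stand-ins, not kernel API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define PF_EXITING 0x00000004

struct task {
	unsigned int flags;
	void (*work)(void);		/* at most one pending callback */
};

static int task_work_add(struct task *tsk, void (*cb)(void))
{
	if (tsk->flags & PF_EXITING)
		return -ESRCH;		/* target is going away: refuse new work */
	tsk->work = cb;
	return 0;
}

static bool run_task_work(struct task *tsk)
{
	if (tsk->flags & PF_EXITING)
		return false;		/* not safe to run on an exiting task */
	if (!tsk->work)
		return false;
	tsk->work();
	tsk->work = NULL;
	return true;
}

static void hello(void)
{
	puts("deferred work ran");
}

int main(void)
{
	struct task t = { .flags = 0, .work = NULL };

	task_work_add(&t, hello);
	run_task_work(&t);		/* runs the callback */

	t.flags |= PF_EXITING;
	if (task_work_add(&t, hello) == -ESRCH)
		puts("add refused: task is exiting");
	return 0;
}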
@@ -2283,13 +2294,17 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
 		goto end_req;
 	}
 
-	ret = io_import_iovec(rw, req, &iovec, &iter, false);
-	if (ret < 0)
-		goto end_req;
-	ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
-	if (!ret)
+	if (!req->io) {
+		ret = io_import_iovec(rw, req, &iovec, &iter, false);
+		if (ret < 0)
+			goto end_req;
+		ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
+		if (!ret)
+			return true;
+		kfree(iovec);
+	} else {
 		return true;
-	kfree(iovec);
+	}
 end_req:
 	req_set_fail_links(req);
 	io_req_complete(req, ret);
@@ -3115,6 +3130,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	struct iov_iter __iter, *iter = &__iter;
 	ssize_t io_size, ret, ret2;
 	size_t iov_count;
+	bool no_async;
 
 	if (req->io)
 		iter = &req->io->rw.iter;
@@ -3132,7 +3148,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		kiocb->ki_flags &= ~IOCB_NOWAIT;
 
 	/* If the file doesn't support async, just async punt */
-	if (force_nonblock && !io_file_supports_async(req->file, READ))
+	no_async = force_nonblock && !io_file_supports_async(req->file, READ);
+	if (no_async)
 		goto copy_iov;
 
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
@@ -3176,6 +3193,8 @@ copy_iov:
 			ret = ret2;
 			goto out_free;
 		}
+		if (no_async)
+			return -EAGAIN;
 		/* it's copied and will be cleaned with ->io */
 		iovec = NULL;
 		/* now use our persistent iterator, if we aren't already */
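The io_resubmit_prep() hunk ("don't re-setup vecs/iter in io_resumit_prep() is already there") skips re-importing the iovec when req->io already holds the async state. The io_read() hunks then implement "don't use retry based buffered reads for non-async bdev": whether the file supports async retry is recorded once in no_async, and if it doesn't, the request returns -EAGAIN after its iovec has been copied, punting to async context instead of arming a retry. A control-flow sketch of that pattern, with hypothetical helpers standing in for the kernel ones:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct file;			/* opaque stand-in */

/* hypothetical stand-ins for io_file_supports_async() and the punt setup */
static bool file_supports_async(struct file *f)
{
	(void)f;
	return false;		/* e.g. a bdev without async read support */
}

static int copy_iov_for_punt(struct file *f)
{
	(void)f;
	return 0;		/* pretend the iovec copy succeeded */
}

static int do_read(struct file *f, bool force_nonblock)
{
	/* evaluated exactly once, like the new no_async local in io_read() */
	bool no_async = force_nonblock && !file_supports_async(f);
	int ret;

	if (no_async)
		goto copy_iov;
	/* ... nonblocking read attempt elided ... */
copy_iov:
	ret = copy_iov_for_punt(f);
	if (ret < 0)
		return ret;
	if (no_async)
		return -EAGAIN;	/* punt to async context, don't arm a retry */
	/* ... retry-based buffered read path elided ... */
	return 0;
}

int main(void)
{
	return do_read(NULL, true) == -EAGAIN ? 0 : 1;
}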
@@ -3508,8 +3527,6 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	const char __user *fname;
 	int ret;
 
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
-		return -EINVAL;
 	if (unlikely(sqe->ioprio || sqe->buf_index))
 		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
@@ -3536,6 +3553,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	u64 flags, mode;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (req->flags & REQ_F_NEED_CLEANUP)
 		return 0;
 	mode = READ_ONCE(sqe->len);
@@ -3550,6 +3569,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	size_t len;
 	int ret;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (req->flags & REQ_F_NEED_CLEANUP)
 		return 0;
 	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
@@ -3767,7 +3788,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
 		return -EINVAL;
 
 	req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -3882,7 +3903,7 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
@@ -5399,6 +5420,8 @@ static int io_async_cancel(struct io_kiocb *req)
 static int io_files_update_prep(struct io_kiocb *req,
 				const struct io_uring_sqe *sqe)
 {
+	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
+		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->rw_flags)
@@ -5449,6 +5472,8 @@ static int io_req_defer_prep(struct io_kiocb *req,
 	if (unlikely(ret))
 		return ret;
 
+	io_prep_async_work(req);
+
 	switch (req->opcode) {
 	case IORING_OP_NOP:
 		break;
@@ -8180,6 +8205,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 		/* cancel this request, or head link requests */
 		io_attempt_cancel(ctx, cancel_req);
 		io_put_req(cancel_req);
+		/* cancellations _may_ trigger task work */
+		io_run_task_work();
 		schedule();
 		finish_wait(&ctx->inflight_wait, &wait);
 	}
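The remaining fs/io_uring.c hunks carry the other fixes: the openat/openat2 ring-type check moves out of the shared __io_openat_prep() into each caller so it runs before any setup work; statx, files_update, and epoll_ctl prep now reject SQPOLL rings; io_req_defer_prep() grabs any needed state up front via io_prep_async_work(); and the file-cancel path runs pending task work. The (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL) test now recurs across prep handlers; a hypothetical helper that would consolidate it, shown only as a kernel-style sketch and not part of the patch:

/*
 * Hypothetical consolidation of the repeated prep-time ring-flag test;
 * the actual patch open-codes it in each prep handler.
 */
static inline int io_check_ring_type(struct io_kiocb *req, unsigned int banned)
{
	if (unlikely(req->ctx->flags & banned))
		return -EINVAL;
	return 0;
}

/*
 * usage, e.g. at the top of io_statx_prep():
 *
 *	ret = io_check_ring_type(req, IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL);
 *	if (ret)
 *		return ret;
 */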
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
@@ -130,7 +130,7 @@ static int io_uring_register_files(struct submitter *s)
 					s->nr_files);
 }
 
-static int gettid(void)
+static int lk_gettid(void)
 {
 	return syscall(__NR_gettid);
 }
@@ -281,7 +281,7 @@ static void *submitter_fn(void *data)
 	struct io_sq_ring *ring = &s->sq_ring;
 	int ret, prepped;
 
-	printf("submitter=%d\n", gettid());
+	printf("submitter=%d\n", lk_gettid());
 
 	srand48_r(pthread_self(), &s->rand);
 
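The final two hunks fix the bundled benchmark's build: glibc 2.30 added a gettid() wrapper (declared via <unistd.h> under _GNU_SOURCE), so the tool's own static gettid() now collides with the libc declaration, and renaming it to lk_gettid() resolves the clash. A standalone sketch of the conflict and the fix, separate from the benchmark itself:

/*
 * With glibc >= 2.30 and _GNU_SOURCE, <unistd.h> already declares
 * gettid(); defining a local "static int gettid(void)" then fails with
 * a conflicting-declaration error. Renaming the wrapper avoids it.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int lk_gettid(void)	/* was: static int gettid(void) */
{
	return syscall(__NR_gettid);
}

int main(void)
{
	printf("submitter=%d\n", lk_gettid());
	return 0;
}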