forked from Minki/linux
io_uring/net: save address for sendzc async execution
We usually copy all bits that a request needs from the userspace for async execution, so the userspace can keep them on the stack. However, send zerocopy violates this pattern for addresses and may reload it e.g. from io-wq. Save the address if any in ->async_data as usual. Reported-by: Stefan Metzmacher <metze@samba.org> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/d7512d7aa9abcd36e9afe1a4d292a24cb2d157e5.1661342812.git.asml.silence@gmail.com [axboe: fold in incremental fix] Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
5916943943
commit
581711c466
@ -182,6 +182,37 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
|
|||||||
&iomsg->free_iov);
|
&iomsg->free_iov);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int io_sendzc_prep_async(struct io_kiocb *req)
|
||||||
|
{
|
||||||
|
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
|
||||||
|
struct io_async_msghdr *io;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (!zc->addr || req_has_async_data(req))
|
||||||
|
return 0;
|
||||||
|
if (io_alloc_async_data(req))
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
io = req->async_data;
|
||||||
|
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int io_setup_async_addr(struct io_kiocb *req,
|
||||||
|
struct sockaddr_storage *addr,
|
||||||
|
unsigned int issue_flags)
|
||||||
|
{
|
||||||
|
struct io_async_msghdr *io;
|
||||||
|
|
||||||
|
if (!addr || req_has_async_data(req))
|
||||||
|
return -EAGAIN;
|
||||||
|
if (io_alloc_async_data(req))
|
||||||
|
return -ENOMEM;
|
||||||
|
io = req->async_data;
|
||||||
|
memcpy(&io->addr, addr, sizeof(io->addr));
|
||||||
|
return -EAGAIN;
|
||||||
|
}
|
||||||
|
|
||||||
int io_sendmsg_prep_async(struct io_kiocb *req)
|
int io_sendmsg_prep_async(struct io_kiocb *req)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
@ -944,7 +975,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
|
|||||||
|
|
||||||
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
|
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
|
||||||
{
|
{
|
||||||
struct sockaddr_storage address;
|
struct sockaddr_storage __address, *addr = NULL;
|
||||||
struct io_ring_ctx *ctx = req->ctx;
|
struct io_ring_ctx *ctx = req->ctx;
|
||||||
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
|
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
|
||||||
struct io_notif_slot *notif_slot;
|
struct io_notif_slot *notif_slot;
|
||||||
@ -978,10 +1009,17 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
|
|||||||
msg.msg_namelen = 0;
|
msg.msg_namelen = 0;
|
||||||
|
|
||||||
if (zc->addr) {
|
if (zc->addr) {
|
||||||
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &address);
|
if (req_has_async_data(req)) {
|
||||||
if (unlikely(ret < 0))
|
struct io_async_msghdr *io = req->async_data;
|
||||||
return ret;
|
|
||||||
msg.msg_name = (struct sockaddr *)&address;
|
msg.msg_name = addr = &io->addr;
|
||||||
|
} else {
|
||||||
|
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
|
||||||
|
if (unlikely(ret < 0))
|
||||||
|
return ret;
|
||||||
|
msg.msg_name = (struct sockaddr *)&__address;
|
||||||
|
addr = &__address;
|
||||||
|
}
|
||||||
msg.msg_namelen = zc->addr_len;
|
msg.msg_namelen = zc->addr_len;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1013,13 +1051,14 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
|
|||||||
|
|
||||||
if (unlikely(ret < min_ret)) {
|
if (unlikely(ret < min_ret)) {
|
||||||
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
|
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
|
||||||
return -EAGAIN;
|
return io_setup_async_addr(req, addr, issue_flags);
|
||||||
|
|
||||||
if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
|
if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
|
||||||
zc->len -= ret;
|
zc->len -= ret;
|
||||||
zc->buf += ret;
|
zc->buf += ret;
|
||||||
zc->done_io += ret;
|
zc->done_io += ret;
|
||||||
req->flags |= REQ_F_PARTIAL_IO;
|
req->flags |= REQ_F_PARTIAL_IO;
|
||||||
return -EAGAIN;
|
return io_setup_async_addr(req, addr, issue_flags);
|
||||||
}
|
}
|
||||||
if (ret == -ERESTARTSYS)
|
if (ret == -ERESTARTSYS)
|
||||||
ret = -EINTR;
|
ret = -EINTR;
|
||||||
|
@ -31,6 +31,7 @@ struct io_async_connect {
|
|||||||
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
|
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
|
||||||
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
|
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
|
||||||
|
|
||||||
|
int io_sendzc_prep_async(struct io_kiocb *req);
|
||||||
int io_sendmsg_prep_async(struct io_kiocb *req);
|
int io_sendmsg_prep_async(struct io_kiocb *req);
|
||||||
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
|
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
|
||||||
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
|
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
|
||||||
|
@ -478,13 +478,15 @@ const struct io_op_def io_op_defs[] = {
|
|||||||
.pollout = 1,
|
.pollout = 1,
|
||||||
.audit_skip = 1,
|
.audit_skip = 1,
|
||||||
.ioprio = 1,
|
.ioprio = 1,
|
||||||
|
.manual_alloc = 1,
|
||||||
#if defined(CONFIG_NET)
|
#if defined(CONFIG_NET)
|
||||||
|
.async_size = sizeof(struct io_async_msghdr),
|
||||||
.prep = io_sendzc_prep,
|
.prep = io_sendzc_prep,
|
||||||
.issue = io_sendzc,
|
.issue = io_sendzc,
|
||||||
|
.prep_async = io_sendzc_prep_async,
|
||||||
#else
|
#else
|
||||||
.prep = io_eopnotsupp_prep,
|
.prep = io_eopnotsupp_prep,
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user