io_uring: Introduce IORING_OP_BIND
IORING_OP_BIND provides the semantic of bind(2) via io_uring. While this is an essentially synchronous system call, the main point is to enable a network path to execute fully with io_uring registered and descriptorless files.

Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/20240614163047.31581-3-krisman@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 7481fd93fa (parent bb6aaf7366)
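Usage sketch (not part of this commit): a minimal userspace program that submits an IORING_OP_BIND request, assuming liburing for ring setup and uapi headers new enough to define IORING_OP_BIND; the address, port, and queue depth are arbitrary examples. The SQE layout mirrors what io_bind_prep() reads below: the sockaddr pointer goes in sqe->addr, the address length in sqe->addr2, and len/buf_index/rw_flags/splice_fd_in must stay zero or the request fails with -EINVAL.

#include <liburing.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),		/* arbitrary example port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int sockfd, ret;

	sockfd = socket(AF_INET, SOCK_STREAM, 0);
	if (sockfd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_BIND;
	sqe->fd = sockfd;
	sqe->addr = (unsigned long)&addr;	/* sockaddr pointer, read from sqe->addr */
	sqe->addr2 = sizeof(addr);		/* address length, read from sqe->addr2 */

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("bind result: %d\n", cqe->res);	/* 0 on success, -errno on failure */
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(sockfd);
	return 0;
}

Depending on the liburing version, a prep helper for bind may be available instead of filling the SQE fields by hand.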
include/uapi/linux/io_uring.h
@@ -257,6 +257,7 @@ enum io_uring_op {
 	IORING_OP_FUTEX_WAITV,
 	IORING_OP_FIXED_FD_INSTALL,
 	IORING_OP_FTRUNCATE,
+	IORING_OP_BIND,
 
 	/* this goes last, obviously */
 	IORING_OP_LAST,
io_uring/net.c
@@ -51,6 +51,11 @@ struct io_connect {
 	bool				seen_econnaborted;
 };
 
+struct io_bind {
+	struct file			*file;
+	int				addr_len;
+};
+
 struct io_sr_msg {
 	struct file			*file;
 	union {
@@ -1715,6 +1720,37 @@ out:
 	return IOU_OK;
 }
 
+int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+	struct sockaddr __user *uaddr;
+	struct io_async_msghdr *io;
+
+	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
+		return -EINVAL;
+
+	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+	bind->addr_len = READ_ONCE(sqe->addr2);
+
+	io = io_msg_alloc_async(req);
+	if (unlikely(!io))
+		return -ENOMEM;
+	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
+}
+
+int io_bind(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+	struct io_async_msghdr *io = req->async_data;
+	int ret;
+
+	ret = __sys_bind_socket(sock_from_file(req->file), &io->addr, bind->addr_len);
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_set_res(req, ret, 0);
+	return 0;
+}
+
 void io_netmsg_cache_free(const void *entry)
 {
 	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
io_uring/net.h
@@ -49,6 +49,9 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 void io_send_zc_cleanup(struct io_kiocb *req);
 
+int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_bind(struct io_kiocb *req, unsigned int issue_flags);
+
 void io_netmsg_cache_free(const void *entry);
 #else
 static inline void io_netmsg_cache_free(const void *entry)
io_uring/opdef.c
@@ -495,6 +495,16 @@ const struct io_issue_def io_issue_defs[] = {
 		.prep			= io_ftruncate_prep,
 		.issue			= io_ftruncate,
 	},
+	[IORING_OP_BIND] = {
+#if defined(CONFIG_NET)
+		.needs_file		= 1,
+		.prep			= io_bind_prep,
+		.issue			= io_bind,
+		.async_size		= sizeof(struct io_async_msghdr),
+#else
+		.prep			= io_eopnotsupp_prep,
+#endif
+	},
 };
 
 const struct io_cold_def io_cold_defs[] = {
@@ -716,6 +726,9 @@ const struct io_cold_def io_cold_defs[] = {
 	[IORING_OP_FTRUNCATE] = {
 		.name			= "FTRUNCATE",
 	},
+	[IORING_OP_BIND] = {
+		.name			= "BIND",
+	},
 };
 
 const char *io_uring_get_opcode(u8 opcode)