io_uring: change io_get_ext_arg() to use uaccess begin + end

In scenarios where a high frequency of wait events is seen, the copy
of struct io_uring_getevents_arg is quite noticeable in profiles in
terms of time spent, showing up at up to 3.5-4.5%. Rewrite the
copy-in logic, saving about 0.5% of the time.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
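
Background on the mechanism, before the diff: copy_from_user() performs its
access checks and (on x86, roughly speaking) STAC/CLAC transitions on every
call. The masked-uaccess API amortizes that by opening one user-access window
with user_access_begin(), doing raw per-field loads with unsafe_get_user()
(which jump to a caller-supplied label on a fault), and closing the window
with user_access_end() on both the success and the fault path. A minimal
sketch of that pattern follows; the struct and helper names are hypothetical
and not part of this patch:

#include <linux/uaccess.h>

/* Hypothetical struct and helper, for illustration only. */
struct demo_arg {
	u64 a;
	u32 b;
};

static int demo_copy_in(const struct demo_arg __user *uarg,
			struct demo_arg *arg)
{
	/* One range check opens the access window for the whole struct. */
	if (!user_access_begin(uarg, sizeof(*uarg)))
		return -EFAULT;
	/* Raw loads, no per-call checks; a fault jumps to the label. */
	unsafe_get_user(arg->a, &uarg->a, efault);
	unsafe_get_user(arg->b, &uarg->b, efault);
	user_access_end();
	return 0;
efault:
	/* The window must also be closed on the fault path. */
	user_access_end();
	return -EFAULT;
}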
commit 371b47da25
parent 0a54a7dd0a
Author: Jens Axboe <axboe@kernel.dk>
Date:   2024-10-22 13:41:42 -06:00

io_uring/io_uring.c
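
For reference while reading the field-by-field loads below: struct
io_uring_getevents_arg lives in include/uapi/linux/io_uring.h, and at this
point in the tree it should look like the following, so the four
unsafe_get_user() calls in the diff consume the entire struct:

struct io_uring_getevents_arg {
	__u64	sigmask;
	__u32	sigmask_sz;
	__u32	min_wait_usec;
	__u64	ts;
};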

@@ -3240,6 +3240,7 @@ static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
 static int io_get_ext_arg(unsigned flags, const void __user *argp,
 			  struct ext_arg *ext_arg)
 {
+	const struct io_uring_getevents_arg __user *uarg = argp;
 	struct io_uring_getevents_arg arg;
 
 	/*
@@ -3257,8 +3258,18 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp,
 	 */
 	if (ext_arg->argsz != sizeof(arg))
 		return -EINVAL;
-	if (copy_from_user(&arg, argp, sizeof(arg)))
+#ifdef CONFIG_64BIT
+	if (!user_access_begin(uarg, sizeof(*uarg)))
 		return -EFAULT;
+	unsafe_get_user(arg.sigmask, &uarg->sigmask, uaccess_end);
+	unsafe_get_user(arg.sigmask_sz, &uarg->sigmask_sz, uaccess_end);
+	unsafe_get_user(arg.min_wait_usec, &uarg->min_wait_usec, uaccess_end);
+	unsafe_get_user(arg.ts, &uarg->ts, uaccess_end);
+	user_access_end();
+#else
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+#endif
 	ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
 	ext_arg->sig = u64_to_user_ptr(arg.sigmask);
 	ext_arg->argsz = arg.sigmask_sz;
@@ -3268,6 +3279,11 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp,
 		ext_arg->ts_set = true;
 	}
 	return 0;
+#ifdef CONFIG_64BIT
+uaccess_end:
+	user_access_end();
+	return -EFAULT;
+#endif
 }
 
 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
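
For context on why this copy is hot enough to show in profiles: every
io_uring_enter(2) call that waits with IORING_ENTER_EXT_ARG passes one of
these structs, so a wait-heavy workload copies it in at syscall frequency.
A hedged userspace sketch of that call path follows; it uses the raw syscall
rather than liburing, and assumes ring_fd is an io_uring instance whose setup
is omitted:

#include <linux/io_uring.h>
#include <linux/time_types.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wait for one completion with a 10ms timeout on an already set up ring. */
static int wait_one_cqe(int ring_fd)
{
	struct __kernel_timespec ts = { .tv_nsec = 10 * 1000 * 1000 };
	struct io_uring_getevents_arg arg;

	memset(&arg, 0, sizeof(arg));		/* no sigmask, no min wait */
	arg.ts = (__u64)(unsigned long)&ts;	/* copied in by io_get_ext_arg() */

	/* argsz must equal sizeof(arg), matching the kernel-side check above. */
	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}

At a high wait frequency this copy-in runs once per call, which is where the
3.5-4.5% seen in the profiles came from.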