forked from Minki/linux
net: Fix data-races around sysctl_optmem_max.
While reading sysctl_optmem_max, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its readers.
Fixes: 1da177e4c3 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
6bae8ceb90
commit
7de6d09f51
@ -310,11 +310,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
|
||||
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
|
||||
void *owner, u32 size)
|
||||
{
|
||||
int optmem_max = READ_ONCE(sysctl_optmem_max);
|
||||
struct sock *sk = (struct sock *)owner;
|
||||
|
||||
/* same check as in sock_kmalloc() */
|
||||
if (size <= sysctl_optmem_max &&
|
||||
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
|
||||
if (size <= optmem_max &&
|
||||
atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
|
||||
atomic_add(size, &sk->sk_omem_alloc);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1214,10 +1214,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
|
||||
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
|
||||
{
|
||||
u32 filter_size = bpf_prog_size(fp->prog->len);
|
||||
int optmem_max = READ_ONCE(sysctl_optmem_max);
|
||||
|
||||
/* same check as in sock_kmalloc() */
|
||||
if (filter_size <= sysctl_optmem_max &&
|
||||
atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
|
||||
if (filter_size <= optmem_max &&
|
||||
atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
|
||||
atomic_add(filter_size, &sk->sk_omem_alloc);
|
||||
return true;
|
||||
}
|
||||
@ -1548,7 +1549,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
|
||||
if (bpf_prog_size(prog->len) > sysctl_optmem_max)
|
||||
if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
|
||||
err = -ENOMEM;
|
||||
else
|
||||
err = reuseport_attach_prog(sk, prog);
|
||||
@ -1615,7 +1616,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
|
||||
}
|
||||
} else {
|
||||
/* BPF_PROG_TYPE_SOCKET_FILTER */
|
||||
if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
|
||||
if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
|
||||
err = -ENOMEM;
|
||||
goto err_prog_put;
|
||||
}
|
||||
|
@ -2536,7 +2536,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
|
||||
|
||||
/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
|
||||
if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
|
||||
sysctl_optmem_max)
|
||||
READ_ONCE(sysctl_optmem_max))
|
||||
return NULL;
|
||||
|
||||
skb = alloc_skb(size, priority);
|
||||
@ -2554,8 +2554,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
|
||||
*/
|
||||
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
|
||||
{
|
||||
if ((unsigned int)size <= sysctl_optmem_max &&
|
||||
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
|
||||
int optmem_max = READ_ONCE(sysctl_optmem_max);
|
||||
|
||||
if ((unsigned int)size <= optmem_max &&
|
||||
atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
|
||||
void *mem;
|
||||
/* First do the add, to avoid the race if kmalloc
|
||||
* might sleep.
|
||||
|
@ -772,7 +772,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
|
||||
|
||||
if (optlen < GROUP_FILTER_SIZE(0))
|
||||
return -EINVAL;
|
||||
if (optlen > sysctl_optmem_max)
|
||||
if (optlen > READ_ONCE(sysctl_optmem_max))
|
||||
return -ENOBUFS;
|
||||
|
||||
gsf = memdup_sockptr(optval, optlen);
|
||||
@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
|
||||
|
||||
if (optlen < size0)
|
||||
return -EINVAL;
|
||||
if (optlen > sysctl_optmem_max - 4)
|
||||
if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
|
||||
return -ENOBUFS;
|
||||
|
||||
p = kmalloc(optlen + 4, GFP_KERNEL);
|
||||
@ -1233,7 +1233,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
|
||||
|
||||
if (optlen < IP_MSFILTER_SIZE(0))
|
||||
goto e_inval;
|
||||
if (optlen > sysctl_optmem_max) {
|
||||
if (optlen > READ_ONCE(sysctl_optmem_max)) {
|
||||
err = -ENOBUFS;
|
||||
break;
|
||||
}
|
||||
|
@ -210,7 +210,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
|
||||
|
||||
if (optlen < GROUP_FILTER_SIZE(0))
|
||||
return -EINVAL;
|
||||
if (optlen > sysctl_optmem_max)
|
||||
if (optlen > READ_ONCE(sysctl_optmem_max))
|
||||
return -ENOBUFS;
|
||||
|
||||
gsf = memdup_sockptr(optval, optlen);
|
||||
@ -244,7 +244,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
|
||||
|
||||
if (optlen < size0)
|
||||
return -EINVAL;
|
||||
if (optlen > sysctl_optmem_max - 4)
|
||||
if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
|
||||
return -ENOBUFS;
|
||||
|
||||
p = kmalloc(optlen + 4, GFP_KERNEL);
|
||||
|
Loading…
Reference in New Issue
Block a user