Merge branch 'sysctl-data-races'
Kuniyuki Iwashima says:

====================
net: sysctl: Fix data-races around net.core.XXX

This series fixes data-races around all knobs in net_core_table and
netns_core_table except for bpf stuff.

These knobs are skipped:

- 4 bpf knobs
- netdev_rss_key: Written only once by net_get_random_once() and
  read-only knob
- rps_sock_flow_entries: Protected with sock_flow_mutex
- flow_limit_cpu_bitmap: Protected with flow_limit_update_mutex
- flow_limit_table_len: Protected with flow_limit_update_mutex
- default_qdisc: Protected with qdisc_mod_lock
- warnings: Unused
- high_order_alloc_disable: Protected with static_key_mutex
- skb_defer_max: Already using READ_ONCE()
- sysctl_txrehash: Already using READ_ONCE()

Note: the 5th patch fixes net.core.message_cost and net.core.message_burst,
and lib/ratelimit.c does not have an explicit maintainer.

Changes:
v3:
  * Fix build failures of CONFIG_SYSCTL=n case in 13th & 14th patches

v2: https://lore.kernel.org/netdev/20220818035227.81567-1-kuniyu@amazon.com/
  * Removed 4 bpf knobs and added 6 knobs

v1: https://lore.kernel.org/netdev/20220816052347.70042-1-kuniyu@amazon.com/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
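The recurring pattern in the patches below: the reader side samples a knob exactly once with READ_ONCE(), usually into a local variable that is then used consistently, while the writer side publishes with WRITE_ONCE(). A minimal userspace sketch of that idea — READ_ONCE()/WRITE_ONCE() are modeled here with volatile accesses the way the kernel defines them, and demo_knob, reader() and writer() are hypothetical names, not code from this series:

	#include <pthread.h>
	#include <stdio.h>

	#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

	static int demo_knob = 1000;		/* stand-in for a net.core knob */

	static void *reader(void *arg)		/* fast path: lockless */
	{
		/* Sample once; use the snapshot consistently afterwards. */
		int limit = READ_ONCE(demo_knob);

		printf("using limit %d\n", limit);
		return NULL;
	}

	static void *writer(void *arg)		/* slow path: a sysctl write */
	{
		WRITE_ONCE(demo_knob, 2000);	/* pairs with READ_ONCE() above */
		return NULL;
	}

	int main(void)
	{
		pthread_t r, w;

		pthread_create(&r, NULL, reader, NULL);
		pthread_create(&w, NULL, writer, NULL);
		pthread_join(r, NULL);
		pthread_join(w, NULL);
		return 0;
	}

The marked accesses do not add ordering; they only tell the compiler not to tear, fuse, or re-load the access, which is exactly the guarantee a single racy int knob needs.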
commit 0c4a95417e
@@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
 netdev_max_backlog
 ------------------
 
-Maximum number of packets, queued on the INPUT side, when the interface
+Maximum number of packets, queued on the INPUT side, when the interface
 receives packets faster than kernel can process them.
 
 netdev_rss_key
@@ -640,9 +640,23 @@ extern int sysctl_devconf_inherit_init_net;
  */
 static inline bool net_has_fallback_tunnels(const struct net *net)
 {
-	return !IS_ENABLED(CONFIG_SYSCTL) ||
-	       !sysctl_fb_tunnels_only_for_init_net ||
-	       (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
+#if IS_ENABLED(CONFIG_SYSCTL)
+	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+	return !fb_tunnels_only_for_init_net ||
+	       (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+	return true;
+#endif
 }
 
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+	return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+	return 0;
+#endif
+}
+
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -33,7 +33,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
 
 static inline bool net_busy_loop_on(void)
 {
-	return sysctl_net_busy_poll;
+	return READ_ONCE(sysctl_net_busy_poll);
 }
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
@@ -439,7 +439,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
 {
 	list_add_tail(&skb->list, &napi->rx_list);
 	napi->rx_count += segs;
-	if (napi->rx_count >= gro_normal_batch)
+	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
 		gro_normal_list(napi);
 }
 
@@ -26,10 +26,16 @@
  */
 int ___ratelimit(struct ratelimit_state *rs, const char *func)
 {
+	/* Paired with WRITE_ONCE() in .proc_handler().
+	 * Changing two values seperately could be inconsistent
+	 * and some message could be lost. (See: net_ratelimit_state).
+	 */
+	int interval = READ_ONCE(rs->interval);
+	int burst = READ_ONCE(rs->burst);
 	unsigned long flags;
 	int ret;
 
-	if (!rs->interval)
+	if (!interval)
 		return 1;
 
 	/*
@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 	if (!rs->begin)
 		rs->begin = jiffies;
 
-	if (time_is_before_jiffies(rs->begin + rs->interval)) {
+	if (time_is_before_jiffies(rs->begin + interval)) {
 		if (rs->missed) {
 			if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
 				printk_deferred(KERN_WARNING
@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 		rs->begin = jiffies;
 		rs->printed = 0;
 	}
-	if (rs->burst && rs->burst > rs->printed) {
+	if (burst && burst > rs->printed) {
 		rs->printed++;
 		ret = 1;
 	} else {
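The comment added to ___ratelimit() above is the subtle part: interval and burst are each loaded once at entry because the two sysctl values can change independently mid-call, so re-reading rs->interval or rs->burst later in the function could mix old and new settings. A hedged sketch of that snapshot discipline, using the same userspace READ_ONCE() model as above (the struct and function names here are illustrative, not the kernel's):

	#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

	struct demo_ratelimit {
		int interval;	/* 0 disables ratelimiting */
		int burst;	/* events allowed per interval */
		int printed;	/* events emitted in this interval */
	};

	/* Returns nonzero if the event should be emitted. */
	static int demo_ratelimit(struct demo_ratelimit *rs)
	{
		/* Snapshot both knobs once; every later test uses the
		 * locals, so a concurrent writer cannot make the checks
		 * disagree with each other.
		 */
		int interval = READ_ONCE(rs->interval);
		int burst = READ_ONCE(rs->burst);

		if (!interval)
			return 1;
		if (burst && burst > rs->printed) {
			rs->printed++;
			return 1;
		}
		return 0;
	}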
@@ -310,11 +310,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
 static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
 				 void *owner, u32 size)
 {
+	int optmem_max = READ_ONCE(sysctl_optmem_max);
 	struct sock *sk = (struct sock *)owner;
 
 	/* same check as in sock_kmalloc() */
-	if (size <= sysctl_optmem_max &&
-	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+	if (size <= optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
 		atomic_add(size, &sk->sk_omem_alloc);
 		return 0;
 	}
@@ -4624,7 +4624,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
 	struct softnet_data *sd;
 	unsigned int old_flow, new_flow;
 
-	if (qlen < (netdev_max_backlog >> 1))
+	if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
 		return false;
 
 	sd = this_cpu_ptr(&softnet_data);
@@ -4672,7 +4672,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	if (!netif_running(skb->dev))
 		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
-	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+	if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
 enqueue:
 			__skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -4928,7 +4928,7 @@ static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
 
-	net_timestamp_check(netdev_tstamp_prequeue, skb);
+	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 
 	trace_netif_rx(skb);
 
@@ -5281,7 +5281,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 	int ret = NET_RX_DROP;
 	__be16 type;
 
-	net_timestamp_check(!netdev_tstamp_prequeue, skb);
+	net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
 
 	trace_netif_receive_skb(skb);
 
@@ -5664,7 +5664,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 {
 	int ret;
 
-	net_timestamp_check(netdev_tstamp_prequeue, skb);
+	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
@@ -5694,7 +5694,7 @@ void netif_receive_skb_list_internal(struct list_head *head)
 
 	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
-		net_timestamp_check(netdev_tstamp_prequeue, skb);
+		net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 		skb_list_del_init(skb);
 		if (!skb_defer_rx_timestamp(skb))
 			list_add_tail(&skb->list, &sublist);
@@ -5918,7 +5918,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		net_rps_action_and_irq_enable(sd);
 	}
 
-	napi->weight = dev_rx_weight;
+	napi->weight = READ_ONCE(dev_rx_weight);
 	while (again) {
 		struct sk_buff *skb;
 
@@ -6665,8 +6665,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies +
-		usecs_to_jiffies(netdev_budget_usecs);
-	int budget = netdev_budget;
+		usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
+	int budget = READ_ONCE(netdev_budget);
 	LIST_HEAD(list);
 	LIST_HEAD(repoll);
 
@@ -10284,7 +10284,7 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
 			return dev;
 
 		if (time_after(jiffies, warning_time +
-			       netdev_unregister_timeout_secs * HZ)) {
+			       READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
 			list_for_each_entry(dev, list, todo_list) {
 				pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
 					 dev->name, netdev_refcnt_read(dev));
@@ -1214,10 +1214,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
 	u32 filter_size = bpf_prog_size(fp->prog->len);
+	int optmem_max = READ_ONCE(sysctl_optmem_max);
 
 	/* same check as in sock_kmalloc() */
-	if (filter_size <= sysctl_optmem_max &&
-	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+	if (filter_size <= optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
 		atomic_add(filter_size, &sk->sk_omem_alloc);
 		return true;
 	}
@@ -1548,7 +1549,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
 
-	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
+	if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
 		err = -ENOMEM;
 	else
 		err = reuseport_attach_prog(sk, prog);
@@ -1615,7 +1616,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
 		}
 	} else {
 		/* BPF_PROG_TYPE_SOCKET_FILTER */
-		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
+		if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
 			err = -ENOMEM;
 			goto err_prog_put;
 		}
@@ -5034,14 +5035,14 @@ static int __bpf_setsockopt(struct sock *sk, int level, int optname,
 	/* Only some socketops are supported */
 	switch (optname) {
 	case SO_RCVBUF:
-		val = min_t(u32, val, sysctl_rmem_max);
+		val = min_t(u32, val, READ_ONCE(sysctl_rmem_max));
 		val = min_t(int, val, INT_MAX / 2);
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 		WRITE_ONCE(sk->sk_rcvbuf,
 			   max_t(int, val * 2, SOCK_MIN_RCVBUF));
 		break;
 	case SO_SNDBUF:
-		val = min_t(u32, val, sysctl_wmem_max);
+		val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
 		val = min_t(int, val, INT_MAX / 2);
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 		WRITE_ONCE(sk->sk_sndbuf,
@@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 
 	cell = this_cpu_ptr(gcells->cells);
 
-	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
 drop:
 		dev_core_stats_rx_dropped_inc(dev);
 		kfree_skb(skb);
@@ -4797,7 +4797,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
 {
 	bool ret;
 
-	if (likely(sysctl_tstamp_allow_data || tsonly))
+	if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
 		return true;
 
 	read_lock_bh(&sk->sk_callback_lock);
@@ -1101,7 +1101,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 		 * are treated in BSD as hints
 		 */
-		val = min_t(u32, val, sysctl_wmem_max);
+		val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
set_sndbuf:
 		/* Ensure val * 2 fits into an int, to prevent max_t()
 		 * from treating it as a negative value.
@@ -1133,7 +1133,7 @@ set_sndbuf:
 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
 		 * are treated in BSD as hints
 		 */
-		__sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
+		__sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
 		break;
 
 	case SO_RCVBUFFORCE:
@@ -2536,7 +2536,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
 
 	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
 	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
-	    sysctl_optmem_max)
+	    READ_ONCE(sysctl_optmem_max))
 		return NULL;
 
 	skb = alloc_skb(size, priority);
@@ -2554,8 +2554,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
  */
 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 {
-	if ((unsigned int)size <= sysctl_optmem_max &&
-	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+	int optmem_max = READ_ONCE(sysctl_optmem_max);
+
+	if ((unsigned int)size <= optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
 		void *mem;
 		/* First do the add, to avoid the race if kmalloc
 		 * might sleep.
@@ -3309,8 +3311,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	timer_setup(&sk->sk_timer, NULL, 0);
 
 	sk->sk_allocation = GFP_KERNEL;
-	sk->sk_rcvbuf = sysctl_rmem_default;
-	sk->sk_sndbuf = sysctl_wmem_default;
+	sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default);
+	sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
 	sk->sk_state = TCP_CLOSE;
 	sk_set_socket(sk, sock);
 
@@ -3365,7 +3367,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	sk->sk_napi_id = 0;
-	sk->sk_ll_usec = sysctl_net_busy_read;
+	sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
 #endif
 
 	sk->sk_max_pacing_rate = ~0UL;
@@ -234,14 +234,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
 static int proc_do_dev_weight(struct ctl_table *table, int write,
 			      void *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret;
+	static DEFINE_MUTEX(dev_weight_mutex);
+	int ret, weight;
 
+	mutex_lock(&dev_weight_mutex);
 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
-	if (ret != 0)
-		return ret;
-
-	dev_rx_weight = weight_p * dev_weight_rx_bias;
-	dev_tx_weight = weight_p * dev_weight_tx_bias;
+	if (!ret && write) {
+		weight = READ_ONCE(weight_p);
+		WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
+		WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+	}
+	mutex_unlock(&dev_weight_mutex);
 
 	return ret;
 }
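proc_do_dev_weight() above needs two mechanisms at once: the new dev_weight_mutex serializes writers, because dev_rx_weight and dev_tx_weight are derived from weight_p and two concurrent writes could otherwise interleave their updates, while WRITE_ONCE() makes each individual store safe against the lockless READ_ONCE() readers in process_backlog() and __qdisc_run(). A small userspace sketch of that writer/reader split, under the same modeling assumptions as earlier (names and the bias factors here are made up):

	#include <pthread.h>

	#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

	static pthread_mutex_t demo_weight_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int demo_weight = 64;		/* stands in for weight_p */
	static int demo_rx_weight = 64;		/* derived, read locklessly */
	static int demo_tx_weight = 64;		/* derived, read locklessly */

	static void demo_set_weight(int w)	/* writer: the sysctl handler */
	{
		/* The mutex keeps writer vs. writer consistent: both derived
		 * values are recomputed from the same w before any other
		 * writer can run.
		 */
		pthread_mutex_lock(&demo_weight_mutex);
		WRITE_ONCE(demo_weight, w);
		WRITE_ONCE(demo_rx_weight, w * 1);	/* bias assumed 1 */
		WRITE_ONCE(demo_tx_weight, w * 1);	/* bias assumed 1 */
		pthread_mutex_unlock(&demo_weight_mutex);
	}

	static int demo_rx_quota(void)		/* reader: datapath, no lock */
	{
		return READ_ONCE(demo_rx_weight);
	}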
@@ -2682,23 +2682,27 @@ static __net_init int devinet_init_net(struct net *net)
 #endif
 
 	if (!net_eq(net, &init_net)) {
-		if (IS_ENABLED(CONFIG_SYSCTL) &&
-		    sysctl_devconf_inherit_init_net == 3) {
+		switch (net_inherit_devconf()) {
+		case 3:
 			/* copy from the current netns */
 			memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all,
 			       sizeof(ipv4_devconf));
 			memcpy(dflt,
 			       current->nsproxy->net_ns->ipv4.devconf_dflt,
 			       sizeof(ipv4_devconf_dflt));
-		} else if (!IS_ENABLED(CONFIG_SYSCTL) ||
-			   sysctl_devconf_inherit_init_net != 2) {
-			/* inherit == 0 or 1: copy from init_net */
+			break;
+		case 0:
+		case 1:
+			/* copy from init_net */
 			memcpy(all, init_net.ipv4.devconf_all,
 			       sizeof(ipv4_devconf));
 			memcpy(dflt, init_net.ipv4.devconf_dflt,
 			       sizeof(ipv4_devconf_dflt));
+			break;
+		case 2:
+			/* use compiled values */
+			break;
 		}
-		/* else inherit == 2: use compiled values */
 	}
 
 #ifdef CONFIG_SYSCTL
@@ -1730,7 +1730,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 
 	sk->sk_protocol = ip_hdr(skb)->protocol;
 	sk->sk_bound_dev_if = arg->bound_dev_if;
-	sk->sk_sndbuf = sysctl_wmem_default;
+	sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
 	ipc.sockc.mark = fl4.flowi4_mark;
 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -772,7 +772,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
 
 	if (optlen < GROUP_FILTER_SIZE(0))
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max)
+	if (optlen > READ_ONCE(sysctl_optmem_max))
 		return -ENOBUFS;
 
 	gsf = memdup_sockptr(optval, optlen);
@@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
 	if (optlen < size0)
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max - 4)
+	if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
 		return -ENOBUFS;
 
 	p = kmalloc(optlen + 4, GFP_KERNEL);
@@ -1233,7 +1233,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
 
 		if (optlen < IP_MSFILTER_SIZE(0))
 			goto e_inval;
-		if (optlen > sysctl_optmem_max) {
+		if (optlen > READ_ONCE(sysctl_optmem_max)) {
 			err = -ENOBUFS;
 			break;
 		}
@@ -1000,7 +1000,7 @@ new_segment:
 
 		i = skb_shinfo(skb)->nr_frags;
 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
-		if (!can_coalesce && i >= sysctl_max_skb_frags) {
+		if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
@@ -1354,7 +1354,7 @@ new_segment:
 
 			if (!skb_can_coalesce(skb, i, pfrag->page,
 					      pfrag->offset)) {
-				if (i >= sysctl_max_skb_frags) {
+				if (i >= READ_ONCE(sysctl_max_skb_frags)) {
 					tcp_mark_push(tp, skb);
 					goto new_segment;
 				}
@@ -239,7 +239,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
 	if (wscale_ok) {
 		/* Set window scaling on max possible window */
 		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
-		space = max_t(u32, space, sysctl_rmem_max);
+		space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
 		space = min_t(u32, space, *window_clamp);
 		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
 				      0, TCP_MAX_WSCALE);
@@ -7162,9 +7162,8 @@ static int __net_init addrconf_init_net(struct net *net)
 	if (!dflt)
 		goto err_alloc_dflt;
 
-	if (IS_ENABLED(CONFIG_SYSCTL) &&
-	    !net_eq(net, &init_net)) {
-		switch (sysctl_devconf_inherit_init_net) {
+	if (!net_eq(net, &init_net)) {
+		switch (net_inherit_devconf()) {
 		case 1:  /* copy from init_net */
 			memcpy(all, init_net.ipv6.devconf_all,
 			       sizeof(ipv6_devconf));
@@ -210,7 +210,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
 	if (optlen < GROUP_FILTER_SIZE(0))
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max)
+	if (optlen > READ_ONCE(sysctl_optmem_max))
 		return -ENOBUFS;
 
 	gsf = memdup_sockptr(optval, optlen);
@@ -244,7 +244,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 
 	if (optlen < size0)
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max - 4)
+	if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
 		return -ENOBUFS;
 
 	p = kmalloc(optlen + 4, GFP_KERNEL);
@@ -1263,7 +1263,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 
 		i = skb_shinfo(skb)->nr_frags;
 		can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
-		if (!can_coalesce && i >= sysctl_max_skb_frags) {
+		if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
 			tcp_mark_push(tcp_sk(ssk), skb);
 			goto alloc_skb;
 		}
@@ -1280,12 +1280,12 @@ static void set_sock_size(struct sock *sk, int mode, int val)
 	lock_sock(sk);
 	if (mode) {
 		val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
-			      sysctl_wmem_max);
+			      READ_ONCE(sysctl_wmem_max));
 		sk->sk_sndbuf = val * 2;
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 	} else {
 		val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
-			      sysctl_rmem_max);
+			      READ_ONCE(sysctl_rmem_max));
 		sk->sk_rcvbuf = val * 2;
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 	}
@@ -409,7 +409,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 
 void __qdisc_run(struct Qdisc *q)
 {
-	int quota = dev_tx_weight;
+	int quota = READ_ONCE(dev_tx_weight);
 	int packets;
 
 	while (qdisc_restart(q, &packets)) {
@@ -1801,7 +1801,7 @@ int __sys_listen(int fd, int backlog)
 
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (sock) {
-		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
+		somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
 		if ((unsigned int)backlog > somaxconn)
 			backlog = somaxconn;
 
@@ -168,7 +168,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
 {
 	struct espintcp_ctx *ctx = espintcp_getctx(sk);
 
-	if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog)
+	if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
 		return -ENOBUFS;
 
 	__skb_queue_tail(&ctx->out_queue, skb);
@@ -782,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 
 	trans = this_cpu_ptr(&xfrm_trans_tasklet);
 
-	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+	if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
 		return -ENOBUFS;
 
 	BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));