mirror of
https://github.com/torvalds/linux.git
synced 2024-12-15 15:41:58 +00:00
fbb8295248
I forgot tcp had per netns tracking of timewait sockets, and their sysctl to change the limit. After 0dad4087a8
("tcp/dccp: get rid of inet_twsk_purge()"), whole struct net can be freed before last tw socket is freed. We need to allocate a separate struct inet_timewait_death_row object per netns. tw_count becomes a refcount and gains associated debugging infrastructure. BUG: KASAN: use-after-free in inet_twsk_kill+0x358/0x3c0 net/ipv4/inet_timewait_sock.c:46 Read of size 8 at addr ffff88807d5f9f40 by task kworker/1:7/3690 CPU: 1 PID: 3690 Comm: kworker/1:7 Not tainted 5.16.0-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Workqueue: events pwq_unbound_release_workfn Call Trace: <IRQ> __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 print_address_description.constprop.0.cold+0x8d/0x336 mm/kasan/report.c:255 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold+0x83/0xdf mm/kasan/report.c:459 inet_twsk_kill+0x358/0x3c0 net/ipv4/inet_timewait_sock.c:46 call_timer_fn+0x1a5/0x6b0 kernel/time/timer.c:1421 expire_timers kernel/time/timer.c:1466 [inline] __run_timers.part.0+0x67c/0xa30 kernel/time/timer.c:1734 __run_timers kernel/time/timer.c:1715 [inline] run_timer_softirq+0xb3/0x1d0 kernel/time/timer.c:1747 __do_softirq+0x29b/0x9c2 kernel/softirq.c:558 invoke_softirq kernel/softirq.c:432 [inline] __irq_exit_rcu+0x123/0x180 kernel/softirq.c:637 irq_exit_rcu+0x5/0x20 kernel/softirq.c:649 sysvec_apic_timer_interrupt+0x93/0xc0 arch/x86/kernel/apic/apic.c:1097 </IRQ> <TASK> asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:638 RIP: 0010:lockdep_unregister_key+0x1c9/0x250 kernel/locking/lockdep.c:6328 Code: 00 00 00 48 89 ee e8 46 fd ff ff 4c 89 f7 e8 5e c9 ff ff e8 09 cc ff ff 9c 58 f6 c4 02 75 26 41 f7 c4 00 02 00 00 74 01 fb 5b <5d> 41 5c 41 5d 41 5e 41 5f e9 19 4a 08 00 0f 0b 5b 5d 41 5c 41 5d RSP: 0018:ffffc90004077cb8 EFLAGS: 00000206 RAX: 0000000000000046 RBX: ffff88807b61b498 RCX: 0000000000000001 RDX: dffffc0000000000 RSI: 0000000000000000 RDI: 
0000000000000000 RBP: ffff888077027128 R08: 0000000000000001 R09: ffffffff8f1ea4fc R10: fffffbfff1ff93ee R11: 000000000000af1e R12: 0000000000000246 R13: 0000000000000000 R14: ffffffff8ffc89b8 R15: ffffffff90157fb0 wq_unregister_lockdep kernel/workqueue.c:3508 [inline] pwq_unbound_release_workfn+0x254/0x340 kernel/workqueue.c:3746 process_one_work+0x9ac/0x1650 kernel/workqueue.c:2307 worker_thread+0x657/0x1110 kernel/workqueue.c:2454 kthread+0x2e9/0x3a0 kernel/kthread.c:377 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:295 </TASK> Allocated by task 3635: kasan_save_stack+0x1e/0x50 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:437 [inline] __kasan_slab_alloc+0x90/0xc0 mm/kasan/common.c:470 kasan_slab_alloc include/linux/kasan.h:260 [inline] slab_post_alloc_hook mm/slab.h:732 [inline] slab_alloc_node mm/slub.c:3230 [inline] slab_alloc mm/slub.c:3238 [inline] kmem_cache_alloc+0x202/0x3a0 mm/slub.c:3243 kmem_cache_zalloc include/linux/slab.h:705 [inline] net_alloc net/core/net_namespace.c:407 [inline] copy_net_ns+0x125/0x760 net/core/net_namespace.c:462 create_new_namespaces+0x3f6/0xb20 kernel/nsproxy.c:110 unshare_nsproxy_namespaces+0xc1/0x1f0 kernel/nsproxy.c:226 ksys_unshare+0x445/0x920 kernel/fork.c:3048 __do_sys_unshare kernel/fork.c:3119 [inline] __se_sys_unshare kernel/fork.c:3117 [inline] __x64_sys_unshare+0x2d/0x40 kernel/fork.c:3117 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae The buggy address belongs to the object at ffff88807d5f9a80 which belongs to the cache net_namespace of size 6528 The buggy address is located 1216 bytes inside of 6528-byte region [ffff88807d5f9a80, ffff88807d5fb400) The buggy address belongs to the page: page:ffffea0001f57e00 refcount:1 mapcount:0 mapping:0000000000000000 index:0xffff88807d5f9a80 pfn:0x7d5f8 head:ffffea0001f57e00 order:3 compound_mapcount:0 compound_pincount:0 
memcg:ffff888070023001 flags: 0xfff00000010200(slab|head|node=0|zone=1|lastcpupid=0x7ff) raw: 00fff00000010200 ffff888010dd4f48 ffffea0001404e08 ffff8880118fd000 raw: ffff88807d5f9a80 0000000000040002 00000001ffffffff ffff888070023001 page dumped because: kasan: bad access detected page_owner tracks the page as allocated page last allocated via order 3, migratetype Unmovable, gfp_mask 0xd20c0(__GFP_IO|__GFP_FS|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_NOMEMALLOC), pid 3634, ts 119694798460, free_ts 119693556950 prep_new_page mm/page_alloc.c:2434 [inline] get_page_from_freelist+0xa72/0x2f50 mm/page_alloc.c:4165 __alloc_pages+0x1b2/0x500 mm/page_alloc.c:5389 alloc_pages+0x1aa/0x310 mm/mempolicy.c:2271 alloc_slab_page mm/slub.c:1799 [inline] allocate_slab mm/slub.c:1944 [inline] new_slab+0x28a/0x3b0 mm/slub.c:2004 ___slab_alloc+0x87c/0xe90 mm/slub.c:3018 __slab_alloc.constprop.0+0x4d/0xa0 mm/slub.c:3105 slab_alloc_node mm/slub.c:3196 [inline] slab_alloc mm/slub.c:3238 [inline] kmem_cache_alloc+0x35c/0x3a0 mm/slub.c:3243 kmem_cache_zalloc include/linux/slab.h:705 [inline] net_alloc net/core/net_namespace.c:407 [inline] copy_net_ns+0x125/0x760 net/core/net_namespace.c:462 create_new_namespaces+0x3f6/0xb20 kernel/nsproxy.c:110 unshare_nsproxy_namespaces+0xc1/0x1f0 kernel/nsproxy.c:226 ksys_unshare+0x445/0x920 kernel/fork.c:3048 __do_sys_unshare kernel/fork.c:3119 [inline] __se_sys_unshare kernel/fork.c:3117 [inline] __x64_sys_unshare+0x2d/0x40 kernel/fork.c:3117 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae page last free stack trace: reset_page_owner include/linux/page_owner.h:24 [inline] free_pages_prepare mm/page_alloc.c:1352 [inline] free_pcp_prepare+0x374/0x870 mm/page_alloc.c:1404 free_unref_page_prepare mm/page_alloc.c:3325 [inline] free_unref_page+0x19/0x690 mm/page_alloc.c:3404 skb_free_head net/core/skbuff.c:655 [inline] skb_release_data+0x65d/0x790 
net/core/skbuff.c:677 skb_release_all net/core/skbuff.c:742 [inline] __kfree_skb net/core/skbuff.c:756 [inline] consume_skb net/core/skbuff.c:914 [inline] consume_skb+0xc2/0x160 net/core/skbuff.c:908 skb_free_datagram+0x1b/0x1f0 net/core/datagram.c:325 netlink_recvmsg+0x636/0xea0 net/netlink/af_netlink.c:1998 sock_recvmsg_nosec net/socket.c:948 [inline] sock_recvmsg net/socket.c:966 [inline] sock_recvmsg net/socket.c:962 [inline] ____sys_recvmsg+0x2c4/0x600 net/socket.c:2632 ___sys_recvmsg+0x127/0x200 net/socket.c:2674 __sys_recvmsg+0xe2/0x1a0 net/socket.c:2704 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Memory state around the buggy address: ffff88807d5f9e00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff88807d5f9e80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff88807d5f9f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff88807d5f9f80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff88807d5fa000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb Fixes:0dad4087a8
("tcp/dccp: get rid of inet_twsk_purge()") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: syzbot <syzkaller@googlegroups.com> Reported-by: Paolo Abeni <pabeni@redhat.com> Tested-by: Paolo Abeni <pabeni@redhat.com> Link: https://lore.kernel.org/r/20220126180714.845362-1-eric.dumazet@gmail.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
273 lines
7.4 KiB
C
273 lines
7.4 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
/*
|
|
* net/dccp/minisocks.c
|
|
*
|
|
* An implementation of the DCCP protocol
|
|
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
|
|
*/
|
|
|
|
#include <linux/dccp.h>
|
|
#include <linux/gfp.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/skbuff.h>
|
|
#include <linux/timer.h>
|
|
|
|
#include <net/sock.h>
|
|
#include <net/xfrm.h>
|
|
#include <net/inet_timewait_sock.h>
|
|
|
|
#include "ackvec.h"
|
|
#include "ccid.h"
|
|
#include "dccp.h"
|
|
#include "feat.h"
|
|
|
|
/*
 * TIME_WAIT bookkeeping for DCCP: a refcounted death row bound to the
 * DCCP connection hash table.  tw_refcount starts at 1 (the row itself);
 * each live timewait socket holds an additional reference.
 */
struct inet_timewait_death_row dccp_death_row = {
	.tw_refcount = REFCOUNT_INIT(1),
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.hashinfo = &dccp_hashinfo,
};

EXPORT_SYMBOL_GPL(dccp_death_row);
|
|
|
|
/**
 * dccp_time_wait - move a closing DCCP socket into a timewait state
 * @sk:    the socket being closed
 * @state: timewait substate to enter (e.g. DCCP_TIME_WAIT)
 * @timeo: requested timeout; raised to at least 3.5*RTO below, and forced
 *         to DCCP_TIMEWAIT_LEN when @state is DCCP_TIME_WAIT
 *
 * Allocates a timewait bucket from dccp_death_row, copies addressing
 * information over, arms the timewait timer and substitutes the bucket
 * for @sk in the hash table.  On allocation failure the socket is simply
 * closed non-gracefully.  In all cases @sk is finished via dccp_done().
 */
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &dccp_death_row, state);

	if (tw != NULL) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* (rto << 2) - (rto >> 1) == 3.5 * RTO: minimum linger time */
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == DCCP_TIME_WAIT)
			timeo = DCCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in following section, otherwise timer handler could run before
		 * we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		DCCP_WARN("time wait bucket table overflow\n");
	}

	dccp_done(sk);
}
|
|
|
|
/**
 * dccp_create_openreq_child - clone a listening socket for a new connection
 * @sk:  the listening (parent) socket
 * @req: the request_sock holding the negotiated connection state
 * @skb: the packet that completed the handshake
 *
 * Implements RFC 4340 Step 3 (Process LISTEN state): clones @sk, seeds the
 * child's DCCP state (service code, timestamps, sequence numbers) from the
 * request, then activates the negotiated features, which also initialises
 * the CCIDs and sequence windows.
 *
 * Returns the new child socket, or NULL if cloning or feature activation
 * failed (the half-constructed child is freed in the latter case).
 */
struct sock *dccp_create_openreq_child(const struct sock *sk,
				       const struct request_sock *req,
				       const struct sk_buff *skb)
{
	/*
	 * Step 3: Process LISTEN state
	 *
	 *   (* Generate a new socket and switch to that socket *)
	 *   Set S := new socket for this port pair
	 */
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		struct dccp_request_sock *dreq = dccp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct dccp_sock *newdp = dccp_sk(newsk);

		newdp->dccps_role	    = DCCP_ROLE_SERVER;
		newdp->dccps_hc_rx_ackvec   = NULL;
		newdp->dccps_service_list   = NULL;
		newdp->dccps_hc_rx_ccid     = NULL;
		newdp->dccps_hc_tx_ccid     = NULL;
		newdp->dccps_service	    = dreq->dreq_service;
		newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
		newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
		newicsk->icsk_rto	    = DCCP_TIMEOUT_INIT;

		/* must be valid before dccp_feat_activate_values() walks it */
		INIT_LIST_HEAD(&newdp->dccps_featneg);
		/*
		 * Step 3: Process LISTEN state
		 *
		 *    Choose S.ISS (initial seqno) or set from Init Cookies
		 *    Initialize S.GAR := S.ISS
		 *    Set S.ISR, S.GSR from packet (or Init Cookies)
		 *
		 * Setting AWL/AWH and SWL/SWH happens as part of the feature
		 * activation below, as these windows all depend on the local
		 * and remote Sequence Window feature values (7.5.2).
		 */
		newdp->dccps_iss = dreq->dreq_iss;
		newdp->dccps_gss = dreq->dreq_gss;
		newdp->dccps_gar = newdp->dccps_iss;
		newdp->dccps_isr = dreq->dreq_isr;
		newdp->dccps_gsr = dreq->dreq_gsr;

		/*
		 * Activate features: initialise CCIDs, sequence windows etc.
		 */
		if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {
			sk_free_unlock_clone(newsk);
			return NULL;
		}
		dccp_init_xmit_timers(newsk);

		__DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);
|
|
|
|
/*
 * Process an incoming packet for RESPOND sockets represented
 * as an request_sock.
 *
 * Handles three cases under dreq_lock: a retransmitted REQUEST (answered
 * with another RESPONSE), an invalid or non-ACK packet (dropped, possibly
 * with a RESET), and a valid ACK/DATAACK that completes the handshake and
 * produces a full child socket.
 *
 * Returns the child socket on successful handshake completion, NULL
 * otherwise.
 */
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
			    struct request_sock *req)
{
	struct sock *child = NULL;
	struct dccp_request_sock *dreq = dccp_rsk(req);
	bool own_req;

	/* TCP/DCCP listeners became lockless.
	 * DCCP stores complex state in its request_sock, so we need
	 * a protection for them, now this code runs without being protected
	 * by the parent (listener) lock.
	 */
	spin_lock_bh(&dreq->dreq_lock);

	/* Check for retransmitted REQUEST */
	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {

		if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
			dccp_pr_debug("Retransmitted REQUEST\n");
			dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
			/*
			 * Send another RESPONSE packet
			 * To protect against Request floods, increment retrans
			 * counter (backoff, monitored by dccp_response_timer).
			 */
			inet_rtx_syn_ack(sk, req);
		}
		/* Network Duplicate, discard packet */
		goto out;
	}

	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

	/* only ACK/DATAACK may complete the handshake */
	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
	    dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
		goto drop;

	/* Invalid ACK: must acknowledge something in [ISS, GSS] */
	if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
		       dreq->dreq_iss, dreq->dreq_gss)) {
		dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
			      "dreq_iss=%llu, dreq_gss=%llu\n",
			      (unsigned long long)
			      DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      (unsigned long long) dreq->dreq_iss,
			      (unsigned long long) dreq->dreq_gss);
		goto drop;
	}

	if (dccp_parse_options(sk, dreq, skb))
		goto drop;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (child) {
		child = inet_csk_complete_hashdance(sk, child, req, own_req);
		goto out;
	}

	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req);
out:
	spin_unlock_bh(&dreq->dreq_lock);
	return child;
}

EXPORT_SYMBOL_GPL(dccp_check_req);
|
|
|
|
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * Called with the child socket bh-locked; releases that lock and drops
 * the child reference before returning (hence __releases(child)).
 * Returns the value of dccp_rcv_state_process(), or 0 when the packet
 * was deferred to the backlog.
 */
int dccp_child_process(struct sock *parent, struct sock *child,
		       struct sk_buff *skb)
	__releases(child)
{
	int ret = 0;
	const int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
					     skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == DCCP_RESPOND && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL_GPL(dccp_child_process);
|
|
|
|
/*
 * Stub for the request_sock_ops ->send_ack() hook: unlike TCP, DCCP has
 * no plain ACK to (re)send from the LISTEN/RESPOND states, so reaching
 * this function indicates a logic error.
 */
void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
			 struct request_sock *rsk)
{
	DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
}

EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);
|
|
|
|
int dccp_reqsk_init(struct request_sock *req,
|
|
struct dccp_sock const *dp, struct sk_buff const *skb)
|
|
{
|
|
struct dccp_request_sock *dreq = dccp_rsk(req);
|
|
|
|
spin_lock_init(&dreq->dreq_lock);
|
|
inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
|
|
inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
|
|
inet_rsk(req)->acked = 0;
|
|
dreq->dreq_timestamp_echo = 0;
|
|
|
|
/* inherit feature negotiation options from listening socket */
|
|
return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dccp_reqsk_init);
|