mirror of
https://github.com/torvalds/linux.git
synced 2024-11-23 12:42:02 +00:00
449809a66c
SYN processing really was meant to be handled from BH. When I got rid of BH blocking while processing socket backlog in commit5413d1babe
("net: do not block BH while processing socket backlog"), I forgot that a malicious user could transition to TCP_LISTEN from a state that allowed (SYN) packets to be parked in the socket backlog while socket is owned by the thread doing the listen() call. Sure enough syzkaller found this and reported the bug ;) ================================= [ INFO: inconsistent lock state ] 4.10.0+ #60 Not tainted --------------------------------- inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage. syz-executor0/5090 [HC0[0]:SC0[0]:HE1:SE1] takes: (&(&hashinfo->ehash_locks[i])->rlock){+.?...}, at: [<ffffffff83a6a370>] spin_lock include/linux/spinlock.h:299 [inline] (&(&hashinfo->ehash_locks[i])->rlock){+.?...}, at: [<ffffffff83a6a370>] inet_ehash_insert+0x240/0xad0 net/ipv4/inet_hashtables.c:407 {IN-SOFTIRQ-W} state was registered at: mark_irqflags kernel/locking/lockdep.c:2923 [inline] __lock_acquire+0xbcf/0x3270 kernel/locking/lockdep.c:3295 lock_acquire+0x241/0x580 kernel/locking/lockdep.c:3753 __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline] _raw_spin_lock+0x33/0x50 kernel/locking/spinlock.c:151 spin_lock include/linux/spinlock.h:299 [inline] inet_ehash_insert+0x240/0xad0 net/ipv4/inet_hashtables.c:407 reqsk_queue_hash_req net/ipv4/inet_connection_sock.c:753 [inline] inet_csk_reqsk_queue_hash_add+0x1b7/0x2a0 net/ipv4/inet_connection_sock.c:764 tcp_conn_request+0x25cc/0x3310 net/ipv4/tcp_input.c:6399 tcp_v4_conn_request+0x157/0x220 net/ipv4/tcp_ipv4.c:1262 tcp_rcv_state_process+0x802/0x4130 net/ipv4/tcp_input.c:5889 tcp_v4_do_rcv+0x56b/0x940 net/ipv4/tcp_ipv4.c:1433 tcp_v4_rcv+0x2e12/0x3210 net/ipv4/tcp_ipv4.c:1711 ip_local_deliver_finish+0x4ce/0xc40 net/ipv4/ip_input.c:216 NF_HOOK include/linux/netfilter.h:257 [inline] ip_local_deliver+0x1ce/0x710 net/ipv4/ip_input.c:257 dst_input include/net/dst.h:492 [inline] ip_rcv_finish+0xb1d/0x2110 net/ipv4/ip_input.c:396 NF_HOOK include/linux/netfilter.h:257 [inline] ip_rcv+0xd90/0x19c0 net/ipv4/ip_input.c:487 
__netif_receive_skb_core+0x1ad1/0x3400 net/core/dev.c:4179 __netif_receive_skb+0x2a/0x170 net/core/dev.c:4217 netif_receive_skb_internal+0x1d6/0x430 net/core/dev.c:4245 napi_skb_finish net/core/dev.c:4602 [inline] napi_gro_receive+0x4e6/0x680 net/core/dev.c:4636 e1000_receive_skb drivers/net/ethernet/intel/e1000/e1000_main.c:4033 [inline] e1000_clean_rx_irq+0x5e0/0x1490 drivers/net/ethernet/intel/e1000/e1000_main.c:4489 e1000_clean+0xb9a/0x2910 drivers/net/ethernet/intel/e1000/e1000_main.c:3834 napi_poll net/core/dev.c:5171 [inline] net_rx_action+0xe70/0x1900 net/core/dev.c:5236 __do_softirq+0x2fb/0xb7d kernel/softirq.c:284 invoke_softirq kernel/softirq.c:364 [inline] irq_exit+0x19e/0x1d0 kernel/softirq.c:405 exiting_irq arch/x86/include/asm/apic.h:658 [inline] do_IRQ+0x81/0x1a0 arch/x86/kernel/irq.c:250 ret_from_intr+0x0/0x20 native_safe_halt+0x6/0x10 arch/x86/include/asm/irqflags.h:53 arch_safe_halt arch/x86/include/asm/paravirt.h:98 [inline] default_idle+0x8f/0x410 arch/x86/kernel/process.c:271 arch_cpu_idle+0xa/0x10 arch/x86/kernel/process.c:262 default_idle_call+0x36/0x60 kernel/sched/idle.c:96 cpuidle_idle_call kernel/sched/idle.c:154 [inline] do_idle+0x348/0x440 kernel/sched/idle.c:243 cpu_startup_entry+0x18/0x20 kernel/sched/idle.c:345 start_secondary+0x344/0x440 arch/x86/kernel/smpboot.c:272 verify_cpu+0x0/0xfc irq event stamp: 1741 hardirqs last enabled at (1741): [<ffffffff84d49d77>] __raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:160 [inline] hardirqs last enabled at (1741): [<ffffffff84d49d77>] _raw_spin_unlock_irqrestore+0xf7/0x1a0 kernel/locking/spinlock.c:191 hardirqs last disabled at (1740): [<ffffffff84d4a732>] __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:108 [inline] hardirqs last disabled at (1740): [<ffffffff84d4a732>] _raw_spin_lock_irqsave+0xa2/0x110 kernel/locking/spinlock.c:159 softirqs last enabled at (1738): [<ffffffff84d4deff>] __do_softirq+0x7cf/0xb7d kernel/softirq.c:310 softirqs last disabled at (1571): 
[<ffffffff84d4b92c>] do_softirq_own_stack+0x1c/0x30 arch/x86/entry/entry_64.S:902 other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(&(&hashinfo->ehash_locks[i])->rlock); <Interrupt> lock(&(&hashinfo->ehash_locks[i])->rlock); *** DEADLOCK *** 1 lock held by syz-executor0/5090: #0: (sk_lock-AF_INET6){+.+.+.}, at: [<ffffffff83406b43>] lock_sock include/net/sock.h:1460 [inline] #0: (sk_lock-AF_INET6){+.+.+.}, at: [<ffffffff83406b43>] sock_setsockopt+0x233/0x1e40 net/core/sock.c:683 stack backtrace: CPU: 1 PID: 5090 Comm: syz-executor0 Not tainted 4.10.0+ #60 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:15 [inline] dump_stack+0x292/0x398 lib/dump_stack.c:51 print_usage_bug+0x3ef/0x450 kernel/locking/lockdep.c:2387 valid_state kernel/locking/lockdep.c:2400 [inline] mark_lock_irq kernel/locking/lockdep.c:2602 [inline] mark_lock+0xf30/0x1410 kernel/locking/lockdep.c:3065 mark_irqflags kernel/locking/lockdep.c:2941 [inline] __lock_acquire+0x6dc/0x3270 kernel/locking/lockdep.c:3295 lock_acquire+0x241/0x580 kernel/locking/lockdep.c:3753 __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline] _raw_spin_lock+0x33/0x50 kernel/locking/spinlock.c:151 spin_lock include/linux/spinlock.h:299 [inline] inet_ehash_insert+0x240/0xad0 net/ipv4/inet_hashtables.c:407 reqsk_queue_hash_req net/ipv4/inet_connection_sock.c:753 [inline] inet_csk_reqsk_queue_hash_add+0x1b7/0x2a0 net/ipv4/inet_connection_sock.c:764 dccp_v6_conn_request+0xada/0x11b0 net/dccp/ipv6.c:380 dccp_rcv_state_process+0x51e/0x1660 net/dccp/input.c:606 dccp_v6_do_rcv+0x213/0x350 net/dccp/ipv6.c:632 sk_backlog_rcv include/net/sock.h:896 [inline] __release_sock+0x127/0x3a0 net/core/sock.c:2052 release_sock+0xa5/0x2b0 net/core/sock.c:2539 sock_setsockopt+0x60f/0x1e40 net/core/sock.c:1016 SYSC_setsockopt net/socket.c:1782 [inline] SyS_setsockopt+0x2fb/0x3a0 net/socket.c:1765 entry_SYSCALL_64_fastpath+0x1f/0xc2 
RIP: 0033:0x4458b9 RSP: 002b:00007fe8b26c2b58 EFLAGS: 00000292 ORIG_RAX: 0000000000000036 RAX: ffffffffffffffda RBX: 0000000000000006 RCX: 00000000004458b9 RDX: 000000000000001a RSI: 0000000000000001 RDI: 0000000000000006 RBP: 00000000006e2110 R08: 0000000000000010 R09: 0000000000000000 R10: 00000000208c3000 R11: 0000000000000292 R12: 0000000000708000 R13: 0000000020000000 R14: 0000000000001000 R15: 0000000000000000 Fixes:5413d1babe
("net: do not block BH while processing socket backlog") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Andrey Konovalov <andreyknvl@google.com> Acked-by: Soheil Hassas Yeganeh <soheil@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
740 lines
22 KiB
C
740 lines
22 KiB
C
/*
|
|
* net/dccp/input.c
|
|
*
|
|
* An implementation of the DCCP protocol
|
|
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*/
|
|
|
|
#include <linux/dccp.h>
|
|
#include <linux/skbuff.h>
|
|
#include <linux/slab.h>
|
|
|
|
#include <net/sock.h>
|
|
|
|
#include "ackvec.h"
|
|
#include "ccid.h"
|
|
#include "dccp.h"
|
|
|
|
/* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
/* Value is in jiffies: HZ / 8 = minimum 125 ms between two rate-limited Syncs. */
int sysctl_dccp_sync_ratelimit	__read_mostly = HZ / 8;
|
|
|
|
/*
 * Strip the DCCP header and append the packet to the socket receive queue,
 * charging its memory to the socket and waking any blocked reader.
 */
static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
	/* dccph_doff counts 32-bit words (RFC 4340, 5.1), hence "* 4" for bytes */
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk);
}
|
|
|
|
/*
 * TCP-FIN analogue: shut down both directions, mark the socket done, and
 * queue the closing skb so that a blocked dccp_recvmsg() reader wakes up.
 */
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * On receiving Close/CloseReq, both RD/WR shutdown are performed.
	 * RFC 4340, 8.3 says that we MAY send further Data/DataAcks after
	 * receiving the closing segment, but there is no guarantee that such
	 * data will be processed at all.
	 */
	sk->sk_shutdown = SHUTDOWN_MASK;
	sock_set_flag(sk, SOCK_DONE);
	dccp_enqueue_skb(sk, skb);
}
|
|
|
|
/*
 * Process a received DCCP-Close depending on the local connection state.
 *
 * Returns 1 if the skb was queued (handed to the receive queue via
 * dccp_fin()), 0 if the caller should free it.
 */
static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	switch (sk->sk_state) {
	/*
	 * We ignore Close when received in one of the following states:
	 *  - CLOSED		(may be a late or duplicate packet)
	 *  - PASSIVE_CLOSEREQ	(the peer has sent a CloseReq earlier)
	 *  - RESPOND		(already handled by dccp_check_req)
	 */
	case DCCP_CLOSING:
		/*
		 * Simultaneous-close: receiving a Close after sending one. This
		 * can happen if both client and server perform active-close and
		 * will result in an endless ping-pong of crossing and retrans-
		 * mitted Close packets, which only terminates when one of the
		 * nodes times out (min. 64 seconds). Quicker convergence can be
		 * achieved when one of the nodes acts as tie-breaker.
		 * This is ok as both ends are done with data transfer and each
		 * end is just waiting for the other to acknowledge termination.
		 */
		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
			break;
		/* fall through */
	case DCCP_REQUESTING:
	case DCCP_ACTIVE_CLOSEREQ:
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_done(sk);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
		/* fall through */
	case DCCP_PASSIVE_CLOSE:
		/*
		 * Retransmitted Close: we have already enqueued the first one.
		 */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
|
|
|
|
/*
 * Process a received DCCP-CloseReq. Only a client may act on a CloseReq
 * (RFC 4340, Step 7); a server answers with a Sync instead.
 *
 * Returns 1 if the skb was queued via dccp_fin(), 0 if the caller should
 * free it.
 */
static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return queued;
	}

	/* Step 13: process relevant Client states < CLOSEREQ */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		dccp_send_close(sk, 0);
		dccp_set_state(sk, DCCP_CLOSING);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Give waiting application a chance to read pending data */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
		/* fall through */
	case DCCP_PASSIVE_CLOSEREQ:
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
|
|
|
|
/*
 * dccp_reset_code_convert  -  Map DCCP Reset codes (RFC 4340, 5.6) to errno
 * @code: reset code taken from the received DCCP-Reset header
 *
 * Returns 0 for codes that signal normal termination (and for unknown or
 * out-of-range codes), otherwise the errno value reported to the application
 * via sk->sk_err.
 */
static u16 dccp_reset_code_convert(const u8 code)
{
	/* static const: build the table once instead of on every call */
	static const u16 error_code[] = {
	[DCCP_RESET_CODE_CLOSED]	     = 0,	/* normal termination */
	[DCCP_RESET_CODE_UNSPECIFIED]	     = 0,	/* nothing known */
	[DCCP_RESET_CODE_ABORTED]	     = ECONNRESET,

	[DCCP_RESET_CODE_NO_CONNECTION]	     = ECONNREFUSED,
	[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
	[DCCP_RESET_CODE_TOO_BUSY]	     = EUSERS,
	[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,

	[DCCP_RESET_CODE_PACKET_ERROR]	     = ENOMSG,
	[DCCP_RESET_CODE_BAD_INIT_COOKIE]    = EBADR,
	[DCCP_RESET_CODE_BAD_SERVICE_CODE]   = EBADRQC,
	[DCCP_RESET_CODE_OPTION_ERROR]	     = EILSEQ,
	[DCCP_RESET_CODE_MANDATORY_ERROR]    = EOPNOTSUPP,
	};

	/* Guard the lookup: codes >= DCCP_MAX_RESET_CODES are unknown */
	return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
}
|
|
|
|
/*
 * Step 9: Process Reset — tear the connection down, report the mapped errno
 * to the application, and move the socket into TIMEWAIT.
 */
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
	u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);

	sk->sk_err = err;

	/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
	dccp_fin(sk, skb);

	/* Only signal an error for abnormal termination on a live socket */
	if (err && !sock_flag(sk, SOCK_DEAD))
		sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}
|
|
|
|
static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
|
|
|
|
if (av == NULL)
|
|
return;
|
|
if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
|
|
dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
|
|
dccp_ackvec_input(av, skb);
|
|
}
|
|
|
|
static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
const struct dccp_sock *dp = dccp_sk(sk);
|
|
|
|
/* Don't deliver to RX CCID when node has shut down read end. */
|
|
if (!(sk->sk_shutdown & RCV_SHUTDOWN))
|
|
ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
|
|
/*
|
|
* Until the TX queue has been drained, we can not honour SHUT_WR, since
|
|
* we need received feedback as input to adjust congestion control.
|
|
*/
|
|
if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
|
|
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
|
|
}
|
|
|
|
/*
 * Steps 5 and 6 of RFC 4340, 8.5: validate sequence/acknowledgment numbers
 * and update GSR/GAR on success.
 *
 * Returns 0 if the packet passed the checks, -1 if it must be dropped
 * (a rate-limited Sync may have been sent in the latter case).
 */
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
			ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly. After this update, P will pass the tests
	 *		 in Step 6. A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
		    dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
			dccp_update_gsr(sk, seqno);
		else
			return -1;
	}

	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	/* Teardown packets use tightened windows to resist old duplicates */
	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = ADD48(dp->dccps_gsr, 1);
		lawl = dp->dccps_gar;
	}

	if (between48(seqno, lswl, dp->dccps_swh) &&
	    (ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(ackno, lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, seqno);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
		    after48(ackno, dp->dccps_gar))
			dp->dccps_gar = ackno;
	} else {
		unsigned long now = jiffies;
		/*
		 *   Step 6: Check sequence numbers
		 *      Otherwise,
		 *	 If P.type == Reset,
		 *	    Send Sync packet acknowledging S.GSR
		 *	 Otherwise,
		 *	    Send Sync packet acknowledging P.seqno
		 *      Drop packet and return
		 *
		 *   These Syncs are rate-limited as per RFC 4340, 7.5.4:
		 *   at most 1 / (dccp_sync_rate_limit * HZ) Syncs per second.
		 */
		if (time_before(now, (dp->dccps_rate_last +
				      sysctl_dccp_sync_ratelimit)))
			return -1;

		DCCP_WARN("Step 6 failed for %s packet, "
			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			  "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
			  "sending SYNC...\n", dccp_packet_name(dh->dccph_type),
			  (unsigned long long) lswl, (unsigned long long) seqno,
			  (unsigned long long) dp->dccps_swh,
			  (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
							      : "exists",
			  (unsigned long long) lawl, (unsigned long long) ackno,
			  (unsigned long long) dp->dccps_awh);

		dp->dccps_rate_last = now;

		/* A failed Reset acknowledges S.GSR instead of P.seqno */
		if (dh->dccph_type == DCCP_PKT_RESET)
			seqno = dp->dccps_gsr;
		dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}
|
|
|
|
/*
 * Per-packet-type processing for an established connection, after sequence
 * and option checks have already passed.
 *
 * Returns 0 in all visible paths; the skb is either queued (Data/DataAck)
 * or freed here. NOTE(review): callers appear to treat non-zero as "send
 * Reset", but this function only ever returns 0 — confirm against callers.
 */
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
				  const struct dccp_hdr *dh, const unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when
		 * - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening"
		 * - sk_receive_queue is full, use Code 2, "Receive Buffer"
		 */
		dccp_enqueue_skb(sk, skb);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_rcv_reset(sk, skb);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_CLOSE:
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *    or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *    or (S.state == RESPOND and P.type == Data),
		 *  Send Sync packet acknowledging P.seqno
		 *  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (dccp_delta_seqno(dp->dccps_osr,
				     DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/*
		 * From RFC 4340, sec. 5.7
		 *
		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
		 * MAY have non-zero-length application data areas, whose
		 * contents receivers MUST ignore.
		 */
		goto discard;
	}

	/* Reaching here means the packet type was unexpected in this state */
	DCCP_INC_STATS(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}
|
|
|
|
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
|
const struct dccp_hdr *dh, const unsigned int len)
|
|
{
|
|
if (dccp_check_seqno(sk, skb))
|
|
goto discard;
|
|
|
|
if (dccp_parse_options(sk, NULL, skb))
|
|
return 1;
|
|
|
|
dccp_handle_ackvec_processing(sk, skb);
|
|
dccp_deliver_input_to_ccids(sk, skb);
|
|
|
|
return __dccp_rcv_established(sk, skb, dh, len);
|
|
discard:
|
|
__kfree_skb(skb);
|
|
return 0;
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dccp_rcv_established);
|
|
|
|
/*
 * Client-side handling of the REQUEST state: process the server's Response
 * (Steps 4, 8, 10 of RFC 4340, 8.5) and move to PARTOPEN on success.
 *
 * Return values:
 *   0  - skb consumed (queued/freed), caller does nothing further
 *  -1  - caller should free the skb (an Ack has been sent)
 *   1  - caller must send a Reset (reset code set in the skb control block)
 */
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned int len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		long tstamp = dccp_timestamp();

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu\n",
				      (unsigned long long)dp->dccps_awl,
			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		/*
		 * If option processing (Step 8) failed, return 1 here so that
		 * dccp_v4_do_rcv() sends a Reset. The Reset code depends on
		 * the option type and is set in dccp_parse_options().
		 */
		if (dccp_parse_options(sk, NULL, skb))
			return 1;

		/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
		if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
			    dp->dccps_options_received.dccpor_timestamp_echo));

		/* Stop the REQUEST timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		WARN_ON(sk->sk_send_head == NULL);
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		/*
		 * Set ISR, GSR from packet. ISS was set in dccp_v{4,6}_connect
		 * and GSS in dccp_transmit_skb(). Setting AWL/AWH and SWL/SWH
		 * is done as part of activating the feature values below, since
		 * these settings depend on the local/remote Sequence Window
		 * features, which were undefined or not confirmed until now.
		 */
		dp->dccps_gsr = dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;

		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state. PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/*
		 * If feature negotiation was successful, activate features now;
		 * an activation failure means that this host could not activate
		 * one or more features (e.g. insufficient memory), which would
		 * leave at least one feature in an undefined state.
		 */
		if (dccp_feat_activate_values(sk, &dp->dccps_featneg))
			goto unable_to_proceed;

		/* Make sure socket is routed, for correct metrics. */
		icsk->icsk_af_ops->rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can as well do a similar trick, its
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	/* dccp_v4_do_rcv will send a reset */
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
	return 1;

unable_to_proceed:
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_ABORTED;
	/*
	 * We mark this socket as no longer usable, so that the loop in
	 * dccp_sendmsg() terminates and the application gets notified.
	 */
	dccp_set_state(sk, DCCP_CLOSED);
	sk->sk_err = ECOMM;
	return 1;
}
|
|
|
|
/*
 * Handle packets arriving in the RESPOND and PARTOPEN states (server and
 * client halves of completing the handshake, respectively).
 *
 * Returns 1 if the skb was queued (handed to __dccp_rcv_established()),
 * 0 if the caller should free it.
 */
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		/* A server in RESPOND ignores Data (handled in Step 7) */
		if (sk->sk_state == DCCP_RESPOND)
			break;
		/* fall through */
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc, for now were not clearing it, sending an extra
		 * ACK when there is nothing else to do in DELACK is not a big
		 * deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
		if (likely(sample)) {
			long delta = dccp_timestamp() - sample;

			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
		}

		dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by __dccp_rcv_established) */
		}
		break;
	}

	return queued;
}
|
|
|
|
/*
 * Main state-machine entry for packets received on a socket that is not in
 * an established state (RFC 4340, 8.5 pseudocode, Steps 3-16).
 *
 * Returns 0 when the packet was consumed (queued or freed), 1 when the
 * caller must generate a Reset (reset code set in the skb control block).
 */
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	const int old_state = sk->sk_state;
	bool acceptable;
	int queued = 0;

	/*
	 *  Step 3: Process LISTEN state
	 *
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
	 *	      Cookies Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			/* It is possible that we process SYN packets from backlog,
			 * so we need to make sure to disable BH right there.
			 * (The listener lock is held in process context here;
			 * conn_request() takes locks that are otherwise taken
			 * in softirq context.)
			 */
			local_bh_disable();
			acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
			local_bh_enable();
			if (!acceptable)
				return 1;
			consume_skb(skb);
			return 0;
		}
		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		/* Caller (dccp_v4_do_rcv) will send Reset */
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	} else if (sk->sk_state == DCCP_CLOSED) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	}

	/* Step 6: Check sequence numbers (omitted in LISTEN/REQUEST state) */
	if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
		goto discard;

	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == Response)
	 *	    or (S.is_client and P.type == Request)
	 *	    or (S.state == RESPOND and P.type == Data),
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
	     dh->dccph_type == DCCP_PKT_RESPONSE) ||
	    (dp->dccps_role == DCCP_ROLE_CLIENT &&
	     dh->dccph_type == DCCP_PKT_REQUEST) ||
	    (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
		goto discard;
	}

	/*  Step 8: Process options */
	if (dccp_parse_options(sk, NULL, skb))
		return 1;

	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_RESET) {
		dccp_rcv_reset(sk, skb);
		return 0;
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {	/* Step 13 */
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {		/* Step 14 */
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	}

	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		/* queued == -1: an Ack was sent, drop the skb here */
		__kfree_skb(skb);
		return 0;

	case DCCP_PARTOPEN:
		/* Step 8: if using Ack Vectors, mark packet acknowledgeable */
		dccp_handle_ackvec_processing(sk, skb);
		dccp_deliver_input_to_ccids(sk, skb);
		/* fall through */
	case DCCP_RESPOND:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			/* Handshake completed: wake up a blocked connect() */
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
			break;
		}
	} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
		goto discard;
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
|
|
|
|
/**
 * dccp_sample_rtt  -  Validate and finalise computation of RTT sample
 * @sk:	   socket whose received Elapsed Time option adjusts the sample
 * @delta: number of microseconds between packet and acknowledgment
 *
 * The routine is kept generic to work in different contexts. It should be
 * called immediately when the ACK used for the RTT sample arrives.
 *
 * Returns the sample clamped into [DCCP_SANE_RTT_MIN, DCCP_SANE_RTT_MAX].
 */
u32 dccp_sample_rtt(struct sock *sk, long delta)
{
	/* dccpor_elapsed_time is either zeroed out or set and > 0 */
	delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;

	if (unlikely(delta <= 0)) {
		DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
		return DCCP_SANE_RTT_MIN;
	}
	if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
		DCCP_WARN("RTT sample %ld too large, using max\n", delta);
		return DCCP_SANE_RTT_MAX;
	}

	return delta;
}
|