Fix race for duplicate reqsk on identical SYN
When bonding is configured in BOND_MODE_BROADCAST mode, if two identical
SYN packets are received at the same time and processed on different CPUs,
it can potentially create the same sk (sock) but two different reqsk
(request_sock) in tcp_conn_request().
These two different reqsk will respond with two SYNACK packets, and since
the generation of the seq (ISN) incorporates a timestamp, the final two
SYNACK packets will have different seq values.
The consequence is that when the Client receives and replies with an
ACK to the earlier SYNACK packet, we will reset (RST) it.
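For illustration only, the userspace sketch below (not kernel code;
struct fake_reqsk, handle_syn() and the single-bucket table are made-up
stand-ins) mimics the failure mode: each thread plays one CPU running
tcp_conn_request(), and because the insert never rejects an existing
entry for the same key, analogous to inet_ehash_insert() being called
with a NULL found_dup_sk, both requests for the same connection end up
hashed, each with its own ISN.

  #include <pthread.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct fake_reqsk {
      char key[64];              /* stand-in for the 4-tuple */
      uint32_t isn;              /* stand-in for the timestamp-based ISN */
      struct fake_reqsk *next;
  };

  static struct fake_reqsk *ehash;  /* single-bucket stand-in for the ehash */
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  /* Unconditional insert: models inet_ehash_insert() being passed a NULL
   * found_dup_sk, so a second entry for the same connection is never
   * refused. */
  static void *handle_syn(void *isn)
  {
      struct fake_reqsk *req = calloc(1, sizeof(*req));

      strcpy(req->key, "10.10.10.10:45182 -> server:port");
      req->isn = (uint32_t)(uintptr_t)isn;

      pthread_mutex_lock(&lock);
      req->next = ehash;
      ehash = req;
      pthread_mutex_unlock(&lock);
      return NULL;
  }

  int main(void)
  {
      pthread_t cpu0, cpu1;
      int n = 0;

      /* Two identical SYNs, one per "CPU", as the bonded NICs deliver
       * them. */
      pthread_create(&cpu0, NULL, handle_syn, (void *)(uintptr_t)2967855116u);
      pthread_create(&cpu1, NULL, handle_syn, (void *)(uintptr_t)2967855123u);
      pthread_join(cpu0, NULL);
      pthread_join(cpu1, NULL);

      for (struct fake_reqsk *r = ehash; r; r = r->next)
          printf("reqsk %d for %s, SYNACK seq %u\n", ++n, r->key, r->isn);
      return 0;              /* prints two reqsks for one connection */
  }

Compile with "cc -pthread"; it always reports two reqsk entries for the
single connection, which is exactly the duplicate-reqsk state described
above.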
========================================================================
This behavior is consistently reproducible in my local setup,
which comprises:
                  | NETA1 ------ NETB1 |
PC_A --- bond --- |                    | --- bond --- PC_B
                  | NETA2 ------ NETB2 |
- PC_A is the Server and has two network cards, NETA1 and NETA2. I have
  bonded these two cards using BOND_MODE_BROADCAST mode and configured
  them to be handled by different CPUs.
- PC_B is the Client, also equipped with two network cards, NETB1 and
NETB2, which are also bonded and configured in BOND_MODE_BROADCAST mode.
If the client attempts a TCP connection to the server, it might encounter
a failure. Capturing packets from the server side reveals:
10.10.10.10.45182 > localhost: Flags [S], seq 320236027,
10.10.10.10.45182 > localhost: Flags [S], seq 320236027,
localhost > 10.10.10.10.45182: Flags [S.], seq 2967855116,
localhost > 10.10.10.10.45182: Flags [S.], seq 2967855123, <==
10.10.10.10.45182 > localhost: Flags [.], ack 4294967290,
10.10.10.10.45182 > localhost: Flags [.], ack 4294967290,
localhost > 10.10.10.10.45182: Flags [R], seq 2967855117, <==
localhost > 10.10.10.10.45182: Flags [R], seq 2967855117,
Two SYNACKs with different seq numbers are sent by localhost,
resulting in an anomaly.
========================================================================
The attempted solution is as follows:
Add a return value to inet_csk_reqsk_queue_hash_add() to confirm
whether the ehash insertion is successful (up to now, the only reason
for an unsuccessful insertion is that a reqsk for the same connection
has already been inserted). If the insertion fails, release the reqsk.
Due to the refcnt, Kuniyuki suggests also adding a return value check
for the DCCP module; if the ehash insertion fails, meaning a reqsk for
the same connection was already inserted successfully, simply release
the reqsk as well.
At the same time, in reqsk_queue_hash_req(), starting req->rsk_timer is
moved to after the successful insertion.
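For clarity, the same kind of userspace sketch (table_insert_unique()
and handle_syn() are illustrative stand-ins, not the kernel API) shows
the contract the patch introduces: the insert reports whether an entry
for the connection already exists, and on failure the caller frees its
request instead of publishing a second one, mirroring the reqsk_free()
calls in the hunks below.

  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct fake_reqsk {
      char key[64];              /* stand-in for the 4-tuple */
      struct fake_reqsk *next;
  };

  static struct fake_reqsk *ehash;  /* single-bucket stand-in for the ehash */

  /* Rough analogue of the patched reqsk_queue_hash_req(): refuse to hash
   * a second entry for the same key and tell the caller about it. */
  static bool table_insert_unique(struct fake_reqsk *req)
  {
      for (struct fake_reqsk *r = ehash; r; r = r->next)
          if (!strcmp(r->key, req->key))
              return false;      /* same connection already hashed */
      req->next = ehash;
      ehash = req;
      return true;
  }

  static void handle_syn(const char *tuple)
  {
      struct fake_reqsk *req = calloc(1, sizeof(*req));

      strncpy(req->key, tuple, sizeof(req->key) - 1);
      if (!table_insert_unique(req))
          free(req);             /* analogue of reqsk_free(): drop the dup */
  }

  int main(void)
  {
      int n = 0;

      /* The second identical SYN no longer creates a second request,
       * so only one SYNACK seq can ever be generated. */
      handle_syn("10.10.10.10:45182 -> server:port");
      handle_syn("10.10.10.10:45182 -> server:port");

      for (struct fake_reqsk *r = ehash; r; r = r->next)
          n++;
      printf("%d reqsk hashed for the connection\n", n);   /* prints 1 */
      return 0;
  }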
Fixes: 1da177e4c3 ("Linux-2.6.12-rc2")
Signed-off-by: luoxuanqiang <luoxuanqiang@kylinos.cn>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20240621013929.1386815-1-luoxuanqiang@kylinos.cn
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit ff46e3b442 (parent 0983d288ca)
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -263,7 +263,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
                                       struct request_sock *req,
                                       struct sock *child);
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                    unsigned long timeout);
 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
                                          struct request_sock *req,
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -657,8 +657,11 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (dccp_v4_send_response(sk, req))
 		goto drop_and_free;
 
-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
-	reqsk_put(req);
+	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
+		reqsk_free(req);
+	else
+		reqsk_put(req);
+
 	return 0;
 
 drop_and_free:
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -400,8 +400,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (dccp_v6_send_response(sk, req))
 		goto drop_and_free;
 
-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
-	reqsk_put(req);
+	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
+		reqsk_free(req);
+	else
+		reqsk_put(req);
+
 	return 0;
 
 drop_and_free:
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1122,25 +1122,34 @@ drop:
 	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
 }
 
-static void reqsk_queue_hash_req(struct request_sock *req,
+static bool reqsk_queue_hash_req(struct request_sock *req,
 				 unsigned long timeout)
 {
+	bool found_dup_sk = false;
+
+	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
+		return false;
+
+	/* The timer needs to be setup after a successful insertion. */
 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
 	mod_timer(&req->rsk_timer, jiffies + timeout);
 
-	inet_ehash_insert(req_to_sk(req), NULL, NULL);
 	/* before letting lookups find us, make sure all req fields
 	 * are committed to memory and refcnt initialized.
 	 */
 	smp_wmb();
 	refcount_set(&req->rsk_refcnt, 2 + 1);
+	return true;
 }
 
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 				   unsigned long timeout)
 {
-	reqsk_queue_hash_req(req, timeout);
+	if (!reqsk_queue_hash_req(req, timeout))
+		return false;
+
 	inet_csk_reqsk_queue_added(sk);
+	return true;
 }
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -7257,7 +7257,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		tcp_rsk(req)->tfo_listener = false;
 		if (!want_cookie) {
 			req->timeout = tcp_timeout_init((struct sock *)req);
-			inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
+			if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
+								    req->timeout))) {
+				reqsk_free(req);
+				return 0;
+			}
+
 		}
 		af_ops->send_synack(sk, dst, &fl, req, &foc,
 				    !want_cookie ? TCP_SYNACK_NORMAL :