Mirror of https://github.com/torvalds/linux.git (synced 2024-11-23 20:51:44 +00:00)
tcp: tcp_probe: use spin_lock_bh()
tcp_rcv_established() can now run in process context.
We need to disable BH while acquiring tcp probe spinlock,
or risk a deadlock.
Fixes: 5413d1babe ("net: do not block BH while processing socket backlog")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Ricardo Nabinger Sanchez <rnsanchez@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent: a725eb15db
commit: e70ac17165
@@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	     (fwmark > 0 && skb->mark == fwmark)) &&
 	    (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
 
-		spin_lock(&tcp_probe.lock);
+		spin_lock_bh(&tcp_probe.lock);
 		/* If log fills, just silently drop */
 		if (tcp_probe_avail() > 1) {
 			struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
 		}
 		tcp_probe.lastcwnd = tp->snd_cwnd;
-		spin_unlock(&tcp_probe.lock);
+		spin_unlock_bh(&tcp_probe.lock);
 
 		wake_up(&tcp_probe.wait);
 	}
|
Loading…
Reference in New Issue
Block a user