tcp: md5: input path is run under rcu protected sections

It is guaranteed that both tcp_v4_rcv() and tcp_v6_rcv()
run from rcu read-locked sections:

ip_local_deliver_finish() and ip6_input_finish() both
use rcu_read_lock()
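
For reference, a condensed sketch of the IPv4 local-delivery path (not the
exact kernel source; error handling, raw-socket delivery and the resubmit
logic are omitted) showing why tcp_v4_rcv() already runs inside an RCU
read-side section:

/* Condensed sketch of ip_local_deliver_finish(): the protocol table
 * lookup and the handler call (tcp_v4_rcv() for TCP) both happen
 * between rcu_read_lock() and rcu_read_unlock().
 */
static int ip_local_deliver_finish(struct sk_buff *skb)
{
	const struct net_protocol *ipprot;
	int protocol;

	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	protocol = ip_hdr(skb)->protocol;
	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot)
		ipprot->handler(skb);	/* tcp_v4_rcv() runs here, under RCU */
	rcu_read_unlock();

	return 0;
}

ip6_input_finish() follows the same pattern for tcp_v6_rcv().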

Also align tcp_v6_inbound_md5_hash() with tcp_v4_inbound_md5_hash()
by returning a boolean.
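
A condensed sketch (not the exact source) of the shape tcp_v6_inbound_md5_hash()
takes after this change: it relies on the rcu_read_lock() already held by the
caller and, like the v4 helper, returns true when the segment should be
dropped; the statistics bumps and the ratelimited log message are left out:

/* Condensed sketch: caller already holds rcu_read_lock() */
static bool tcp_v6_inbound_md5_hash(struct sock *sk,
				    const struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcp_md5sig_key *hash_expected;
	const __u8 *hash_location;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	if (!hash_expected && !hash_location)
		return false;	/* MD5 not in use on this connection */

	if (!hash_expected || !hash_location)
		return true;	/* option unexpectedly present or missing */

	/* Recompute the signature over the segment and compare. */
	if (tcp_v6_md5_hash_skb(newhash, hash_expected, NULL, skb) ||
	    memcmp(hash_location, newhash, 16) != 0)
		return true;

	return false;
}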

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Commit ff74e23f7e (parent 0980c1e308)
Author: Eric Dumazet, 2015-03-24 15:58:54 -07:00
Committed by: David S. Miller
2 changed files with 9 additions and 33 deletions

--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1153,8 +1153,9 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
-				       const struct sk_buff *skb)
+/* Called with rcu_read_lock() */
+static bool tcp_v4_inbound_md5_hash(struct sock *sk,
+				    const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1206,18 +1207,6 @@ static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
 	}
 	return false;
 }
-
-static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
-{
-	bool ret;
-
-	rcu_read_lock();
-	ret = __tcp_v4_inbound_md5_hash(sk, skb);
-	rcu_read_unlock();
-
-	return ret;
-}
-
 #endif
 
 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,