rxrpc: Rework local endpoint management

Rework the local RxRPC endpoint management.

Local endpoint objects are maintained in a flat list as before.  This
should be okay as there shouldn't be more than one per open AF_RXRPC socket
(there can be fewer as local endpoints can be shared if their local service
ID is 0 and they share the same local transport parameters).

Changes:

 (1) Local endpoints may now only be shared if they have local service ID 0
     (ie. they're not being used for listening).

     This prevents a scenario where process A is listening on the Cache
     Manager port and process B contacts a fileserver - which may then
     attempt to send CM requests back to B.  But if A and B are sharing a
     local endpoint, A will get the CM requests meant for B.

 (2) We use a mutex to handle lookups and don't provide RCU-only lookups
     since we only expect to access the list when opening a socket or
     destroying an endpoint.

     The local endpoint object is pointed to by the transport socket's
     sk_user_data for the life of the transport socket - allowing us to
     refer to it directly from the sk_data_ready and sk_error_report
     callbacks.

 (3) atomic_inc_not_zero() now exists and can be used to only share a local
     endpoint if the last reference hasn't yet gone (see the lookup sketch
     after this list).

 (4) We can remove rxrpc_local_lock - a spinlock that had to be taken with
     BH processing disabled given that we assume sk_user_data won't change
     under us.

 (5) The transport socket is shut down before we clear the sk_user_data
     pointer so that we can be sure that the transport socket's callbacks
     won't be invoked once the RCU destruction is scheduled.

 (6) Local endpoints have a work item that handles both destruction and
     event processing.  This means that destruction doesn't then need to
     wait for event processing.  The event queues can then be cleared after
     the transport socket is shut down (see the teardown sketch after this
     list).

 (7) Local endpoints are no longer available for resurrection beyond the
     life of the sockets that had them open.  As soon as their last ref
     goes, they are scheduled for destruction and may not have their usage
     count moved from 0.
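
As a rough illustration of how points (2), (3) and (7) fit together with the
flat endpoint list, the lookup might be shaped like the sketch below.  This
is not the patch itself; the list, mutex, struct and helper names here are
assumptions made purely for the example, and the transport-parameter
comparison is elided.

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/atomic.h>
	#include <linux/types.h>

	struct local_sketch {
		struct list_head link;  /* on the flat endpoint list */
		atomic_t usage;         /* ref count; 0 means dying */
		u16 service_id;         /* 0 means the endpoint may be shared */
		/* ... local transport parameters ... */
	};

	static LIST_HEAD(local_endpoint_list);     /* flat list of endpoints */
	static DEFINE_MUTEX(local_endpoint_mutex); /* serialises lookup/destruction */

	/* Find a shareable endpoint, taking a ref only if the last ref hasn't
	 * already gone; otherwise the caller allocates a new endpoint and adds
	 * it to the list while still holding the mutex.
	 */
	static struct local_sketch *lookup_local_sketch(void)
	{
		struct local_sketch *local;

		mutex_lock(&local_endpoint_mutex);
		list_for_each_entry(local, &local_endpoint_list, link) {
			/* (Comparison of local transport parameters elided.) */
			if (local->service_id == 0 &&
			    atomic_inc_not_zero(&local->usage))
				goto out;	/* got a ref on a live, shareable endpoint */
		}
		local = NULL;	/* caller must create a new endpoint */
	out:
		mutex_unlock(&local_endpoint_mutex);
		return local;
	}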

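And a rough sketch of the teardown ordering described in points (5) and (6):
the work item notices that the usage count has reached zero, shuts the
transport socket down so its callbacks can no longer be invoked, clears
sk_user_data and only then purges the event queues.  Again, the struct,
field and function names here, and the exact split of the code, are
illustrative assumptions rather than the patch's own.

	#include <linux/skbuff.h>
	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/net.h>
	#include <net/sock.h>

	struct local_teardown_sketch {
		struct work_struct processor;   /* handles events and destruction */
		struct socket *socket;          /* transport UDP socket */
		struct sk_buff_head accept_queue;
		struct sk_buff_head event_queue;
		atomic_t usage;
	};

	static void local_processor_sketch(struct work_struct *work)
	{
		struct local_teardown_sketch *local =
			container_of(work, struct local_teardown_sketch, processor);

		if (atomic_read(&local->usage) == 0) {
			/* Shut the socket down first: after this, sk_data_ready
			 * and sk_error_report can no longer be invoked on it.
			 */
			kernel_sock_shutdown(local->socket, SHUT_RDWR);
			local->socket->sk->sk_user_data = NULL;

			/* Only now is it safe to clear the queues and release. */
			skb_queue_purge(&local->accept_queue);
			skb_queue_purge(&local->event_queue);
			sock_release(local->socket);
			kfree(local);	/* the real code defers freeing, e.g. via RCU */
			return;
		}

		/* Still live: just process queued packets. */
		/* ... handle accept_queue and event_queue here ... */
	}
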
Signed-off-by: David Howells <dhowells@redhat.com>

@@ -594,9 +594,8 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
 {
 	_enter("%p,%p", local, skb);
 
-	atomic_inc(&local->usage);
 	skb_queue_tail(&local->event_queue, skb);
-	rxrpc_queue_work(&local->event_processor);
+	rxrpc_queue_work(&local->processor);
 }
 
 /*
@@ -664,11 +663,15 @@ cant_find_conn:
 /*
  * handle data received on the local endpoint
  * - may be called in interrupt context
+ *
+ * The socket is locked by the caller and this prevents the socket from being
+ * shut down and the local endpoint from going away, thus sk_user_data will not
+ * be cleared until this function returns.
  */
 void rxrpc_data_ready(struct sock *sk)
 {
 	struct rxrpc_skb_priv *sp;
-	struct rxrpc_local *local;
+	struct rxrpc_local *local = sk->sk_user_data;
 	struct sk_buff *skb;
 	int ret;
 
@@ -676,21 +679,8 @@ void rxrpc_data_ready(struct sock *sk)
 
 	ASSERT(!irqs_disabled());
 
-	read_lock_bh(&rxrpc_local_lock);
-	local = sk->sk_user_data;
-	if (local && atomic_read(&local->usage) > 0)
-		rxrpc_get_local(local);
-	else
-		local = NULL;
-	read_unlock_bh(&rxrpc_local_lock);
-	if (!local) {
-		_leave(" [local dead]");
-		return;
-	}
-
 	skb = skb_recv_datagram(sk, 0, 1, &ret);
 	if (!skb) {
-		rxrpc_put_local(local);
 		if (ret == -EAGAIN)
 			return;
 		_debug("UDP socket error %d", ret);
@@ -704,7 +694,6 @@ void rxrpc_data_ready(struct sock *sk)
 	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
 	if (skb_checksum_complete(skb)) {
 		rxrpc_free_skb(skb);
-		rxrpc_put_local(local);
 		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
 		_leave(" [CSUM failed]");
 		return;
@@ -769,7 +758,6 @@ void rxrpc_data_ready(struct sock *sk)
 	}
 
 out:
-	rxrpc_put_local(local);
 	return;
 
 cant_route_call:
@@ -779,8 +767,7 @@ cant_route_call:
 	if (sp->hdr.seq == 1) {
 		_debug("first packet");
 		skb_queue_tail(&local->accept_queue, skb);
-		rxrpc_queue_work(&local->acceptor);
-		rxrpc_put_local(local);
+		rxrpc_queue_work(&local->processor);
 		_leave(" [incoming]");
 		return;
 	}
@@ -793,13 +780,11 @@ cant_route_call:
_debug("reject type %d",sp->hdr.type);
rxrpc_reject_packet(local, skb);
}
rxrpc_put_local(local);
_leave(" [no call]");
return;
bad_message:
skb->priority = RX_PROTOCOL_ERROR;
rxrpc_reject_packet(local, skb);
rxrpc_put_local(local);
_leave(" [badmsg]");
}