linux/net/sctp/endpointola.c
/* SCTP kernel implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* This file is part of the SCTP kernel implementation
*
* This abstraction represents an SCTP endpoint.
*
* The SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@austin.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/random.h> /* get_random_bytes() */
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
static void sctp_endpoint_bh_rcv(struct work_struct *work);
/*
* Initialize the base fields of the endpoint structure.
*/
static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
struct sock *sk,
gfp_t gfp)
{
struct net *net = sock_net(sk);
struct sctp_hmac_algo_param *auth_hmacs = NULL;
struct sctp_chunks_param *auth_chunks = NULL;
struct sctp_shared_key *null_key;
int err;
ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
if (!ep->digest)
return NULL;
if (net->sctp.auth_enable) {
/* Allocate space for HMACS and CHUNKS authentication
* variables. There are arrays that we encode directly
* into parameters to make the rest of the operations easier.
*/
auth_hmacs = kzalloc(sizeof(sctp_hmac_algo_param_t) +
sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp);
if (!auth_hmacs)
goto nomem;
auth_chunks = kzalloc(sizeof(sctp_chunks_param_t) +
SCTP_NUM_CHUNK_TYPES, gfp);
if (!auth_chunks)
goto nomem;
/* Initialize the HMACS parameter.
* SCTP-AUTH: Section 3.3
* Every endpoint supporting SCTP chunk authentication MUST
* support the HMAC based on the SHA-1 algorithm.
*/
auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO;
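/* The parameter length covers the header plus one __u16 HMAC
 * identifier, the mandatory SHA-1 id set just below.
 */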
auth_hmacs->param_hdr.length =
htons(sizeof(sctp_paramhdr_t) + 2);
auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1);
/* Initialize the CHUNKS parameter */
auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
/* If the Add-IP functionality is enabled, we must
* authenticate ASCONF and ASCONF-ACK chunks.
*/
if (net->sctp.addip_enable) {
auth_chunks->chunks[0] = SCTP_CID_ASCONF;
auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
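/* Account for the two one-byte chunk identifiers in the
 * parameter length.
 */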
auth_chunks->param_hdr.length =
htons(sizeof(sctp_paramhdr_t) + 2);
}
}
/* Initialize the base structure. */
/* What type of endpoint are we? */
ep->base.type = SCTP_EP_TYPE_SOCKET;
/* Initialize the basic object fields. */
atomic_set(&ep->base.refcnt, 1);
ep->base.dead = false;
/* Create an input queue. */
sctp_inq_init(&ep->base.inqueue);
/* Set its top-half handler */
sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
/* Initialize the bind addr area */
sctp_bind_addr_init(&ep->base.bind_addr, 0);
/* Remember who we are attached to. */
ep->base.sk = sk;
sock_hold(ep->base.sk);
/* Create the lists of associations. */
INIT_LIST_HEAD(&ep->asocs);
/* Use SCTP specific send buffer space queues. */
ep->sndbuf_policy = net->sctp.sndbuf_policy;
sk->sk_data_ready = sctp_data_ready;
sk->sk_write_space = sctp_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
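/* SOCK_USE_WRITE_QUEUE tells the socket core that SCTP does its
 * own write-space accounting via the callbacks installed above.
 */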
/* Get the receive buffer policy for this endpoint */
ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
/* Initialize the secret key used with cookie. */
get_random_bytes(ep->secret_key, sizeof(ep->secret_key));
/* SCTP-AUTH extensions. */
INIT_LIST_HEAD(&ep->endpoint_shared_keys);
null_key = sctp_auth_shkey_create(0, gfp);
if (!null_key)
goto nomem;
list_add(&null_key->key_list, &ep->endpoint_shared_keys);
/* Allocate and initialize transform arrays for supported HMACs. */
err = sctp_auth_init_hmacs(ep, gfp);
if (err)
goto nomem_hmacs;
/* The null key was added to the endpoint shared key list above;
 * now set the hmacs and chunks pointers.
 */
ep->auth_hmacs_list = auth_hmacs;
ep->auth_chunk_list = auth_chunks;
return ep;
nomem_hmacs:
sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
nomem:
/* Free all allocations */
kfree(auth_hmacs);
kfree(auth_chunks);
kfree(ep->digest);
return NULL;
}
/* Create a sctp_endpoint with all that boring stuff initialized.
* Returns NULL if there isn't enough memory.
*/
struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
{
struct sctp_endpoint *ep;
/* Build a local endpoint. */
ep = kzalloc(sizeof(*ep), gfp);
if (!ep)
goto fail;
if (!sctp_endpoint_init(ep, sk, gfp))
goto fail_init;
SCTP_DBG_OBJCNT_INC(ep);
return ep;
fail_init:
kfree(ep);
fail:
return NULL;
}
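/* A minimal lifetime sketch (hypothetical caller; locking and error
 * handling elided):
 *
 *	struct sctp_endpoint *ep = sctp_endpoint_new(sk, GFP_KERNEL);
 *	if (!ep)
 *		return -ENOMEM;
 *	...bind addresses, process associations...
 *	sctp_endpoint_free(ep);	// final put runs sctp_endpoint_destroy()
 */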
/* Add an association to an endpoint. */
void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
struct sctp_association *asoc)
{
struct sock *sk = ep->base.sk;
/* If this is a temporary association, don't bother
* since we'll be removing it shortly and don't
* want anyone to find it anyway.
*/
if (asoc->temp)
return;
/* Now just add it to our list of asocs */
list_add_tail(&asoc->asocs, &ep->asocs);
/* Increment the backlog value for a TCP-style listening socket. */
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk->sk_ack_backlog++;
}
/* Free the endpoint structure. Delay cleanup until
* all users have released their reference count on this structure.
*/
void sctp_endpoint_free(struct sctp_endpoint *ep)
{
ep->base.dead = true;
ep->base.sk->sk_state = SCTP_SS_CLOSED;
/* Unlink this endpoint, so we can't find it again! */
sctp_unhash_endpoint(ep);
sctp_endpoint_put(ep);
}
/* Final destructor for endpoint. */
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
struct sock *sk;
if (unlikely(!ep->base.dead)) {
WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
return;
}
/* Free the digest buffer */
kfree(ep->digest);
/* SCTP-AUTH: Free up AUTH related data such as the shared keys,
 * chunks and hmacs arrays that were allocated.
 */
sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
kfree(ep->auth_hmacs_list);
kfree(ep->auth_chunk_list);
/* AUTH - Free any allocated HMAC transform containers */
sctp_auth_destroy_hmacs(ep->auth_hmacs);
/* Cleanup. */
sctp_inq_free(&ep->base.inqueue);
sctp_bind_addr_free(&ep->base.bind_addr);
memset(ep->secret_key, 0, sizeof(ep->secret_key));
/* Give up our hold on the sock. */
sk = ep->base.sk;
if (sk != NULL) {
/* Remove and free the port */
if (sctp_sk(sk)->bind_hash)
sctp_put_port(sk);
sock_put(sk);
}
kfree(ep);
SCTP_DBG_OBJCNT_DEC(ep);
}
/* Hold a reference to an endpoint. */
void sctp_endpoint_hold(struct sctp_endpoint *ep)
{
atomic_inc(&ep->base.refcnt);
}
/* Release a reference to an endpoint and clean up if there are
* no more references.
*/
void sctp_endpoint_put(struct sctp_endpoint *ep)
{
if (atomic_dec_and_test(&ep->base.refcnt))
sctp_endpoint_destroy(ep);
}
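/* Usage sketch: every hold must be balanced by a put, and the final
 * put frees the endpoint:
 *
 *	sctp_endpoint_hold(ep);	// keep ep alive across a handoff
 *	...use ep...
 *	sctp_endpoint_put(ep);	// may invoke sctp_endpoint_destroy()
 */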
/* Is this the endpoint we are looking for? */
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
struct net *net,
const union sctp_addr *laddr)
{
struct sctp_endpoint *retval = NULL;
if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
net_eq(sock_net(ep->base.sk), net)) {
if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
sctp_sk(ep->base.sk)))
retval = ep;
}
return retval;
}
/* Find the association that goes with this chunk.
* We do a linear search of the associations for this endpoint.
* We return the matching transport address too.
*/
static struct sctp_association *__sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
const union sctp_addr *paddr,
struct sctp_transport **transport)
{
struct sctp_association *asoc = NULL;
struct sctp_association *tmp;
struct sctp_transport *t = NULL;
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
int hash;
int rport;
*transport = NULL;
/* If the local port is not set, there can't be any associations
* on this endpoint.
*/
if (!ep->base.bind_addr.port)
goto out;
rport = ntohs(paddr->v4.sin_port);
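/* Associations are hashed by (net, local port, peer port), so only
 * the matching bucket needs to be walked.
 */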
hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port,
rport);
head = &sctp_assoc_hashtable[hash];
read_lock(&head->lock);
sctp_for_each_hentry(epb, &head->chain) {
tmp = sctp_assoc(epb);
if (tmp->ep != ep || rport != tmp->peer.port)
continue;
t = sctp_assoc_lookup_paddr(tmp, paddr);
if (t) {
asoc = tmp;
*transport = t;
break;
}
}
read_unlock(&head->lock);
out:
return asoc;
}
/* Lookup association on an endpoint based on a peer address. BH-safe. */
struct sctp_association *sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
const union sctp_addr *paddr,
struct sctp_transport **transport)
{
struct sctp_association *asoc;
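/* Disabling BHs keeps the bucket read_lock() safe from process
 * context, since the association hash is also taken from softirq.
 */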
sctp_local_bh_disable();
asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport);
sctp_local_bh_enable();
return asoc;
}
/* Look for any peeled off association from the endpoint that matches the
* given peer address.
*/
int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
const union sctp_addr *paddr)
{
struct sctp_sockaddr_entry *addr;
struct sctp_bind_addr *bp;
struct net *net = sock_net(ep->base.sk);
bp = &ep->base.bind_addr;
/* This function is called with the socket lock held,
* so the address_list can not change.
*/
list_for_each_entry(addr, &bp->address_list, list) {
if (sctp_has_association(net, &addr->a, paddr))
return 1;
}
return 0;
}
/* Do delayed input processing. This is scheduled by sctp_rcv().
* This may be called on BH or task time.
*/
static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
struct sctp_endpoint *ep =
container_of(work, struct sctp_endpoint,
base.inqueue.immediate);
struct sctp_association *asoc;
struct sock *sk;
struct net *net;
struct sctp_transport *transport;
struct sctp_chunk *chunk;
struct sctp_inq *inqueue;
sctp_subtype_t subtype;
sctp_state_t state;
int error = 0;
int first_time = 1; /* is this the first time through the loop */
if (ep->base.dead)
return;
asoc = NULL;
inqueue = &ep->base.inqueue;
sk = ep->base.sk;
net = sock_net(sk);
while (NULL != (chunk = sctp_inq_pop(inqueue))) {
subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
/* If the first chunk in the packet is AUTH, do special
* processing specified in Section 6.3 of SCTP-AUTH spec
*/
if (first_time && (subtype.chunk == SCTP_CID_AUTH)) {
struct sctp_chunkhdr *next_hdr;
next_hdr = sctp_inq_peek(inqueue);
if (!next_hdr)
goto normal;
/* If the next chunk is COOKIE-ECHO, skip the AUTH
* chunk while saving a pointer to it so we can do
* Authentication later (during cookie-echo
* processing).
*/
if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
chunk->auth_chunk = skb_clone(chunk->skb,
GFP_ATOMIC);
chunk->auth = 1;
continue;
}
}
normal:
/* We might have grown an association since last we
* looked, so try again.
*
* This happens when we've just processed our
* COOKIE-ECHO chunk.
*/
if (NULL == chunk->asoc) {
asoc = sctp_endpoint_lookup_assoc(ep,
sctp_source(chunk),
&transport);
chunk->asoc = asoc;
chunk->transport = transport;
}
state = asoc ? asoc->state : SCTP_STATE_CLOSED;
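/* Discard chunk types that SCTP-AUTH requires to be authenticated
 * when they did not arrive inside an AUTH chunk.
 */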
if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
continue;
/* Remember where the last DATA chunk came from so we
* know where to send the SACK.
*/
if (asoc && sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else {
SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
if (asoc)
asoc->stats.ictrlchunks++;
}
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
ep, asoc, chunk, GFP_ATOMIC);
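/* On a state machine error, mark the chunk so the remainder of its
 * packet is discarded.
 */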
if (error && chunk)
chunk->pdiscard = 1;
/* Check to see if the endpoint is freed in response to
* the incoming chunk. If so, get out of the while loop.
*/
if (!sctp_sk(sk)->ep)
break;
if (first_time)
first_time = 0;
}
}