/*
 *	Definitions for the UDP-Lite (RFC 3828) code.
 */
#ifndef _UDPLITE_H
#define _UDPLITE_H

#include <net/ip6_checksum.h>

/* UDP-Lite socket options */
#define UDPLITE_SEND_CSCOV	10 /* sender partial coverage (as sent)      */
#define UDPLITE_RECV_CSCOV	11 /* receiver partial coverage (threshold ) */
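
/*
 * Usage sketch (illustrative, not part of this header): userspace normally
 * requests partial checksum coverage on a UDP-Lite socket via setsockopt()
 * with the option names above; the coverage value of 20 is an arbitrary
 * example. See Documentation/networking/udplite.txt for the authoritative
 * description of these options.
 *
 *	int fd  = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;	// cover only the first 20 octets of each datagram
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 */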

extern struct proto		udplite_prot;
extern struct udp_table		udplite_table;

/*
 *	Checksum computation is all in software, hence simpler getfrag.
 */
static __inline__ int udplite_getfrag(void *from, char *to, int offset,
				      int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;
	return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
}

/* Designate sk as UDP-Lite socket */
static inline int udplite_sk_init(struct sock *sk)
{
	udp_sk(sk)->pcflag = UDPLITE_BIT;
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}

/*
 *	Checksumming routines
 */
static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
{
	u16 cscov;

	/* In UDPv4 a zero checksum means that the transmitter generated no
	 * checksum. UDP-Lite (like IPv6) mandates checksums, hence packets
	 * with a zero checksum field are illegal. */
	if (uh->check == 0) {
		net_dbg_ratelimited("UDPLite: zeroed checksum field\n");
		return 1;
	}

	cscov = ntohs(uh->len);

	if (cscov == 0)		/* Indicates that full coverage is required. */
		;
	else if (cscov < 8 || cscov > skb->len) {
		/*
		 * Coverage length violates RFC 3828: log and discard silently.
		 */
		net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n",
				    cscov, skb->len);
		return 1;

	} else if (cscov < skb->len) {
		UDP_SKB_CB(skb)->partial_cov = 1;
		UDP_SKB_CB(skb)->cscov = cscov;
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = CHECKSUM_NONE;
	}

	return 0;
}
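
/*
 * Worked example for the coverage check above (illustrative numbers only):
 * for a received datagram with skb->len == 100 octets,
 *	cscov == 0	=> full coverage is required (checksum over all 100);
 *	cscov == 20	=> partial coverage: only the first 20 octets
 *			   (UDP-Lite header plus 12 payload octets) are
 *			   protected; partial_cov and cscov are recorded;
 *	cscov == 4	=> illegal, coverage may not be below 8 (the header);
 *	cscov == 120	=> illegal, coverage may not exceed the packet length.
 */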

/* Slow-path computation of checksum. Socket is locked. */
static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	const struct udp_sock *up = udp_sk(skb->sk);
	int cscov = up->len;
	__wsum csum = 0;

	if (up->pcflag & UDPLITE_SEND_CC) {
		/*
		 * Sender has set `partial coverage' option on UDP-Lite socket.
		 * The special case "up->pcslen == 0" signifies full coverage.
		 */
		if (up->pcslen < up->len) {
			if (0 < up->pcslen)
				cscov = up->pcslen;
			udp_hdr(skb)->len = htons(up->pcslen);
		}
		/*
		 * NOTE: Causes for the error case `up->pcslen > up->len':
		 *        (i)  Application error (will not be penalized).
		 *       (ii)  Payload too big for send buffer: data is split
		 *             into several packets, each with its own header.
		 *             In this case (e.g. last segment), coverage may
		 *             exceed packet length.
		 *       Since packets with coverage length > packet length are
		 *       illegal, we fall back to the defaults here.
		 */
	}

	skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */

	skb_queue_walk(&sk->sk_write_queue, skb) {
		const int off = skb_transport_offset(skb);
		const int len = skb->len - off;

		csum = skb_checksum(skb, off, (cscov > len) ? len : cscov, csum);

		if ((cscov -= len) <= 0)
			break;
	}
	return csum;
}
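
/*
 * Example of the accumulation above (illustrative numbers only): with a
 * send coverage of cscov == 30 and two queued segments holding 20 and 50
 * octets past the transport offset, the first iteration checksums
 * min(30, 20) = 20 octets and leaves cscov == 10; the second checksums
 * min(10, 50) = 10 octets, after which cscov <= 0 and the walk stops.
 */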

/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
	const struct udp_sock *up = udp_sk(skb->sk);
	const int off = skb_transport_offset(skb);
	int len = skb->len - off;

	if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
		if (0 < up->pcslen)
			len = up->pcslen;
		udp_hdr(skb)->len = htons(up->pcslen);
	}
	skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */

	return skb_checksum(skb, off, len, 0);
}

void udplite4_register(void);
int udplite_get_port(struct sock *sk, unsigned short snum,
		     int (*scmp)(const struct sock *, const struct sock *));
#endif	/* _UDPLITE_H */