All callers of these primitives will
  * discard anything we might've copied in case of error
  * ignore the csum value in case of error
  * always pass 0xffffffff as the initial sum, so the resulting
    csum value (in case of success, that is) will never be 0.

That suggests the following calling conventions:
  * don't pass err_ptr - just return 0 on error.
  * don't bother with zeroing the destination, etc. in case of error.
  * don't pass the initial sum - just use 0xffffffff.

This commit does the minimal conversion in the instances of
csum_and_copy_...(); the changes to the actual asm code behind them
are done later in the series.  Note that this asm code is often
shared with csum_partial_copy_nocheck(); the difference is that
csum_partial_copy_nocheck() passes 0 for the initial sum while
csum_and_copy_..._user() pass 0xffffffff.  Fortunately, we are free
to pass 0xffffffff in all cases, and subsequent patches will use that
freedom without any special comments.

A part that could be split off: parisc and uml/i386 claimed to have
csum_and_copy_to_user() instances of their own, but those were
identical to the generic one, so we simply drop them.  Not sure if
it's worth a separate commit...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
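As an illustration of the convention change, here is a caller-side
sketch (two separate fragments, not one compilable unit; the
pre-conversion prototype is quoted from the old interface as I recall
it, and the exact __force casts are elided):

/* Old convention: the caller picks the initial sum and passes an
 * error pointer that the primitive sets on a fault. */
int err = 0;
__wsum csum = csum_and_copy_from_user(src, dst, len, (__wsum)~0U, &err);
if (err)
	return -EFAULT;

/* New convention: the primitive uses 0xffffffff as the initial sum
 * internally; since a successful csum then never comes out as 0, a
 * return value of 0 unambiguously reports a fault. */
__wsum csum = csum_and_copy_from_user(src, dst, len);
if (!csum)
	return -EFAULT;

The m68k header below (arch/m68k/include/asm/checksum.h, as it reads
after the conversion) shows the resulting declarations: both
csum_and_copy_from_user() and csum_partial_copy_nocheck() take just
src, dst and len.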
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_CHECKSUM_H
#define _M68K_CHECKSUM_H

#include <linux/in6.h>

#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define _HAVE_ARCH_CSUM_AND_COPY
extern __wsum csum_and_copy_from_user(const void __user *src,
						void *dst,
						int len);

extern __wsum csum_partial_copy_nocheck(const void *src,
					      void *dst, int len);

/*
 *	This is a version of ip_fast_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum = 0;
	unsigned long tmp;

	__asm__ ("subqw #1,%2\n"
		 "1:\t"
		 "movel %1@+,%3\n\t"
		 "addxl %3,%0\n\t"
		 "dbra  %2,1b\n\t"
		 "movel %0,%3\n\t"
		 "swap  %3\n\t"
		 "addxw %3,%0\n\t"
		 "clrw  %3\n\t"
		 "addxw %3,%0\n\t"
		 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
		 : "0" (sum), "1" (iph), "2" (ihl)
		 : "memory");
	return (__force __sum16)~sum;
}

static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	__asm__("swap %1\n\t"
		"addw %1, %0\n\t"
		"clrw %1\n\t"
		"addxw %1, %0"
		: "=&d" (sum), "=&d" (tmp)
		: "0" (sum), "1" (tmp));

	return (__force __sum16)~sum;
}

static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	__asm__ ("addl  %2,%0\n\t"
		 "addxl %3,%0\n\t"
		 "addxl %4,%0\n\t"
		 "clrl %1\n\t"
		 "addxl %1,%0"
		 : "=&d" (sum), "=d" (saddr)
		 : "g" (daddr), "1" (saddr), "d" (len + proto),
		   "0" (sum));
	return sum;
}


/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold (csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		__u32 len, __u8 proto, __wsum sum)
{
	register unsigned long tmp;
	__asm__("addl %2@,%0\n\t"
		"movel %2@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %2@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@,%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(4),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(8),%1\n\t"
		"addxl %1,%0\n\t"
		"movel %3@(12),%1\n\t"
		"addxl %1,%0\n\t"
		"addxl %4,%0\n\t"
		"clrl %1\n\t"
		"addxl %1,%0"
		: "=&d" (sum), "=&d" (tmp)
		: "a" (saddr), "a" (daddr), "d" (len + proto),
		  "0" (sum));

	return csum_fold(sum);
}

#endif /* CONFIG_GENERIC_CSUM */
#endif /* _M68K_CHECKSUM_H */
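For reference, the arithmetic the asm blocks above implement is the
standard 16-bit one's-complement Internet checksum: accumulate 16-bit
words into a wider register, fold the carries back in, and complement.
Below is a minimal, self-contained user-space sketch of that math (a
portable illustration whose names merely mirror the kernel's; it is
not the m68k asm, and byte order is simplified to big-endian word
loads):

#include <stdint.h>
#include <stdio.h>

/* Accumulate big-endian 16-bit words into a 32-bit one's-complement sum. */
static uint32_t csum_partial_portable(const void *buff, int len, uint32_t sum)
{
	const uint8_t *p = buff;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* trailing odd byte */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Fold a 32-bit partial sum down to 16 bits and complement it. */
static uint16_t csum_fold_portable(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* A 20-byte IPv4 header with its checksum field zeroed out. */
	const uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x73, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x11, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0xc7,
	};

	printf("checksum: 0x%04x\n",
	       csum_fold_portable(csum_partial_portable(iph, 20, 0)));
	/* prints "checksum: 0xb861" */
	return 0;
}

csum_tcpudp_magic() above composes the same two steps:
csum_tcpudp_nofold() does the wide accumulation over the pseudo-header
and csum_fold() does the fold-and-complement.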