forked from Minki/linux
6e41c585e3
quite a few architectures have the same csum_partial_copy_nocheck() -
simply memcpy() the data and then return the csum of the copy.

hexagon, parisc, ia64, s390, um: explicitly spelled out that way.

arc, arm64, csky, h8300, m68k/nommu, microblaze, mips/GENERIC_CSUM, nds32,
nios2, openrisc, riscv, unicore32: end up picking the same thing spelled
out in lib/checksum.h (with varying amounts of perversions along the way).

everybody else (alpha, arm, c6x, m68k/mmu, mips/!GENERIC_CSUM, powerpc, sh,
sparc, x86, xtensa) have non-generic variants.  For all except c6x the
declaration is in their asm/checksum.h.  c6x uses the wrapper from
asm-generic/checksum.h that would normally lead to the lib/checksum.h
instance, but in case of c6x we end up using an asm function from arch/c6x
instead.

Screw that mess - have architectures with private instances define
_HAVE_ARCH_CSUM_AND_COPY in their asm/checksum.h and have the default one
right in net/checksum.h conditional on _HAVE_ARCH_CSUM_AND_COPY *not*
defined.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
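A rough sketch of the arrangement the message describes; the exact prototype of
csum_partial_copy_nocheck() differs between kernel versions, so the signature
below is an assumption, not the tree's definition:

/* In an architecture's asm/checksum.h that keeps a private instance
 * (prototype shown here is an assumption for illustration): */
#define _HAVE_ARCH_CSUM_AND_COPY
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
					int len, __wsum sum);

/* In net/checksum.h, the default is compiled only when no architecture
 * has claimed a private instance: */
#ifndef _HAVE_ARCH_CSUM_AND_COPY
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
					       int len, __wsum sum)
{
	/* "simply memcpy() the data and then return the csum of the copy" */
	memcpy(dst, src, len);
	return csum_partial(dst, len, sum);
}
#endif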
71 lines
1.7 KiB
C
/*
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS_CHECKSUM_H
#define _ASM_NIOS_CHECKSUM_H

/* Take these from lib/checksum.c */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
extern __sum16 ip_compute_csum(const void *buff, int len);

/*
 * Fold a partial checksum
 */
static inline __sum16 csum_fold(__wsum sum)
{
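	/*
	 * Operand %1 holds sum << 16, so the first add leaves the sum of
	 * the two 16-bit halves in the upper half of %0.  cmpltu records
	 * the carry out of the 32-bit add in r8, srli brings the folded
	 * value down to the low 16 bits, the carry is added back in, and
	 * nor produces the one's complement of the result.
	 */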
	__asm__ __volatile__(
		"add %0, %1, %0\n"
		"cmpltu r8, %0, %1\n"
		"srli %0, %0, 16\n"
		"add %0, %0, r8\n"
		"nor %0, %0, %0\n"
		: "=r" (sum)
		: "r" (sum << 16), "0" (sum)
		: "r8");
	return (__force __sum16) sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
#define csum_tcpudp_nofold csum_tcpudp_nofold
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
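	/*
	 * Accumulate saddr, daddr and the combined (len + proto) << 8
	 * operand into sum as 32-bit adds; after each add, cmpltu detects
	 * the carry and it is folded back in immediately, so the running
	 * value stays a valid one's complement partial sum.
	 */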
	__asm__ __volatile__(
		"add %0, %1, %0\n"
		"cmpltu r8, %0, %1\n"
		"add %0, %0, r8\n"	/* add carry */
		"add %0, %2, %0\n"
		"cmpltu r8, %0, %2\n"
		"add %0, %0, r8\n"	/* add carry */
		"add %0, %3, %0\n"
		"cmpltu r8, %0, %3\n"
		"add %0, %0, r8\n"	/* add carry */
		: "=r" (sum), "=r" (saddr)
		: "r" (daddr), "r" ((len + proto) << 8),
		  "0" (sum),
		  "1" (saddr)
		: "r8");

	return sum;
}

static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

#endif /* _ASM_NIOS_CHECKSUM_H */
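For illustration only, not part of this header: a sketch of how the helpers
declared above are typically combined to checksum a UDP packet.  The function
name and its parameters are hypothetical; struct udphdr comes from
linux/udp.h and IPPROTO_UDP from linux/in.h.

/* Hypothetical usage sketch: sum the UDP header and payload with
 * csum_partial(), then add the pseudo-header and fold with
 * csum_tcpudp_magic(). */
static __sum16 example_udp_csum(const struct udphdr *udph,
				const void *payload, unsigned int plen,
				__be32 saddr, __be32 daddr)
{
	__wsum csum;

	/* One's complement sum of the UDP header, then the payload. */
	csum = csum_partial(udph, sizeof(*udph), 0);
	csum = csum_partial(payload, plen, csum);

	/* Fold in the pseudo-header to get the final 16-bit checksum. */
	return csum_tcpudp_magic(saddr, daddr, sizeof(*udph) + plen,
				 IPPROTO_UDP, csum);
}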