riscv: Add checksum header
Provide checksum algorithms that have been designed to leverage riscv instructions such as rotate. In 64-bit, they can take advantage of the larger register width to avoid some overflow checking.

Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
Acked-by: Conor Dooley <conor.dooley@microchip.com>
Reviewed-by: Xiao Wang <xiao.w.wang@intel.com>
Link: https://lore.kernel.org/r/20240108-optimize_checksum-v15-3-1c50de5f2167@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
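To illustrate the point about the larger registers, here is a minimal standalone sketch (hypothetical helper names, not code from this patch): on 32-bit, every addition into a 32-bit accumulator can overflow, so the carry has to be folded back in on each step, whereas a 64-bit accumulator can absorb the carries of an entire IPv4 header (at most 15 words) and fold them once after the loop.

	#include <stdint.h>
	#include <stddef.h>

	/* 32-bit flavour: the accumulator can wrap on every add, so the
	 * carry-out has to be folded back in on each iteration. */
	static uint32_t csum32_add_loop(const uint32_t *words, size_t n)
	{
		uint32_t sum = 0;

		for (size_t i = 0; i < n; i++) {
			sum += words[i];
			sum += sum < words[i];	/* end-around carry */
		}
		return sum;
	}

	/* 64-bit flavour: at most 15 header words cannot overflow a 64-bit
	 * accumulator, so the carries are folded once, after the loop. */
	static uint32_t csum64_add_loop(const uint32_t *words, size_t n)
	{
		uint64_t sum = 0;

		for (size_t i = 0; i < n; i++)
			sum += words[i];
		sum = (sum & 0xffffffffu) + (sum >> 32);	/* fold accumulated carries */
		sum = (sum & 0xffffffffu) + (sum >> 32);	/* absorb the residual carry */
		return (uint32_t)sum;
	}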
parent 2ce5729fce
commit e11e367e9f
arch/riscv/include/asm/checksum.h (new file, 82 lines)
@@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Checksum routines
 *
 * Copyright (C) 2023 Rivos Inc.
 */
#ifndef __ASM_RISCV_CHECKSUM_H
#define __ASM_RISCV_CHECKSUM_H

#include <linux/in6.h>
#include <linux/uaccess.h>

#define ip_fast_csum ip_fast_csum

/* Define riscv versions of functions before importing asm-generic/checksum.h */
#include <asm-generic/checksum.h>

/**
 * Quickly compute an IP checksum with the assumption that IPv4 headers will
 * always be in multiples of 32-bits, and have an ihl of at least 5.
 *
 * @ihl: the number of 32 bit segments and must be greater than or equal to 5.
 * @iph: assumed to be word aligned given that NET_IP_ALIGN is set to 2 on
 *  riscv, defining IP headers to be aligned.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned long csum = 0;
	int pos = 0;

	do {
		csum += ((const unsigned int *)iph)[pos];
		if (IS_ENABLED(CONFIG_32BIT))
			csum += csum < ((const unsigned int *)iph)[pos];
	} while (++pos < ihl);

	/*
	 * ZBB only saves three instructions on 32-bit and five on 64-bit so not
	 * worth checking if supported without Alternatives.
	 */
	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
	    IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
		unsigned long fold_temp;

		asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
					      RISCV_ISA_EXT_ZBB, 1)
				  :
				  :
				  :
				  : no_zbb);

		if (IS_ENABLED(CONFIG_32BIT)) {
			asm(".option push				\n\
			.option arch,+zbb				\n\
				not	%[fold_temp], %[csum]		\n\
				rori	%[csum], %[csum], 16		\n\
				sub	%[csum], %[fold_temp], %[csum]	\n\
			.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
		} else {
			asm(".option push				\n\
			.option arch,+zbb				\n\
				rori	%[fold_temp], %[csum], 32	\n\
				add	%[csum], %[fold_temp], %[csum]	\n\
				srli	%[csum], %[csum], 32		\n\
				not	%[fold_temp], %[csum]		\n\
				roriw	%[csum], %[csum], 16		\n\
				subw	%[csum], %[fold_temp], %[csum]	\n\
			.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
		}
		return (__force __sum16)(csum >> 16);
	}
no_zbb:
#ifndef CONFIG_32BIT
	csum += ror64(csum, 32);
	csum >>= 32;
#endif
	return csum_fold((__force __wsum)csum);
}

#endif /* __ASM_RISCV_CHECKSUM_H */
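For reference, the tail of ip_fast_csum() (both the Zbb assembly and the generic csum_fold() fallback) reduces the accumulated sum to a 16-bit one's-complement value. A portable sketch of that fold (hypothetical name, not part of the patch) looks like:

	#include <stdint.h>

	/* Portable model of the final fold: squeeze a 32-bit one's-complement
	 * sum down to 16 bits and return its complement. The 32-bit Zbb path
	 * computes the same value as (~sum - ror32(sum, 16)) >> 16. */
	static uint16_t fold_to_csum16(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low half */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry of the fold */
		return (uint16_t)~sum;
	}

Existing in-tree callers use the helper the way ip_send_check() does: clear iph->check, then assign iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl).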