mirror of https://github.com/torvalds/linux.git
c2a658d419
This patch utilizes Vector to perform copy_to_user/copy_from_user. If Vector is available and the size of the copy is large enough for Vector to perform better than scalar, then direct the kernel to do Vector copies for userspace. Though the best programming practice for users is to reduce the copy, this provides a faster variant when copies are inevitable.

The optimal size for using Vector, copy_to_user_thres, is only a heuristic for now. We can add DT parsing if people feel the need to customize it.

The exception fixup code of __asm_vector_usercopy must fall back to the scalar one because accessing user pages might fault, and must be sleepable. Current kernel-mode Vector does not allow tasks to be preemptible, so we must deactivate Vector and perform a scalar fallback in such cases.

The original implementation of the Vector operations comes from https://github.com/sifive/sifive-libc, which we agree to contribute to the Linux kernel.

Co-developed-by: Jerry Shih <jerry.shih@sifive.com>
Signed-off-by: Jerry Shih <jerry.shih@sifive.com>
Co-developed-by: Nick Knight <nick.knight@sifive.com>
Signed-off-by: Nick Knight <nick.knight@sifive.com>
Suggested-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
Tested-by: Björn Töpel <bjorn@rivosinc.com>
Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Link: https://lore.kernel.org/r/20240115055929.4736-6-andy.chiu@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
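For context, below is a minimal sketch of the C-side dispatch such a routine implies. Only __asm_vector_usercopy and the copy_to_user_thres heuristic are named by this patch; the scalar fallback, the usability check, and the kernel-mode Vector begin/end helpers are assumed placeholders, not the exact kernel code.

/* Sketch only: helpers other than __asm_vector_usercopy() are assumptions. */
#include <linux/linkage.h>
#include <linux/types.h>

/* Provided by the asm file below; returns the number of bytes NOT copied. */
asmlinkage size_t __asm_vector_usercopy(void *dst, void *src, size_t n);

/* Assumed placeholders for this sketch. */
size_t fallback_scalar_usercopy(void *dst, void *src, size_t n);
bool may_use_kernel_vector(void);   /* e.g. false in non-preemptible context */
void kernel_vector_begin(void);
void kernel_vector_end(void);
extern size_t copy_to_user_thres;   /* heuristic threshold from the patch */

static size_t vector_or_scalar_usercopy(void *dst, void *src, size_t n)
{
	size_t remain, copied;

	/* Small copies, or contexts where kernel-mode Vector cannot run,
	 * go straight to the scalar routine. */
	if (n < copy_to_user_thres || !may_use_kernel_vector())
		return fallback_scalar_usercopy(dst, src, n);

	kernel_vector_begin();
	remain = __asm_vector_usercopy(dst, src, n);
	kernel_vector_end();

	/* If the Vector copy stopped at a faulting user page, let the
	 * (sleepable) scalar fallback finish from the first uncopied byte. */
	if (remain) {
		copied = n - remain;
		remain = fallback_scalar_usercopy((char *)dst + copied,
						  (char *)src + copied, remain);
	}
	return remain;
}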
54 lines
1.1 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */

#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>

#define pDst a0
#define pSrc a1
#define iNum a2

#define iVL a3

#define ELEM_LMUL_SETTING m8
#define vData v0

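/*
 * fixup: emit the given load/store and record its address in the
 * exception table, so that a fault while accessing a user page
 * redirects execution to the supplied label instead of leaving the
 * fault unhandled.
 */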
.macro fixup op reg addr lbl
100:
\op \reg, \addr
_asm_extable 100b, \lbl
.endm

SYM_FUNC_START(__asm_vector_usercopy)
/* Enable access to user memory */
li t6, SR_SUM
csrs CSR_STATUS, t6

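/*
 * Strip-mined copy loop: each pass, vsetvli returns in iVL the number
 * of byte elements (e8, LMUL=8) to process this iteration, capped by
 * the remaining count in iNum.
 */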
loop:
vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
fixup vle8.v vData, (pSrc), 10f
sub iNum, iNum, iVL
add pSrc, pSrc, iVL
fixup vse8.v vData, (pDst), 11f
add pDst, pDst, iVL
bnez iNum, loop

/* Exception fixup for vector load is shared with normal exit */
10:
/* Disable access to user memory */
csrc CSR_STATUS, t6
mv a0, iNum
ret

/* Exception fixup code for vector store. */
11:
/* Undo the subtraction after vle8.v */
add iNum, iNum, iVL
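/*
 * A faulting vector store sets vstart to the element index at which it
 * trapped; with e8 elements that is the number of bytes already written
 * in this iteration.
 */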
/* Make sure the scalar fallback skips already-processed bytes */
csrr t2, CSR_VSTART
sub iNum, iNum, t2
j 10b
SYM_FUNC_END(__asm_vector_usercopy)
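On either exit path the remaining byte count in iNum is moved into a0 as the return value, so a caller along the lines of the sketch above can hand exactly the uncopied tail to the scalar fallback.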