riscv: __asm_copy_to-from_user: Fix: Typos in comments
Fix typos and grammar mistakes in the comments and use a more
intuitive label name.
Signed-off-by: Akira Tsukamoto <akira.tsukamoto@gmail.com>
Fixes: ca6eaaa210 ("riscv: __asm_copy_to-from_user: Optimize unaligned memory access and pipeline stall")
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
@@ -33,19 +33,20 @@ ENTRY(__asm_copy_from_user)
 	/*
 	 * Use byte copy only if too small.
+	 * SZREG holds 4 for RV32 and 8 for RV64
 	 */
 	li	a3, 9*SZREG /* size must be larger than size in word_copy */
 	bltu	a2, a3, .Lbyte_copy_tail
 
 	/*
-	 * Copy first bytes until dst is align to word boundary.
+	 * Copy first bytes until dst is aligned to word boundary.
	 * a0 - start of dst
	 * t1 - start of aligned dst
	 */
	addi	t1, a0, SZREG-1
	andi	t1, t1, ~(SZREG-1)
	/* dst is already aligned, skip */
-	beq	a0, t1, .Lskip_first_bytes
+	beq	a0, t1, .Lskip_align_dst
 1:
	/* a5 - one byte for copying data */
	fixup lb	a5, 0(a1), 10f
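(Reading aid, not part of the patch.) The addi/andi pair in the hunk above rounds dst up to the next word boundary before the byte-by-byte head copy. A minimal C sketch of that computation, assuming SZREG is the register width in bytes:

	#include <stdint.h>

	#define SZREG sizeof(unsigned long)	/* 4 on RV32, 8 on RV64 */

	/* t1 = (a0 + SZREG-1) & ~(SZREG-1): first word-aligned dst address */
	static unsigned char *first_aligned_dst(unsigned char *dst)
	{
		uintptr_t t1 = ((uintptr_t)dst + SZREG - 1) & ~(uintptr_t)(SZREG - 1);

		return (unsigned char *)t1;
	}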
@@ -54,7 +55,7 @@ ENTRY(__asm_copy_from_user)
	addi	a0, a0, 1	/* dst */
	bltu	a0, t1, 1b	/* t1 - start of aligned dst */
 
-.Lskip_first_bytes:
+.Lskip_align_dst:
 	/*
	 * Now dst is aligned.
	 * Use shift-copy if src is misaligned.
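(Also illustrative, not patch content.) Once dst is aligned, the routine chooses between the plain word copy and the shift-copy depending on whether src is word aligned too, as the comment above says. A minimal C sketch of that test:

	#include <stdbool.h>
	#include <stdint.h>

	#define SZREG sizeof(unsigned long)	/* 4 on RV32, 8 on RV64 */

	/* Plain word copy needs src aligned as well; otherwise shift-copy. */
	static bool src_needs_shift_copy(const void *src)
	{
		return ((uintptr_t)src & (SZREG - 1)) != 0;
	}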
@@ -71,7 +72,6 @@ ENTRY(__asm_copy_from_user)
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
-	 * a3 - a1 & mask:(SZREG-1)
	 * t0 - end of aligned dst
	 */
	addi	t0, t0, -(8*SZREG) /* not to over run */
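(Illustrative sketch only; the 8-word unroll is inferred from the 8*SZREG adjustment above, and the helper name is made up.) Pulling the loop bound back by 8*SZREG keeps the unrolled body from running past the end of dst, roughly:

	/* dst and src are word-aligned; end points one past the last dst word */
	static void word_copy_sketch(unsigned long *dst, const unsigned long *src,
				     const unsigned long *end)
	{
		const unsigned long *last = end - 8;	/* "addi t0, t0, -(8*SZREG)" */

		while (dst < last) {
			dst[0] = src[0]; dst[1] = src[1];
			dst[2] = src[2]; dst[3] = src[3];
			dst[4] = src[4]; dst[5] = src[5];
			dst[6] = src[6]; dst[7] = src[7];
			dst += 8;
			src += 8;
		}
		/* any remaining words and bytes are handled by the tail copy */
	}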
@@ -106,7 +106,7 @@ ENTRY(__asm_copy_from_user)
	 * For misaligned copy we still perform aligned word copy, but
	 * we need to use the value fetched from the previous iteration and
	 * do some shifts.
-	 * This is safe because reading less than a word size.
+	 * This is safe because reading is less than a word size.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of src
@@ -116,7 +116,7 @@ ENTRY(__asm_copy_from_user)
	 */
	/* calculating aligned word boundary for dst */
	andi	t1, t0, ~(SZREG-1)
-	/* Converting unaligned src to aligned arc */
+	/* Converting unaligned src to aligned src */
	andi	a1, a1, ~(SZREG-1)
 
 	/*
@@ -128,7 +128,7 @@ ENTRY(__asm_copy_from_user)
	li	a5, SZREG*8
	sub	t4, a5, t3
 
-	/* Load the first word to combine with seceond word */
+	/* Load the first word to combine with second word */
	fixup REG_L	a5, 0(a1), 10f
 
 3:
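(Illustrative sketch, not patch content; it assumes little-endian byte order and a nonzero misalignment, since the aligned case takes the plain word copy, and it omits the user-access fixups.) The comments corrected above describe the shift-copy: only aligned words are loaded from src, and each dst word is assembled from the previously fetched word and the next one:

	#include <stdint.h>

	#define SZREG sizeof(unsigned long)	/* 4 on RV32, 8 on RV64 */

	/*
	 * dst and end are word-aligned; asrc is src rounded down to a word
	 * boundary; off is the byte misalignment of src (1..SZREG-1).
	 * t3/t4 mirror "li a5, SZREG*8; sub t4, a5, t3" in the hunk above.
	 */
	static void shift_copy_sketch(unsigned long *dst, const unsigned long *end,
				      const unsigned long *asrc, unsigned int off)
	{
		unsigned int t3 = off * 8;		/* bits to drop from the low word  */
		unsigned int t4 = SZREG * 8 - t3;	/* bits to take from the high word */
		unsigned long prev = *asrc++;		/* first word, combined with the second */

		while (dst < end) {
			unsigned long next = *asrc++;

			*dst++ = (prev >> t3) | (next << t4);
			prev = next;
		}
	}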
@@ -160,7 +160,7 @@ ENTRY(__asm_copy_from_user)
	 * a1 - start of remaining src
	 * t0 - end of remaining dst
	 */
-	bgeu	a0, t0, 5f
+	bgeu	a0, t0, .Lout_copy_user	/* check if end of copy */
 4:
	fixup lb	a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
@@ -168,7 +168,7 @@ ENTRY(__asm_copy_from_user)
	addi	a0, a0, 1	/* dst */
	bltu	a0, t0, 4b	/* t0 - end of dst */
 
-5:
+.Lout_copy_user:
	/* Disable access to user memory */
	csrc	CSR_STATUS, t6
	li	a0, 0